diff --git a/Makefile b/Makefile index c252e1375..0269ec148 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: build debug test tar +.PHONY: build debug install test upload lint # go env GOPROXY := "https://goproxy.cn,direct" @@ -60,6 +60,9 @@ build: debug: $(GOENV) $(GO) build -o $(OUTPUT) $(DEBUG_FLAGS) $(PACKAGES) +install: + cp bin/curveadm ~/.curveadm/bin + test: $(GO_TEST) $(TEST_FLAGS) ./... diff --git a/cli/cli/cli.go b/cli/cli/cli.go index 703ca62e7..c462c4ede 100644 --- a/cli/cli/cli.go +++ b/cli/cli/cli.go @@ -52,7 +52,6 @@ type CurveAdm struct { pluginDir string logDir string tempDir string - dbpath string logpath string config *configure.CurveAdmConfig @@ -142,9 +141,9 @@ func (curveadm *CurveAdm) init() error { // (4) Init error code errno.Init(logpath) - // (5) New storage: create table in sqlite - dbpath := fmt.Sprintf("%s/curveadm.db", curveadm.dataDir) - s, err := storage.NewStorage(dbpath) + // (5) New storage: create table in sqlite/rqlite + dbUrl := config.GetDBUrl() + s, err := storage.NewStorage(dbUrl) if err != nil { log.Error("Init SQLite database failed", log.Field("Error", err)) @@ -174,7 +173,6 @@ func (curveadm *CurveAdm) init() error { log.Field("ClusterName", cluster.Name)) } - curveadm.dbpath = dbpath curveadm.logpath = logpath curveadm.config = config curveadm.in = os.Stdin @@ -253,7 +251,6 @@ func (curveadm *CurveAdm) DataDir() string { return curveadm.d func (curveadm *CurveAdm) PluginDir() string { return curveadm.pluginDir } func (curveadm *CurveAdm) LogDir() string { return curveadm.logDir } func (curveadm *CurveAdm) TempDir() string { return curveadm.tempDir } -func (curveadm *CurveAdm) DBPath() string { return curveadm.dbpath } func (curveadm *CurveAdm) LogPath() string { return curveadm.logpath } func (curveadm *CurveAdm) Config() *configure.CurveAdmConfig { return curveadm.config } func (curveadm *CurveAdm) SudoAlias() string { return curveadm.config.GetSudoAlias() } diff --git a/cli/command/exec.go b/cli/command/exec.go index 7b62c037a..b08bdc30c 100644 --- a/cli/command/exec.go +++ b/cli/command/exec.go @@ -20,8 +20,6 @@ * Author: Jingli Chen (Wine93) */ -// __SIGN_BY_WINE93__ - package command import ( @@ -60,6 +58,11 @@ func NewExecCommand(curveadm *cli.CurveAdm) *cobra.Command { return cmd } +// exec: +// 1. parse cluster topology +// 2. filter service +// 3. get container id +// 4. 
exec cmd in remote container func runExec(curveadm *cli.CurveAdm, options execOptions) error { // 1) parse cluster topology dcs, err := curveadm.ParseTopology() diff --git a/go.mod b/go.mod index 301fc078e..40b337f52 100644 --- a/go.mod +++ b/go.mod @@ -75,6 +75,7 @@ require ( github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rivo/uniseg v0.4.3 // indirect + github.com/rqlite/gorqlite v0.0.0-20230310040812-ec5e524a562e github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect diff --git a/go.sum b/go.sum index f5bdadec9..caf9ad56f 100644 --- a/go.sum +++ b/go.sum @@ -741,6 +741,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rqlite/gorqlite v0.0.0-20230310040812-ec5e524a562e h1:updBXFrJFAJO/3b/mctukZQEIVUq09iwV/wireIlZFA= +github.com/rqlite/gorqlite v0.0.0-20230310040812-ec5e524a562e/go.mod h1:xF/KoXmrRyahPfo5L7Szb5cAAUl53dMWBh9cMruGEZg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= diff --git a/internal/configure/curveadm/curveadm.go b/internal/configure/curveadm/curveadm.go index 4f1b850ba..7020bc0d8 100644 --- a/internal/configure/curveadm/curveadm.go +++ b/internal/configure/curveadm/curveadm.go @@ -25,6 +25,10 @@ package curveadm import ( + "fmt" + "os" + "regexp" + "github.com/opencurve/curveadm/internal/build" "github.com/opencurve/curveadm/internal/errno" "github.com/opencurve/curveadm/internal/utils" @@ -40,6 +44,9 @@ import ( * [ssh_connections] * retries = 3 * timeout = 10 + * + * [database] + * url = "sqlite:///home/curve/.curveadm/data/curveadm.db" */ const ( KEY_LOG_LEVEL = "log_level" @@ -48,6 +55,13 @@ const ( KEY_AUTO_UPGRADE = "auto_upgrade" KEY_SSH_RETRIES = "retries" KEY_SSH_TIMEOUT = "timeout" + KEY_DB_URL = "url" + + // rqlite://127.0.0.1:4000 + // sqlite:///home/curve/.curveadm/data/curveadm.db + REGEX_DB_URL = "^(sqlite|rqlite)://(.+)$" + DB_SQLITE = "sqlite" + DB_RQLITE = "rqlite" WITHOUT_SUDO = " " ) @@ -60,26 +74,19 @@ type ( AutoUpgrade bool SSHRetries int SSHTimeout int + DBUrl string } CurveAdm struct { Defaults map[string]interface{} `mapstructure:"defaults"` SSHConnections map[string]interface{} `mapstructure:"ssh_connections"` + DataBase map[string]interface{} `mapstructure:"database"` } ) var ( GlobalCurveAdmConfig *CurveAdmConfig - defaultCurveAdmConfig = &CurveAdmConfig{ - LogLevel: "error", - SudoAlias: "sudo", - Timeout: 180, - AutoUpgrade: true, - SSHRetries: 3, - SSHTimeout: 10, - } - SUPPORT_LOG_LEVEL = map[string]bool{ "debug": true, "info": true, @@ -92,6 +99,20 @@ func ReplaceGlobals(cfg *CurveAdmConfig) { GlobalCurveAdmConfig = cfg } +func newDefault() *CurveAdmConfig { + home, _ := os.UserHomeDir() + cfg := &CurveAdmConfig{ + LogLevel: "error", + SudoAlias: "sudo", + Timeout: 180, + AutoUpgrade: true, + SSHRetries: 3, + SSHTimeout: 10, + DBUrl: fmt.Sprintf("sqlite://%s/.curveadm/data/curveadm.db", home), + } + return cfg +} + // TODO(P2): 
using ItemSet to check value type func requirePositiveInt(k string, v interface{}) (int, error) { num, ok := utils.Str2Int(v.(string)) @@ -141,6 +162,7 @@ func parseDefaultsSection(cfg *CurveAdmConfig, defaults map[string]interface{}) } cfg.Timeout = num + // auto upgrade case KEY_AUTO_UPGRADE: yes, err := requirePositiveBool(KEY_AUTO_UPGRADE, v) if err != nil { @@ -189,8 +211,39 @@ func parseConnectionSection(cfg *CurveAdmConfig, connection map[string]interface return nil } +func parseDatabaseSection(cfg *CurveAdmConfig, database map[string]interface{}) error { + if database == nil { + return nil + } + + for k, v := range database { + switch k { + // database url + case KEY_DB_URL: + dbUrl := v.(string) + pattern := regexp.MustCompile(REGEX_DB_URL) + mu := pattern.FindStringSubmatch(dbUrl) + if len(mu) == 0 { + return errno.ERR_UNSUPPORT_CURVEADM_DATABASE_URL.F("url: %s", dbUrl) + } + cfg.DBUrl = dbUrl + + default: + return errno.ERR_UNSUPPORT_CURVEADM_CONFIGURE_ITEM. + F("%s: %s", k, v) + } + } + + return nil +} + +type sectionParser struct { + parser func(*CurveAdmConfig, map[string]interface{}) error + section map[string]interface{} +} + func ParseCurveAdmConfig(filename string) (*CurveAdmConfig, error) { - cfg := defaultCurveAdmConfig + cfg := newDefault() if !utils.PathExist(filename) { build.DEBUG(build.DEBUG_CURVEADM_CONFIGURE, cfg) return cfg, nil @@ -211,13 +264,16 @@ func ParseCurveAdmConfig(filename string) (*CurveAdmConfig, error) { return nil, errno.ERR_PARSE_CURVRADM_CONFIGURE_FAILED.E(err) } - err = parseDefaultsSection(cfg, global.Defaults) - if err != nil { - return nil, err + items := []sectionParser{ + {parseDefaultsSection, global.Defaults}, + {parseConnectionSection, global.SSHConnections}, + {parseDatabaseSection, global.DataBase}, } - err = parseConnectionSection(cfg, global.SSHConnections) - if err != nil { - return nil, err + for _, item := range items { + err := item.parser(cfg, item.section) + if err != nil { + return nil, err + } } build.DEBUG(build.DEBUG_CURVEADM_CONFIGURE, cfg) @@ -235,3 +291,16 @@ func (cfg *CurveAdmConfig) GetSudoAlias() string { } return cfg.SudoAlias } + +func (cfg *CurveAdmConfig) GetDBUrl() string { + return cfg.DBUrl +} + +func (cfg *CurveAdmConfig) GetDBPath() string { + pattern := regexp.MustCompile(REGEX_DB_URL) + mu := pattern.FindStringSubmatch(cfg.DBUrl) + if len(mu) == 0 || mu[1] != DB_SQLITE { + return "" + } + return mu[2] +} diff --git a/internal/errno/errno.go b/internal/errno/errno.go index 159dd74b3..3f72b0cdb 100644 --- a/internal/errno/errno.go +++ b/internal/errno/errno.go @@ -276,6 +276,7 @@ var ( // 311: configure (curveadm.cfg: invalid configure value) ERR_UNSUPPORT_CURVEADM_LOG_LEVEL = EC(311000, "unsupport curveadm log level") ERR_UNSUPPORT_CURVEADM_CONFIGURE_ITEM = EC(311001, "unsupport curveadm configure item") + ERR_UNSUPPORT_CURVEADM_DATABASE_URL = EC(311002, "unsupport curveadm database url") // 320: configure (hosts.yaml: parse failed) ERR_HOSTS_FILE_NOT_FOUND = EC(320000, "hosts file not found") diff --git a/internal/storage/driver/driver.go b/internal/storage/driver/driver.go new file mode 100644 index 000000000..004b3d478 --- /dev/null +++ b/internal/storage/driver/driver.go @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the Licensele(). + */ + +/* + * Project: CurveAdm + * Created Date: 2023-05-24 + * Author: Jingli Chen (Wine93) + */ + +package driver + +type IQueryResult interface { + Next() bool + Scan(dest ...any) error + Close() error +} + +type IWriteResult interface { + LastInsertId() (int64, error) +} + +type IDataBaseDriver interface { + Open(dbUrl string) error + Close() error + Query(query string, args ...any) (IQueryResult, error) + Write(query string, args ...any) (IWriteResult, error) +} diff --git a/internal/storage/driver/rqlite.go b/internal/storage/driver/rqlite.go new file mode 100644 index 000000000..426bb2481 --- /dev/null +++ b/internal/storage/driver/rqlite.go @@ -0,0 +1,88 @@ +package driver + +import ( + "strings" + "sync" + + "github.com/rqlite/gorqlite" + rqlite "github.com/rqlite/gorqlite" +) + +type RQLiteDB struct { + conn *rqlite.Connection + sync.Mutex +} + +type QueryResult struct { + result rqlite.QueryResult +} + +type WriteResult struct { + result rqlite.WriteResult +} + +var ( + _ IDataBaseDriver = (*RQLiteDB)(nil) + _ IQueryResult = (*QueryResult)(nil) + _ IWriteResult = (*WriteResult)(nil) +) + +func NewRQLiteDB() *RQLiteDB { + return &RQLiteDB{} +} + +func (db *RQLiteDB) Open(url string) error { + connURL := "http://" + strings.TrimPrefix(url, "rqlite://") + conn, err := gorqlite.Open(connURL) + if err != nil { + return err + } + db.conn = conn + return nil +} + +func (db *RQLiteDB) Close() error { + return nil +} + +func (result *QueryResult) Next() bool { + return result.result.Next() +} + +func (result *QueryResult) Scan(dest ...any) error { + return result.result.Scan(dest...) +} + +func (result *QueryResult) Close() error { + return nil +} + +func (db *RQLiteDB) Query(query string, args ...any) (IQueryResult, error) { + db.Lock() + defer db.Unlock() + + result, err := db.conn.QueryOneParameterized( + rqlite.ParameterizedStatement{ + Query: query, + Arguments: append([]interface{}{}, args...), + }, + ) + return &QueryResult{result: result}, err +} + +func (result *WriteResult) LastInsertId() (int64, error) { + return result.result.LastInsertID, nil +} + +func (db *RQLiteDB) Write(query string, args ...any) (IWriteResult, error) { + db.Lock() + defer db.Unlock() + + result, err := db.conn.WriteOneParameterized( + rqlite.ParameterizedStatement{ + Query: query, + Arguments: append([]interface{}{}, args...), + }, + ) + return &WriteResult{result: result}, err +} diff --git a/internal/storage/driver/sqlite.go b/internal/storage/driver/sqlite.go new file mode 100644 index 000000000..ab1378046 --- /dev/null +++ b/internal/storage/driver/sqlite.go @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2023 NetEase Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the Licensele(). + */ + +/* + * Project: CurveAdm + * Created Date: 2023-05-24 + * Author: Jingli Chen (Wine93) + */ + +package driver + +import ( + "database/sql" + "strings" + "sync" + + _ "github.com/mattn/go-sqlite3" +) + +type SQLiteDB struct { + db *sql.DB + sync.Mutex +} + +type Rows struct { + rows *sql.Rows +} + +type Result struct { + result sql.Result +} + +var ( + _ IDataBaseDriver = (*SQLiteDB)(nil) + _ IQueryResult = (*Rows)(nil) + _ IWriteResult = (*Result)(nil) +) + +func NewSQLiteDB() *SQLiteDB { + return &SQLiteDB{} +} + +func (db *SQLiteDB) Open(url string) error { + var err error + dataSourceName := strings.TrimPrefix(url, "sqlite://") + db.db, err = sql.Open("sqlite3", dataSourceName) + return err +} + +func (db *SQLiteDB) Close() error { + return db.db.Close() +} + +func (result *Rows) Next() bool { + return result.rows.Next() +} + +func (result *Rows) Scan(dest ...any) error { + return result.rows.Scan(dest...) +} + +func (result *Rows) Close() error { + return result.rows.Close() +} + +func (db *SQLiteDB) Query(query string, args ...any) (IQueryResult, error) { + db.Lock() + defer db.Unlock() + + rows, err := db.db.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{rows: rows}, nil +} + +func (result *Result) LastInsertId() (int64, error) { + return result.result.LastInsertId() +} + +func (db *SQLiteDB) Write(query string, args ...any) (IWriteResult, error) { + db.Lock() + defer db.Unlock() + + stmt, err := db.db.Prepare(query) + if err != nil { + return nil, err + } + + result, err := stmt.Exec(args...) + return &Result{result: result}, err +} diff --git a/internal/storage/sql.go b/internal/storage/sql.go index 9a0c191fc..4456f4ba2 100644 --- a/internal/storage/sql.go +++ b/internal/storage/sql.go @@ -37,9 +37,18 @@ package storage +import "time" + +// version +type Version struct { + Id int + Version string + LastConfirm string +} + var ( - // tables (hosts/clusters/containers(service)/clients/playrgound/audit) - CREATE_VERSION_TABLE = ` + // table: version + CreateVersionTable = ` CREATE TABLE IF NOT EXISTS version ( id INTEGER PRIMARY KEY AUTOINCREMENT, version TEXT NOT NULL, @@ -47,7 +56,26 @@ var ( ) ` - CREATE_HOSTS_TABLE = ` + // insert version + InsertVersion = `INSERT INTO version(version, lastconfirm) VALUES(?, "")` + + // set version + SetVersion = `UPDATE version SET version = ?, lastconfirm = ? 
WHERE id = ?` + + // select version + SelectVersion = `SELECT * FROM version` +) + +// hosts +type Hosts struct { + Id int + Data string + LastModifiedTime time.Time +} + +var ( + // table: hosts + CreateHostsTable = ` CREATE TABLE IF NOT EXISTS hosts ( id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT NOT NULL, @@ -55,7 +83,31 @@ var ( ) ` - CREATE_CLUSTERS_TABLE = ` + // insert hosts + InsertHosts = `INSERT INTO hosts(data, lastmodified_time) VALUES(?, datetime('now','localtime'))` + + // set hosts + SetHosts = `UPDATE hosts SET data = ?, lastmodified_time = datetime('now','localtime') WHERE id = ?` + + // select hosts + SelectHosts = `SELECT * FROM hosts` +) + +// cluster +type Cluster struct { + Id int + UUId string + Name string + Description string + CreateTime time.Time + Topology string + Pool string + Current bool +} + +var ( + // table: clusters + CreateClustersTable = ` CREATE TABLE IF NOT EXISTS clusters ( id INTEGER PRIMARY KEY AUTOINCREMENT, uuid TEXT NOT NULL, @@ -68,8 +120,48 @@ var ( ) ` + // insert cluster + InsertCluster = ` + INSERT INTO clusters(uuid, name, description, topology, pool, create_time) + VALUES(hex(randomblob(16)), ?, ?, ?, "", datetime('now','localtime')) + ` + + // delete cluster + DeleteCluster = `DELETE from clusters WHERE name = ?` + + // select cluster + SelectCluster = `SELECT * FROM clusters WHERE name LIKE ?` + + // get current cluster + GetCurrentCluster = `SELECT * FROM clusters WHERE current = 1` + + // checkout cluster + CheckoutCluster = ` + UPDATE clusters + SET current = CASE name + WHEN ? THEN 1 + ELSE 0 + END + ` + + // set cluster topology + SetClusterTopology = `UPDATE clusters SET topology = ? WHERE id = ?` + + // set cluster pool + SetClusterPool = `UPDATE clusters SET topology = ?, pool = ? WHERE id = ?` +) + +// service +type Service struct { + Id string + ClusterId int + ContainerId string +} + +var ( + // table: containers // id: clusterId_role_host_(sequence/name) - CREATE_CONTAINERS_TABLE = ` + CreateContainersTable = ` CREATE TABLE IF NOT EXISTS containers ( id TEXT PRIMARY KEY, cluster_id INTEGER NOT NULL, @@ -77,7 +169,31 @@ var ( ) ` - CREATE_CLIENTS_TABLE = ` + // insert service + InsertService = `INSERT INTO containers(id, cluster_id, container_id) VALUES(?, ?, ?)` + + // select service + SelectService = `SELECT * FROM containers WHERE id = ?` + + // select services in cluster + SelectServicesInCluster = `SELECT * FROM containers WHERE cluster_id = ?` + + // set service container id + SetContainerId = `UPDATE containers SET container_id = ? 
WHERE id = ?` +) + +// client +type Client struct { + Id string + Kind string + Host string + ContainerId string + AuxInfo string +} + +var ( + // table: clients + CreateClientsTable = ` CREATE TABLE IF NOT EXISTS clients ( id TEXT PRIMARY KEY, kind TEXT NOT NULL, @@ -86,117 +202,116 @@ var ( aux_info TEXT NOT NULL ) ` + // insert client + InsertClient = `INSERT INTO clients(id, kind, host, container_id, aux_info) VALUES(?, ?, ?, ?, ?)` + + // select clients + SelectClients = `SELECT * FROM clients` + + // select client by id + SelectClientById = `SELECT * FROM clients WHERE id = ?` + + // delete client + DeleteClient = `DELETE from clients WHERE id = ?` +) + +// playground +type Playground struct { + Id int + Name string + CreateTime time.Time + MountPoint string + Status string +} - CREATE_PLAYGROUND_TABLE = ` - CREATE TABLE IF NOT EXISTS playgrounds ( +var ( + // table: playground + CreatePlaygroundTable = ` + CREATE TABLE IF NOT EXISTS playgrounds ( id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL UNIQUE, create_time DATE NOT NULL, mount_point TEXT NOT NULL, - status TEXT NOT NULL - ) - ` - - CREATE_AUDIT_TABLE = ` - CREATE TABLE IF NOT EXISTS audit ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - execute_time DATE NOT NULL, - work_directory TEXT NOT NULL, - command TEXT NOT NULL, - status INTEGER DEFAULT 0, - error_code INTEGET DEFAULT 0 + status TEXT NOT NULL ) - ` - - CHECK_POOl_COLUMN = ` - SELECT COUNT(*) AS total - FROM pragma_table_info('clusters') - WHERE name='pool' ` - RENAME_CLUSTERS_TABLE = `ALTER TABLE clusters RENAME TO clusters_old` - - INSERT_CLUSTERS_FROM_OLD_TABLE = ` - INSERT INTO clusters(id, uuid, name, description, topology, pool, create_time, current) - SELECT id, uuid, name, description, topology, "", create_time, current - FROM clusters_old + // insert playground + InsertPlayground = ` + INSERT INTO playgrounds(name, create_time, mount_point, status) + VALUES(?, datetime('now','localtime'), ?, ?) ` - DROP_OLD_CLUSTERS_TABLE = `DROP TABLE clusters_old` - - // version - INSERT_VERSION = `INSERT INTO version(version, lastconfirm) VALUES(?, "")` - - SET_VERSION = `UPDATE version SET version = ?, lastconfirm = ? WHERE id = ?` - - SELECT_VERSION = `SELECT * FROM version` + // set playground status + SetPlaygroundStatus = `UPDATE playgrounds SET status = ? WHERE name = ?` - // hosts - INSERT_HOSTS = `INSERT INTO hosts(data, lastmodified_time) VALUES(?, datetime('now','localtime'))` + // select playground + SelectPlayground = `SELECT * FROM playgrounds WHERE name LIKE ?` - SET_HOSTS = `UPDATE hosts SET data = ?, lastmodified_time = datetime('now','localtime') WHERE id = ?` + // select playground by id + SelectPlaygroundById = `SELECT * FROM playgrounds WHERE id = ?` - SELECT_HOSTS = `SELECT * FROM hosts` - - // cluster - INSERT_CLUSTER = `INSERT INTO clusters(uuid, name, description, topology, pool, create_time) - VALUES(hex(randomblob(16)), ?, ?, ?, "", datetime('now','localtime'))` - - DELETE_CLUSTER = `DELETE from clusters WHERE name = ?` - - SELECT_CLUSTER = `SELECT * FROM clusters WHERE name LIKE ?` + // delete playground + DeletePlayground = `DELETE from playgrounds WHERE name = ?` +) - GET_CURRENT_CLUSTER = `SELECT * FROM clusters WHERE current = 1` +// audit log +type AuditLog struct { + Id int + ExecuteTime time.Time + WorkDirectory string + Command string + Status int + ErrorCode int +} - CHECKOUT_CLUSTER = ` - UPDATE clusters - SET current = CASE name - WHEN ? 
THEN 1 - ELSE 0 - END +var ( + // table: audit + CreateAuditTable = ` + CREATE TABLE IF NOT EXISTS audit ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + execute_time DATE NOT NULL, + work_directory TEXT NOT NULL, + command TEXT NOT NULL, + status INTEGER DEFAULT 0, + error_code INTEGET DEFAULT 0 + ) ` - SET_CLUSTER_TOPOLOGY = `UPDATE clusters SET topology = ? WHERE id = ?` - - SET_CLUSTER_POOL = `UPDATE clusters SET topology = ?, pool = ? WHERE id = ?` - - // service - INSERT_SERVICE = `INSERT INTO containers(id, cluster_id, container_id) VALUES(?, ?, ?)` - - SELECT_SERVICE = `SELECT * FROM containers WHERE id = ?` - - SELECT_SERVICE_IN_CLUSTER = `SELECT * FROM containers WHERE cluster_id = ?` - - SET_CONTAINER_ID = `UPDATE containers SET container_id = ? WHERE id = ?` - - // client - INSERT_CLIENT = `INSERT INTO clients(id, kind, host, container_id, aux_info) VALUES(?, ?, ?, ?, ?)` - - SELECT_CLIENTS = `SELECT * FROM clients` - - SELECT_CLIENT_BY_ID = `SELECT * FROM clients WHERE id = ?` - - DELETE_CLIENT = `DELETE from clients WHERE id = ?` - - // playground - INSERT_PLAYGROUND = `INSERT INTO playgrounds(name, create_time, mount_point, status) - VALUES(?, datetime('now','localtime'), ?, ?)` - - SET_PLAYGROUND_STATUS = `UPDATE playgrounds SET status = ? WHERE name = ?` + // insert audit log + InsertAuditLog = ` + INSERT INTO audit(execute_time, work_directory, command, status) + VALUES(?, ?, ?, ?) + ` - SELECT_PLAYGROUND = `SELECT * FROM playgrounds WHERE name LIKE ?` + // set audit log status + SetAuditLogStatus = `UPDATE audit SET status = ?, error_code = ? WHERE id = ?` - SELECT_PLAYGROUND_BY_ID = `SELECT * FROM playgrounds WHERE id = ?` + // select audit log + SelectAuditLog = `SELECT * FROM audit` - DELETE_PLAYGROUND = `DELETE from playgrounds WHERE name = ?` + // select audit log by id + SelectAuditLogById = `SELECT * FROM audit WHERE id = ?` +) - // audit - INSERT_AUDIT_LOG = `INSERT INTO audit(execute_time, work_directory, command, status) - VALUES(?, ?, ?, ?)` +var ( + // check pool column + CheckPoolColumn = ` + SELECT COUNT(*) AS total + FROM pragma_table_info('clusters') + WHERE name='pool' + ` - SET_AUDIT_LOG_STATUS = `UPDATE audit SET status = ?, error_code = ? 
WHERE id = ?` + // rename clusters table + RenameClustersTable = `ALTER TABLE clusters RENAME TO clusters_old` - SELECT_AUDIT_LOG = `SELECT * FROM audit` + // insert clusters from old table + InsertClustersFromOldTable = ` + INSERT INTO clusters(id, uuid, name, description, topology, pool, create_time, current) + SELECT id, uuid, name, description, topology, "", create_time, current + FROM clusters_old + ` - SELECT_AUDIT_LOG_BY_ID = `SELECT * FROM audit WHERE id = ?` + // statement: drom old clusters table + DropOldClustersTable = `DROP TABLE clusters_old` ) diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 3d4a9c440..f3757c45c 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -23,162 +23,72 @@ package storage import ( - "database/sql" "fmt" - "sync" + "regexp" "time" _ "github.com/mattn/go-sqlite3" + "github.com/opencurve/curveadm/internal/storage/driver" ) -type Version struct { - Id int - Version string - LastConfirm string -} - -type Hosts struct { - Id int - Data string - LastmodifiedTime time.Time -} - -type Cluster struct { - Id int - UUId string - Name string - Description string - CreateTime time.Time - Topology string - Pool string - Current bool -} - -type Service struct { - Id string - ClusterId int - ContainerId string -} - -type Client struct { - Id string - Kind string - Host string - ContainerId string - AuxInfo string -} - -type Playground struct { - Id int - Name string - CreateTime time.Time - MountPoint string - Status string -} +var ( + ErrInvalidDBUrl = fmt.Errorf("invalid database url") +) -type AuditLog struct { - Id int - ExecuteTime time.Time - WorkDirectory string - Command string - Status int - ErrorCode int -} +// rqlite://127.0.0.1:4000 +// sqlite:///home/curve/.curveadm/data/curveadm.db +const ( + REGEX_DB_URL = "^(sqlite|rqlite)://(.+)$" +) type Storage struct { - db *sql.DB - mutex *sync.Mutex + db driver.IDataBaseDriver } -func NewStorage(dbfile string) (*Storage, error) { - db, err := sql.Open("sqlite3", dbfile) - if err != nil { - return nil, err +func NewStorage(dbURL string) (*Storage, error) { + pattern := regexp.MustCompile(REGEX_DB_URL) + mu := pattern.FindStringSubmatch(dbURL) + if len(mu) == 0 { + return nil, ErrInvalidDBUrl } - s := &Storage{db: db, mutex: &sync.Mutex{}} - if err = s.init(); err != nil { - return nil, err + storage := &Storage{} + if mu[1] == "sqlite" { + storage.db = driver.NewSQLiteDB() + } else { + storage.db = driver.NewRQLiteDB() } - return s, nil -} - -func (s *Storage) init() error { - if err := s.execSQL(CREATE_VERSION_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_HOSTS_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_HOSTS_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_CLUSTERS_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_CONTAINERS_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_CLIENTS_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_PLAYGROUND_TABLE); err != nil { - return err - } else if err := s.execSQL(CREATE_AUDIT_TABLE); err != nil { - return err - } else if err := s.compatible(); err != nil { - return err + err := storage.db.Open(dbURL) + if err == nil { + err = storage.init() } - - return nil + return storage, err } -func (s *Storage) compatible() error { - tx, err := s.db.Begin() - if err != nil { - return err +func (s *Storage) init() error { + sqls := []string{ + CreateVersionTable, + CreateHostsTable, + 
CreateClustersTable, + CreateContainersTable, + CreateClientsTable, + CreatePlaygroundTable, + CreateAuditTable, } - err = func() error { - rows, err := tx.Query(CHECK_POOl_COLUMN) + for _, sql := range sqls { + _, err := s.db.Write(sql) if err != nil { return err } - - if rows.Next() { - count := 0 - err = rows.Scan(&count) - rows.Close() - if err != nil { - return err - } - if count != 0 { - return nil - } - } - - alterSQL := fmt.Sprintf("%s;%s;%s;%s", - RENAME_CLUSTERS_TABLE, - CREATE_CLUSTERS_TABLE, - INSERT_CLUSTERS_FROM_OLD_TABLE, - DROP_OLD_CLUSTERS_TABLE, - ) - _, err = tx.Exec(alterSQL) - return err - }() - - if err != nil { - tx.Rollback() - return err } - return tx.Commit() + return nil } -func (s *Storage) execSQL(query string, args ...interface{}) error { - s.mutex.Lock() - defer s.mutex.Unlock() - stmt, err := s.db.Prepare(query) - if err != nil { - return err - } - - _, err = stmt.Exec(args...) +func (s *Storage) write(query string, args ...any) error { + _, err := s.db.Write(query, args...) return err } @@ -192,24 +102,22 @@ func (s *Storage) SetVersion(version, lastConfirm string) error { if err != nil { return err } else if len(versions) == 0 { - return s.execSQL(INSERT_VERSION, version) + return s.write(InsertVersion, version) } - return s.execSQL(SET_VERSION, version, lastConfirm, versions[0].Id) + return s.write(SetVersion, version, lastConfirm, versions[0].Id) } func (s *Storage) GetVersions() ([]Version, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(SELECT_VERSION) + result, err := s.db.Query(SelectVersion) if err != nil { return nil, err } + defer result.Close() - defer rows.Close() var versions []Version var version Version - for rows.Next() { - err = rows.Scan(&version.Id, &version.Version, &version.LastConfirm) + for result.Next() { + err = result.Scan(&version.Id, &version.Version, &version.LastConfirm) versions = append(versions, version) break } @@ -222,24 +130,22 @@ func (s *Storage) SetHosts(data string) error { if err != nil { return err } else if len(hostses) == 0 { - return s.execSQL(INSERT_HOSTS, data) + return s.write(InsertHosts, data) } - return s.execSQL(SET_HOSTS, data, hostses[0].Id) + return s.write(SetHosts, data, hostses[0].Id) } func (s *Storage) GetHostses() ([]Hosts, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(SELECT_HOSTS) + result, err := s.db.Query(SelectHosts) if err != nil { return nil, err } + defer result.Close() - defer rows.Close() var hostses []Hosts var hosts Hosts - for rows.Next() { - err = rows.Scan(&hosts.Id, &hosts.Data, &hosts.LastmodifiedTime) + for result.Next() { + err = result.Scan(&hosts.Id, &hosts.Data, &hosts.LastModifiedTime) hostses = append(hostses, hosts) break } @@ -248,27 +154,33 @@ func (s *Storage) GetHostses() ([]Hosts, error) { // cluster func (s *Storage) InsertCluster(name, description, topology string) error { - return s.execSQL(INSERT_CLUSTER, name, description, topology) + return s.write(InsertCluster, name, description, topology) } func (s *Storage) DeleteCluster(name string) error { - return s.execSQL(DELETE_CLUSTER, name) + return s.write(DeleteCluster, name) } func (s *Storage) getClusters(query string, args ...interface{}) ([]Cluster, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(query, args...) + result, err := s.db.Query(query, args...) 
if err != nil { return nil, err } + defer result.Close() - defer rows.Close() clusters := []Cluster{} - for rows.Next() { + for result.Next() { cluster := Cluster{} - err = rows.Scan(&cluster.Id, &cluster.UUId, &cluster.Name, &cluster.Description, - &cluster.Topology, &cluster.Pool, &cluster.CreateTime, &cluster.Current) + err = result.Scan( + &cluster.Id, + &cluster.UUId, + &cluster.Name, + &cluster.Description, + &cluster.Topology, + &cluster.Pool, + &cluster.CreateTime, + &cluster.Current, + ) if err != nil { return nil, err } @@ -279,16 +191,16 @@ func (s *Storage) getClusters(query string, args ...interface{}) ([]Cluster, err } func (s *Storage) GetClusters(name string) ([]Cluster, error) { - return s.getClusters(SELECT_CLUSTER, name) + return s.getClusters(SelectCluster, name) } func (s *Storage) CheckoutCluster(name string) error { - return s.execSQL(CHECKOUT_CLUSTER, name) + return s.write(CheckoutCluster, name) } func (s *Storage) GetCurrentCluster() (Cluster, error) { cluster := Cluster{Id: -1, Name: ""} - clusters, err := s.getClusters(GET_CURRENT_CLUSTER) + clusters, err := s.getClusters(GetCurrentCluster) if err != nil { return cluster, err } else if len(clusters) == 1 { @@ -299,31 +211,29 @@ func (s *Storage) GetCurrentCluster() (Cluster, error) { } func (s *Storage) SetClusterTopology(id int, topology string) error { - return s.execSQL(SET_CLUSTER_TOPOLOGY, topology, id) + return s.write(SetClusterTopology, topology, id) } func (s *Storage) SetClusterPool(id int, topology, pool string) error { - return s.execSQL(SET_CLUSTER_POOL, topology, pool, id) + return s.write(SetClusterPool, topology, pool, id) } // service func (s *Storage) InsertService(clusterId int, serviceId, containerId string) error { - return s.execSQL(INSERT_SERVICE, serviceId, clusterId, containerId) + return s.write(InsertService, serviceId, clusterId, containerId) } func (s *Storage) getServices(query string, args ...interface{}) ([]Service, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(query, args...) + result, err := s.db.Query(query, args...) 
if err != nil { return nil, err } + defer result.Close() - defer rows.Close() services := []Service{} var service Service - for rows.Next() { - err = rows.Scan(&service.Id, &service.ClusterId, &service.ContainerId) + for result.Next() { + err = result.Scan(&service.Id, &service.ClusterId, &service.ContainerId) if err != nil { return nil, err } @@ -334,11 +244,11 @@ func (s *Storage) getServices(query string, args ...interface{}) ([]Service, err } func (s *Storage) GetServices(clusterId int) ([]Service, error) { - return s.getServices(SELECT_SERVICE_IN_CLUSTER, clusterId) + return s.getServices(SelectServicesInCluster, clusterId) } func (s *Storage) GetContainerId(serviceId string) (string, error) { - services, err := s.getServices(SELECT_SERVICE, serviceId) + services, err := s.getServices(SelectService, serviceId) if err != nil || len(services) == 0 { return "", err } @@ -347,27 +257,25 @@ func (s *Storage) GetContainerId(serviceId string) (string, error) { } func (s *Storage) SetContainId(serviceId, containerId string) error { - return s.execSQL(SET_CONTAINER_ID, containerId, serviceId) + return s.write(SetContainerId, containerId, serviceId) } // client func (s *Storage) InsertClient(id, kind, host, containerId, auxInfo string) error { - return s.execSQL(INSERT_CLIENT, id, kind, host, containerId, auxInfo) + return s.write(InsertClient, id, kind, host, containerId, auxInfo) } func (s *Storage) getClients(query string, args ...interface{}) ([]Client, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(query, args...) + result, err := s.db.Query(query, args...) if err != nil { return nil, err } + defer result.Close() - defer rows.Close() clients := []Client{} var client Client - for rows.Next() { - err = rows.Scan(&client.Id, &client.Kind, &client.Host, &client.ContainerId, &client.AuxInfo) + for result.Next() { + err = result.Scan(&client.Id, &client.Kind, &client.Host, &client.ContainerId, &client.AuxInfo) if err != nil { return nil, err } @@ -378,7 +286,7 @@ func (s *Storage) getClients(query string, args ...interface{}) ([]Client, error } func (s *Storage) GetClientContainerId(id string) (string, error) { - clients, err := s.getClients(SELECT_CLIENT_BY_ID, id) + clients, err := s.getClients(SelectClientById, id) if err != nil || len(clients) == 0 { return "", err } @@ -387,44 +295,42 @@ func (s *Storage) GetClientContainerId(id string) (string, error) { } func (s *Storage) GetClient(id string) ([]Client, error) { - return s.getClients(SELECT_CLIENT_BY_ID, id) + return s.getClients(SelectClientById, id) } func (s *Storage) GetClients() ([]Client, error) { - return s.getClients(SELECT_CLIENTS) + return s.getClients(SelectClients) } func (s *Storage) DeleteClient(id string) error { - return s.execSQL(DELETE_CLIENT, id) + return s.write(DeleteClient, id) } // playground func (s *Storage) InsertPlayground(name, mountPoint string) error { // FIXME: remove status - return s.execSQL(INSERT_PLAYGROUND, name, mountPoint, "") + return s.write(InsertPlayground, name, mountPoint, "") } func (s *Storage) SetPlaygroundStatus(name, status string) error { - return s.execSQL(SET_PLAYGROUND_STATUS, status, name) + return s.write(SetPlaygroundStatus, status, name) } func (s *Storage) DeletePlayground(name string) error { - return s.execSQL(DELETE_PLAYGROUND, name) + return s.write(DeletePlayground, name) } func (s *Storage) getPlaygrounds(query string, args ...interface{}) ([]Playground, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(query, args...) 
+ result, err := s.db.Query(query, args...) if err != nil { return nil, err } + defer result.Close() - defer rows.Close() playgrounds := []Playground{} var playground Playground - for rows.Next() { - err = rows.Scan( + for result.Next() { + err = result.Scan( &playground.Id, &playground.Name, &playground.CreateTime, @@ -440,47 +346,38 @@ func (s *Storage) getPlaygrounds(query string, args ...interface{}) ([]Playgroun } func (s *Storage) GetPlaygrounds(name string) ([]Playground, error) { - return s.getPlaygrounds(SELECT_PLAYGROUND, name) + return s.getPlaygrounds(SelectPlayground, name) } func (s *Storage) GetPlaygroundById(id string) ([]Playground, error) { - return s.getPlaygrounds(SELECT_PLAYGROUND_BY_ID, id) + return s.getPlaygrounds(SelectPlaygroundById, id) } // audit func (s *Storage) InsertAuditLog(time time.Time, workDir, command string, status int) (int64, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - stmt, err := s.db.Prepare(INSERT_AUDIT_LOG) - if err != nil { - return -1, err - } - - result, err := stmt.Exec(time, workDir, command, status) + result, err := s.db.Write(InsertAuditLog, time, workDir, command, status) if err != nil { - return -1, err + return 0, err } return result.LastInsertId() } func (s *Storage) SetAuditLogStatus(id int64, status, errorCode int) error { - return s.execSQL(SET_AUDIT_LOG_STATUS, status, errorCode, id) + return s.write(SetAuditLogStatus, status, errorCode, id) } func (s *Storage) getAuditLogs(query string, args ...interface{}) ([]AuditLog, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - rows, err := s.db.Query(query, args...) + result, err := s.db.Query(query, args...) if err != nil { return nil, err } + defer result.Close() - defer rows.Close() auditLogs := []AuditLog{} var auditLog AuditLog - for rows.Next() { - err = rows.Scan(&auditLog.Id, + for result.Next() { + err = result.Scan(&auditLog.Id, &auditLog.ExecuteTime, &auditLog.WorkDirectory, &auditLog.Command, @@ -496,9 +393,9 @@ func (s *Storage) getAuditLogs(query string, args ...interface{}) ([]AuditLog, e } func (s *Storage) GetAuditLogs() ([]AuditLog, error) { - return s.getAuditLogs(SELECT_AUDIT_LOG) + return s.getAuditLogs(SelectAuditLog) } func (s *Storage) GetAuditLog(id int64) ([]AuditLog, error) { - return s.getAuditLogs(SELECT_AUDIT_LOG_BY_ID, id) + return s.getAuditLogs(SelectAuditLogById, id) } diff --git a/internal/task/task/common/collect_curveadm.go b/internal/task/task/common/collect_curveadm.go index 1cd3cb337..e093ac9df 100644 --- a/internal/task/task/common/collect_curveadm.go +++ b/internal/task/task/common/collect_curveadm.go @@ -43,6 +43,7 @@ func NewCollectCurveAdmTask(curveadm *cli.CurveAdm, dc *topology.DeployConfig) ( t := task.NewTask("Collect CurveAdm", subname, nil) // add step to task + dbPath := curveadm.Config().GetDBPath() secret := curveadm.MemStorage().Get(comm.KEY_SECRET).(string) urlFormat := curveadm.MemStorage().Get(comm.KEY_SUPPORT_UPLOAD_URL_FORMAT).(string) baseDir := TEMP_DIR @@ -59,11 +60,13 @@ func NewCollectCurveAdmTask(curveadm *cli.CurveAdm, dc *topology.DeployConfig) ( Paths: []string{localPath /*, hostLogDir, hostConfDir*/}, ExecOptions: options, }) - t.AddStep(&step.CopyFile{ - Source: curveadm.DBPath(), - Dest: localPath, - ExecOptions: options, - }) + if len(dbPath) > 0 { // only copy local database (like sqlite) + t.AddStep(&step.CopyFile{ + Source: dbPath, + Dest: localPath, + ExecOptions: options, + }) + } t.AddStep(&step.Tar{ File: localPath, Archive: localTarballPath, diff --git a/scripts/install.sh b/scripts/install.sh 
index c22e34b6d..1a7b2e7d3 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -1,60 +1,58 @@ #!/usr/bin/env bash ############################ GLOBAL VARIABLES -g_color_yellow=`printf '\033[33m'` -g_color_red=`printf '\033[31m'` -g_color_normal=`printf '\033[0m'` -g_curveadm_home="$HOME/.curveadm" -g_bin_dir="$g_curveadm_home/bin" +g_color_yellow=$(printf '\033[33m') +g_color_red=$(printf '\033[31m') +g_color_normal=$(printf '\033[0m') +g_curveadm_home="${HOME}/.curveadm" +g_bin_dir="${g_curveadm_home}/bin" +g_db_path="${g_curveadm_home}/data/curveadm.db" g_profile="${HOME}/.profile" g_root_url="https://curveadm.nos-eastchina1.126.net/release" g_latest_url="${g_root_url}/__version" g_latest_version=$(curl -Is $g_latest_url | awk 'BEGIN {FS=": "}; /^x-nos-meta-curveadm-latest-version/{print $2}') g_latest_version=${g_latest_version//[$'\t\r\n ']} -g_upgrade="$CURVEADM_UPGRADE" +g_upgrade="${CURVEADM_UPGRADE}" g_version="${CURVEADM_VERSION:=$g_latest_version}" g_download_url="${g_root_url}/curveadm-${g_version}.tar.gz" -g_plugin="$CURVEADM_PLUGIN" -g_plugin_dir="$g_curveadm_home/plugins/$g_plugin" -g_plugin_url="https://curveadm.nos-eastchina1.126.net/plugins/${g_plugin}-$(uname -m).tar.gz" ############################ BASIC FUNCTIONS msg() { - printf '%b' "$1" >&2 + printf '%b' "${1}" >&2 } success() { - msg "$g_color_yellow[✔]$g_color_normal ${1}${2}" + msg "${g_color_yellow}[✔]${g_color_normal} ${1}${2}" } die() { - msg "$g_color_red[✘]$g_color_normal ${1}${2}" + msg "${g_color_red}[✘]${g_color_normal} ${1}${2}" exit 1 } program_must_exist() { local ret='0' - command -v $1 >/dev/null 2>&1 || { local ret='1'; } + command -v "${1}" >/dev/null 2>&1 || { local ret='1'; } - if [ "$ret" -ne 0 ]; then + if [ "${ret}" -ne 0 ]; then die "You must have '$1' installed to continue.\n" fi } ############################ FUNCTIONS backup() { - if [ -d "$g_curveadm_home" ]; then - mv $g_curveadm_home "${g_curveadm_home}-$(date +%s).backup" + if [ -d "${g_curveadm_home}" ]; then + mv "${g_curveadm_home}" "${g_curveadm_home}-$(date +%s).backup" fi } setup() { - mkdir -p $g_curveadm_home/{bin,data,plugins,logs,temp} + mkdir -p "${g_curveadm_home}"/{bin,data,module,logs,temp} # generate config file - local confpath="$g_curveadm_home/curveadm.cfg" - if [ ! -f $confpath ]; then - cat << __EOF__ > $confpath + local confpath="${g_curveadm_home}/curveadm.cfg" + if [ ! -f "${confpath}" ]; then + cat << __EOF__ > "${confpath}" [defaults] log_level = error sudo_alias = "sudo" @@ -64,6 +62,9 @@ auto_upgrade = true [ssh_connections] retries = 3 timeout = 10 + +[database] +url = "${g_db_path}" __EOF__ fi } @@ -71,40 +72,22 @@ __EOF__ install_binray() { local ret=1 local tempfile="/tmp/curveadm-$(date +%s%6N).tar.gz" - curl $g_download_url -skLo $tempfile + curl "${g_download_url}" -skLo "${tempfile}" if [ $? -eq 0 ]; then - tar -zxvf $tempfile -C $g_curveadm_home --strip-components=1 1>/dev/null + tar -zxvf "${tempfile}" -C "${g_curveadm_home}" --strip-components=1 1>/dev/null ret=$? fi - rm $tempfile - if [ $ret -eq 0 ]; then - chmod 755 "$g_bin_dir/curveadm" + rm "${tempfile}" + if [ ${ret} -eq 0 ]; then + chmod 755 "${g_bin_dir}/curveadm" else die "Download curveadm failed\n" fi } -install_plugin() { - local ret=1 - mkdir -p $g_plugin_dir - local tempfile="/tmp/curveadm-plugin-$g_plugin-$(date +%s%6N).tar.gz" - curl $g_plugin_url -sLo $tempfile - if [ $? -eq 0 ]; then - tar -zxvf $tempfile -C $g_plugin_dir --strip-components=1 1>/dev/null - ret=$? 
- fi - - rm $tempfile - if [ $ret -eq 0 ]; then - success "Plugin '$g_plugin' installed\n" - else - die "Download plugin '$g_plugin' failed\n" - fi -} - set_profile() { - shell=`echo $SHELL | awk 'BEGIN {FS="/";} { print $NF }'` + shell=$(echo "$SHELL" | awk 'BEGIN {FS="/";} { print $NF }') if [ -f "${HOME}/.${shell}_profile" ]; then g_profile="${HOME}/.${shell}_profile" elif [ -f "${HOME}/.${shell}_login" ]; then @@ -113,21 +96,21 @@ set_profile() { g_profile="${HOME}/.${shell}rc" fi - case :$PATH: in - *:$g_bin_dir:*) ;; - *) echo "export PATH=$g_bin_dir:\$PATH" >> $g_profile ;; + case :${PATH}: in + *:${g_bin_dir}:*) ;; + *) echo "export PATH=${g_bin_dir}:\${PATH}" >> "${g_profile}" ;; esac } print_install_success() { - success "Install curveadm $g_version success, please run 'source $g_profile'\n" + success "Install curveadm ${g_version} success, please run 'source ${g_profile}'\n" } print_upgrade_success() { - if [ -f "$g_curveadm_home/CHANGELOG" ]; then - cat "$g_curveadm_home/CHANGELOG" + if [ -f "${g_curveadm_home}/CHANGELOG" ]; then + cat "${g_curveadm_home}/CHANGELOG" fi - success "Upgrade curveadm to $g_version success\n" + success "Upgrade curveadm to ${g_version} success\n" } install() { @@ -144,9 +127,7 @@ upgrade() { } main() { - if [ ! -z $g_plugin ]; then - install_plugin - elif [ "$g_upgrade" == "true" ]; then + if [ "${g_upgrade}" == "true" ]; then upgrade else install
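
Note on the new database URL scheme: the [database] section accepts two forms, "sqlite://<absolute path>" for the embedded database and "rqlite://<host>:<port>" for a remote rqlite node. Both are validated against REGEX_DB_URL, and storage.NewStorage dispatches on the scheme to driver.NewSQLiteDB or driver.NewRQLiteDB. The standalone Go sketch below is illustrative only and not part of this patch; pickBackend is a hypothetical helper that mirrors that validation and dispatch:

// A minimal, self-contained sketch that mirrors the URL handling above.
// REGEX_DB_URL is copied verbatim from internal/configure/curveadm/curveadm.go;
// pickBackend imitates the scheme-based selection in storage.NewStorage.
package main

import (
	"fmt"
	"regexp"
)

// rqlite://127.0.0.1:4000
// sqlite:///home/curve/.curveadm/data/curveadm.db
const REGEX_DB_URL = "^(sqlite|rqlite)://(.+)$"

// pickBackend returns the scheme ("sqlite" or "rqlite") and the remainder
// (database file path or host:port), or an error for an unsupported URL.
func pickBackend(dbUrl string) (scheme, rest string, err error) {
	mu := regexp.MustCompile(REGEX_DB_URL).FindStringSubmatch(dbUrl)
	if len(mu) == 0 {
		return "", "", fmt.Errorf("unsupport curveadm database url: %s", dbUrl)
	}
	return mu[1], mu[2], nil
}

func main() {
	for _, url := range []string{
		"sqlite:///home/curve/.curveadm/data/curveadm.db",
		"rqlite://127.0.0.1:4000",
		"mysql://127.0.0.1:3306", // rejected: unsupported scheme
	} {
		scheme, rest, err := pickBackend(url)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("backend=%s target=%s\n", scheme, rest)
	}
}

Note that CurveAdmConfig.GetDBPath deliberately returns an empty string for rqlite URLs, which is why collect_curveadm.go above only copies the local database file when the returned path is non-empty.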