diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a1053df6..27ecfcec 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -52,6 +52,9 @@ jobs: with: fetch-depth: 0 + - name: Free disk + run: tests/scripts/actionutils.sh free_runner_disk + - name: Install and setup run: | tests/scripts/actionutils.sh install_microceph @@ -91,23 +94,34 @@ jobs: set -eux # Show ceph's status sudo microceph.ceph status - + # Ceph status expectations for a single node cluster + test_single() { + local status="$1" + ( echo "$status" | grep -qF "mon: 1 daemons" ) || { echo fail ; return ; } + ( echo "$status" | grep -qE "mgr: .*active, " )|| { echo fail ; return ; } + ( echo "$status" | grep -qF "osd: 3 osds" ) || { echo fail ; return ; } + ( echo "$status" | grep -qF "rgw: 1 daemon" ) || { echo fail ; return ; } + echo ok + } # Confirm ceph is healthy and services started - sudo microceph.ceph status | grep -F "mon: 1 daemons" - sudo microceph.ceph status | grep -E "mgr: .*active, " - sudo microceph.ceph status | grep -F "osd: 3 osds" - sudo microceph.ceph status | grep -F "rgw: 1 daemon" - + res=$( test_single "$( sudo microceph.ceph status )" ) + [ $res = "ok" ] || { echo "single node status fails" ; exit 1 ; } # Check health after restart sudo snap stop microceph sudo snap start microceph - sleep 2m - - sudo microceph.ceph status - sudo microceph.ceph status | grep -F "mon: 1 daemons" - sudo microceph.ceph status | grep -E "mgr: .*active, " - sudo microceph.ceph status | grep -F "osd: 3 osds" - sudo microceph.ceph status | grep -F "rgw: 1 daemon" + for i in $(seq 1 16); do + status=$( sudo microceph.ceph status ) + echo "$status" + res=$( test_single "$status" ) + if [ $res = "ok" ] ; then + echo "Single tests pass" + break + else + echo "Single tests fail, retry $i/16" + sleep 15 + fi + done + sleep 1 pgrep ceph-osd || { echo "No ceph-osd process found" ; exit 1; } - name: Exercise RGW @@ -201,7 +215,7 @@ jobs: run: | set -uex # We still have failure domain OSD - lxc exec node-head -- sh -c "sudo microceph.ceph osd crush rule ls" | grep -F microceph_auto_osd + lxc exec node-head -- sh -c "sudo microceph.ceph config get mon osd_pool_default_crush_rule" | fgrep -x 1 # Add a 3rd OSD, should switch to host failure domain tests/scripts/actionutils.sh add_osd_to_node node-head for i in $(seq 1 8); do @@ -214,23 +228,40 @@ jobs: sleep 2 fi done - # Expect exactly one rule with host failure dom - rules=$( lxc exec node-head -- sh -c "sudo microceph.ceph osd crush rule ls" ) - echo $rules - echo $rules | grep -F microceph_auto_host - num=$( echo $rules | wc -l) - if [ $num != '1' ] ; then echo "Expect exactly one rule" ; exit 1 ; fi - + sleep 1 + lxc exec node-head -- sh -c "sudo microceph.ceph -s" + # Now default to host rule + lxc exec node-head -- sh -c "sudo microceph.ceph config get mon osd_pool_default_crush_rule" | fgrep -x 2 - name: Test 3 osds present run: | set -uex lxc exec node-head -- sh -c "microceph.ceph -s" | egrep "osd: 3 osds: 3 up.*3 in" - - name: Test osd host rule + - name: Test crush rules run: | set -uex lxc exec node-head -- sh -c "microceph.ceph osd crush rule ls" | grep -F microceph_auto_host - lxc exec node-head -- sh -c "microceph.ceph osd pool ls detail" | grep -F "crush_rule 1" + lxc exec node-head -- sh -c "microceph.ceph osd pool ls detail" | grep -F "crush_rule 2" + + - name: Add another OSD + run: | + tests/scripts/actionutils.sh add_osd_to_node node-wrk3 + for i in $(seq 1 8); do + res=$( ( lxc exec node-head -- sh -c 'sudo 
microceph.ceph -s | grep -F osd: | sed -E "s/.* ([[:digit:]]*) in .*/\1/"' ) || true ) + if [[ $res -gt 3 ]] ; then + echo "Found >3 OSDs" + break + else + echo -n '.' + sleep 2 + fi + done + + - name: Remove OSD again + run: | + set -uex + lxc exec node-wrk3 -- sh -c "microceph disk remove 3" + lxc exec node-head -- sh -c "microceph.ceph -s" | egrep "osd: 3 osds: 3 up.*3 in" - name: Test migrate services run: | @@ -239,4 +270,3 @@ jobs: sleep 2 lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk1 | grep -E "^ Services: osd$" lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk3 | grep -E "^ Services: mds, mgr, mon$" - diff --git a/microceph/api/disks.go b/microceph/api/disks.go index bb214cf6..991fc6de 100644 --- a/microceph/api/disks.go +++ b/microceph/api/disks.go @@ -2,7 +2,12 @@ package api import ( "encoding/json" + "fmt" + "github.com/canonical/microceph/microceph/common" + "github.com/gorilla/mux" "net/http" + "net/url" + "strconv" "github.com/canonical/lxd/lxd/response" "github.com/canonical/microcluster/rest" @@ -20,6 +25,13 @@ var disksCmd = rest.Endpoint{ Post: rest.EndpointAction{Handler: cmdDisksPost, ProxyTarget: true}, } +// /1.0/disks/{osdid} endpoint. +var disksDelCmd = rest.Endpoint{ + Path: "disks/{osdid}", + + Delete: rest.EndpointAction{Handler: cmdDisksDelete, ProxyTarget: true}, +} + func cmdDisksGet(s *state.State, r *http.Request) response.Response { disks, err := ceph.ListOSD(s) if err != nil { @@ -44,3 +56,45 @@ func cmdDisksPost(s *state.State, r *http.Request) response.Response { return response.EmptySyncResponse } + +// cmdDisksDelete is the handler for DELETE /1.0/disks/{osdid}. +func cmdDisksDelete(s *state.State, r *http.Request) response.Response { + var osd string + osd, err := url.PathUnescape(mux.Vars(r)["osdid"]) + if err != nil { + return response.BadRequest(err) + } + + var req types.DisksDelete + osdid, err := strconv.ParseInt(osd, 10, 64) + + if err != nil { + return response.BadRequest(err) + } + err = json.NewDecoder(r.Body).Decode(&req) + if err != nil { + return response.BadRequest(err) + } + + cs := common.CephState{State: s} + needDowngrade, err := ceph.IsDowngradeNeeded(cs, osdid) + if err != nil { + return response.InternalError(err) + } + if needDowngrade && !req.ConfirmDowngrade { + errorMsg := fmt.Errorf( + "Removing osd.%s would require a downgrade of the automatic crush rule from 'host' to 'osd' level. "+ + "Likely this will result in additional data movement. Please confirm by setting the "+ + "'--confirm-failure-domain-downgrade' flag to true", + osd, + ) + return response.BadRequest(errorMsg) + } + + err = ceph.RemoveOSD(cs, osdid, req.BypassSafety, req.Timeout) + if err != nil { + return response.SmartError(err) + } + + return response.EmptySyncResponse +} diff --git a/microceph/api/endpoints.go b/microceph/api/endpoints.go index cf83185f..7af08549 100644 --- a/microceph/api/endpoints.go +++ b/microceph/api/endpoints.go @@ -8,6 +8,7 @@ import ( // Endpoints is a global list of all API endpoints on the /1.0 endpoint of microceph. 
 var Endpoints = []rest.Endpoint{
 	disksCmd,
+	disksDelCmd,
 	resourcesCmd,
 	servicesCmd,
 	rgwServiceCmd,
diff --git a/microceph/api/types/disks.go b/microceph/api/types/disks.go
index d5b28c8b..c9afbdce 100644
--- a/microceph/api/types/disks.go
+++ b/microceph/api/types/disks.go
@@ -8,6 +8,14 @@ type DisksPost struct {
 	Encrypt bool `json:"encrypt" yaml:"encrypt"`
 }
+// DisksDelete holds an OSD number and flags controlling its removal
+type DisksDelete struct {
+	OSD              int64 `json:"osdid" yaml:"osdid"`
+	BypassSafety     bool  `json:"bypass_safety" yaml:"bypass_safety"`
+	ConfirmDowngrade bool  `json:"confirm_downgrade" yaml:"confirm_downgrade"`
+	Timeout          int64 `json:"timeout" yaml:"timeout"`
+}
+
 // Disks is a slice of disks
 type Disks []Disk
diff --git a/microceph/ceph/bootstrap.go b/microceph/ceph/bootstrap.go
index 420caf8d..b85ac68f 100644
--- a/microceph/ceph/bootstrap.go
+++ b/microceph/ceph/bootstrap.go
@@ -90,11 +90,16 @@ func Bootstrap(s common.StateInterface) error {
 		return err
 	}
-	// ensure crush rules
+	// set up crush rules
 	err = ensureCrushRules()
 	if err != nil {
 		return err
 	}
+	// configure the default crush rule for new pools
+	err = setDefaultCrushRule("microceph_auto_osd")
+	if err != nil {
+		return err
+	}
 	// Re-generate the configuration from the database.
 	err = updateConfig(s)
@@ -264,23 +269,3 @@ func initMds(s common.StateInterface, dataPath string) error {
 	return nil
 }
-
-// ensureCrushRules removes the default replicated rule and adds a microceph default rule with failure domain OSD
-func ensureCrushRules() error {
-	// Remove the default replicated rule it it exists.
-	if haveCrushRule("replicated_rule") {
-		err := removeCrushRule("replicated_rule")
-		if err != nil {
-			return fmt.Errorf("Failed to remove default replicated rule: %w", err)
-		}
-	}
-	// Add a microceph default rule with failure domain OSD if it does not exist.
-	if haveCrushRule("microceph_auto_rule") {
-		return nil
-	}
-	err := addCrushRule("microceph_auto_osd", "osd")
-	if err != nil {
-		return fmt.Errorf("Failed to add microceph default rule: %w", err)
-	}
-	return nil
-}
diff --git a/microceph/ceph/config.go b/microceph/ceph/config.go
index 5a19c796..adf38a4d 100644
--- a/microceph/ceph/config.go
+++ b/microceph/ceph/config.go
@@ -42,8 +42,9 @@ func (c ConfigTable) Keys() (keys []string) {
 // so that each request for the map guarantees consistent definition.
func GetConstConfigTable() ConfigTable { return ConfigTable{ - "public_network": {"global", []string{"mon", "osd"}}, - "cluster_network": {"global", []string{"osd"}}, + "public_network": {"global", []string{"mon", "osd"}}, + "cluster_network": {"global", []string{"osd"}}, + "osd_pool_default_crush_rule": {"global", []string{}}, } } diff --git a/microceph/ceph/crush.go b/microceph/ceph/crush.go index 98504607..55a0531f 100644 --- a/microceph/ceph/crush.go +++ b/microceph/ceph/crush.go @@ -3,21 +3,12 @@ package ceph import ( "encoding/json" "fmt" + "github.com/canonical/microceph/microceph/api/types" "strings" "github.com/tidwall/gjson" ) -// removeCrushRule removes a named crush rule -func removeCrushRule(name string) error { - _, err := processExec.RunCommand("ceph", "osd", "crush", "rule", "rm", name) - if err != nil { - return err - } - - return nil -} - // addCrushRule creates a new default crush rule with a given name and failure domain func addCrushRule(name string, failureDomain string) error { _, err := processExec.RunCommand("ceph", "osd", "crush", "rule", "create-replicated", name, "default", failureDomain) @@ -60,6 +51,9 @@ func getCrushRuleID(name string) (string, error) { } var jsond map[string]any err = json.Unmarshal([]byte(output), &jsond) + if err != nil { + return "", err + } val, ok := jsond["rule_id"] if !ok { return "", fmt.Errorf("rule_id not found in crush rule dump") @@ -101,3 +95,49 @@ func setPoolCrushRule(pool string, rule string) error { } return nil } + +// setDefaultCrushRule sets the default crush rule for new pools +func setDefaultCrushRule(rule string) error { + rid, err := getCrushRuleID(rule) + if err != nil { + return err + } + err = SetConfigItem(types.Config{ + Key: "osd_pool_default_crush_rule", + Value: rid, + }) + if err != nil { + return err + } + return nil +} + +// getDefaultCrushRule returns the current default crush rule for new pools +func getDefaultCrushRule() (string, error) { + configs, err := GetConfigItem(types.Config{ + Key: "osd_pool_default_crush_rule", + }) + if err != nil { + return "", err + } + return strings.TrimSpace(configs[0].Value), nil +} + +// ensureCrushRules set up the crush rules for the automatic failure domain handling. +func ensureCrushRules() error { + // Add a microceph default rule with failure domain OSD if it does not exist. + if !haveCrushRule("microceph_auto_osd") { + err := addCrushRule("microceph_auto_osd", "osd") + if err != nil { + return fmt.Errorf("Failed to add microceph default crush rule: %w", err) + } + } + // Add a microceph default rule with failure domain host if it does not exist. + if !haveCrushRule("microceph_auto_host") { + err := addCrushRule("microceph_auto_host", "host") + if err != nil { + return fmt.Errorf("Failed to add microceph default crush rule: %w", err) + } + } + return nil +} diff --git a/microceph/ceph/osd.go b/microceph/ceph/osd.go index a5e0d6c8..7cadd6c4 100644 --- a/microceph/ceph/osd.go +++ b/microceph/ceph/osd.go @@ -5,12 +5,19 @@ import ( "crypto/rand" "database/sql" "encoding/base64" + "encoding/json" + "errors" "fmt" + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/microceph/microceph/common" + "math" "os" "os/exec" "path/filepath" "strconv" "strings" + "syscall" + "time" "github.com/canonical/lxd/lxd/resources" "github.com/canonical/lxd/lxd/revert" @@ -212,24 +219,25 @@ func checkEncryptSupport() error { return nil } -// setHostFailureDomain sets the host failure domain for the given host. 
-func setHostFailureDomain() error {
+func switchFailureDomain(old string, new string) error {
 	var err error
-	if haveCrushRule("microceph_auto_host") {
-		// Already setup up, nothing to do.
-		return nil
-	}
-	err = addCrushRule("microceph_auto_host", "host")
+	newRule := fmt.Sprintf("microceph_auto_%s", new)
+	logger.Debugf("Setting default crush rule to %v", newRule)
+	err = setDefaultCrushRule(newRule)
 	if err != nil {
 		return err
 	}
-	osdPools, err := getPoolsForDomain("osd")
+
+	osdPools, err := getPoolsForDomain(old)
+	logger.Debugf("Found pools %v for domain %v", osdPools, old)
 	if err != nil {
 		return err
 	}
 	for _, pool := range osdPools {
-		err = setPoolCrushRule(pool, "microceph_auto_host")
+		logger.Debugf("Setting pool %v crush rule to %v", pool, newRule)
+		err = setPoolCrushRule(pool, newRule)
 		if err != nil {
 			return err
 		}
@@ -247,16 +255,10 @@ func updateFailureDomain(s *state.State) error {
 	}
 	if numNodes >= 3 {
-		err = setHostFailureDomain()
+		err = switchFailureDomain("osd", "host")
 		if err != nil {
 			return fmt.Errorf("Failed to set host failure domain: %w", err)
 		}
-		if haveCrushRule("microceph_auto_osd") {
-			err := removeCrushRule("microceph_auto_osd")
-			if err != nil {
-				return fmt.Errorf("Failed to remove microceph_auto_osd rule: %w", err)
-			}
-		}
 	}
 	return nil
 }
@@ -334,8 +336,7 @@ func AddOSD(s *state.State, path string, wipe bool, encrypt bool) error {
 	// Wipe the block device if requested.
 	if wipe {
-		// FIXME: Do a Go implementation.
-		_, err := processExec.RunCommand("dd", "if=/dev/zero", fmt.Sprintf("of=%s", path), "bs=4M", "count=10", "status=none")
+		err = timeoutWipe(path)
 		if err != nil {
 			return fmt.Errorf("Failed to wipe the device: %w", err)
 		}
@@ -438,28 +439,407 @@ func AddOSD(s *state.State, path string, wipe bool, encrypt bool) error {
 // ListOSD lists current OSD disks
 func ListOSD(s *state.State) (types.Disks, error) {
-	disks := types.Disks{}
+	return database.OSDQuery.List(s)
+}
+
+// RemoveOSD removes an OSD disk
+func RemoveOSD(s common.StateInterface, osd int64, bypassSafety bool, timeout int64) error {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+	defer cancel()
+	err := doRemoveOSD(ctx, s, osd, bypassSafety)
+	if err != nil {
+		// Checking if the error is a context deadline exceeded error
+		if errors.Is(err, context.DeadlineExceeded) {
+			return fmt.Errorf("timeout (%ds) reached while removing osd.%d, abort", timeout, osd)
+		}
+		return err
+	}
+	return nil
+
+}
+
+// sanityCheck checks if input is valid
+func sanityCheck(s common.StateInterface, osd int64) error {
+	// check osd is positive
+	if osd < 0 {
+		return fmt.Errorf("OSD must be a positive integer")
+	}
+
+	// check if the OSD exists in the database
+	exists, err := database.OSDQuery.HaveOSD(s.ClusterState(), osd)
+	if err != nil {
+		return err
+	}
+	if !exists {
+		return fmt.Errorf("osd.%d not found", osd)
+	}
+	return nil
+}
+
+// IsDowngradeNeeded checks if we need to downgrade the failure domain from 'host' to 'osd' level
+// if we remove the given OSD
+func IsDowngradeNeeded(s common.StateInterface, osd int64) (bool, error) {
+	currentRule, err := getDefaultCrushRule()
+	if err != nil {
+		return false, err
+	}
+	hostRule, err := getCrushRuleID("microceph_auto_host")
+	if err != nil {
+		return false, err
+	}
+	if currentRule != hostRule {
+		// either we're at 'osd' level or we're using a custom rule
+		// in both cases we won't downgrade
+		logger.Infof("No need to
downgrade auto failure domain, current rule is %v", currentRule) + return false, nil + } + numNodes, err := database.MemberCounter.CountExclude(s.ClusterState(), osd) + logger.Infof("Number of nodes excluding osd.%v: %v", osd, numNodes) + if err != nil { + return false, err + } + if numNodes < 3 { // need to scale down + return true, nil + } + return false, nil +} + +// scaleDownFailureDomain scales down the failure domain from 'host' to 'osd' level +func scaleDownFailureDomain(s common.StateInterface, osd int64) error { + needDowngrade, err := IsDowngradeNeeded(s, osd) + logger.Debugf("Downgrade needed: %v", needDowngrade) + if err != nil { + return err + } + if !needDowngrade { + return nil + } + err = switchFailureDomain("host", "osd") + if err != nil { + return fmt.Errorf("Failed to switch failure domain: %w", err) + } + return nil +} + +// reweightOSD reweights the given OSD to the given weight +func reweightOSD(ctx context.Context, osd int64, weight float64) { + logger.Debugf("Reweighting osd.%d to %f", osd, weight) + _, err := processExec.RunCommand( + "ceph", "osd", "crush", "reweight", + fmt.Sprintf("osd.%d", osd), + fmt.Sprintf("%f", weight), + ) + if err != nil { + // only log a warn, don't treat fail to reweight as a fatal error + logger.Warnf("Failed to reweight osd.%d: %v", osd, err) + } +} + +func doPurge(osd int64) error { + // run ceph osd purge command + _, err := processExec.RunCommand( + "ceph", "osd", "purge", fmt.Sprintf("osd.%d", osd), + "--yes-i-really-mean-it", + ) + return err +} + +func purgeOSD(osd int64) error { + var err error + retries := 10 + var backoff time.Duration + + for i := 0; i < retries; i++ { + err = doPurge(osd) + if err == nil { + // Success: break the retry loop + break + } + // we're getting a RunError from processExec.RunCommand, and it + // wraps the original exit error if there's one + exitError, ok := err.(shared.RunError).Unwrap().(*exec.ExitError) + if !ok { + // not an exit error, abort and bubble up the error + logger.Warnf("Purge failed with non-exit error: %v", err) + break + } + if syscall.Errno(exitError.ExitCode()) != syscall.EBUSY { + // not a busy error, abort and bubble up the error + logger.Warnf("Purge failed with unexpected exit error: %v", exitError) + break + } + // purge failed with EBUSY - retry after a delay, and make delay exponential + logger.Infof("Purge failed %v, retrying in %v", err, backoff) + backoff = time.Duration(math.Pow(2, float64(i))) * time.Millisecond * 100 + time.Sleep(backoff) + } + + if err != nil { + logger.Errorf("Failed to purge osd.%d: %v", osd, err) + return fmt.Errorf("Failed to purge osd.%d: %w", osd, err) + } + logger.Infof("osd.%d purged", osd) + return nil +} - // Get the OSDs from the database. 
-	err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error {
-		records, err := database.GetDisks(ctx, tx)
+func wipeDevice(s common.StateInterface, osd int64) {
+	var err error
+	// get the device path
+	path, _ := database.OSDQuery.Path(s.ClusterState(), osd)
+	// wipe the device, retry with exponential backoff
+	retries := 8
+	var backoff time.Duration
+	for i := 0; i < retries; i++ {
+		err = timeoutWipe(path)
+		if err == nil {
+			// Success: break the retry loop
+			break
+		}
+		// wipe failed - retry after a delay, and make delay exponential
+		logger.Infof("Wipe failed %v, retrying in %v", err, backoff)
+		backoff = time.Duration(math.Pow(2, float64(i))) * time.Millisecond * 100
+		time.Sleep(backoff)
+	}
+	if err != nil {
+		// log a warning, but don't treat wipe failure as a fatal error
+		// e.g. if the device is broken, we still want to remove it from the cluster
+		logger.Warnf("Fault during device wipe: %v", err)
+	}
+}
+
+// timeoutWipe wipes the given device with a timeout, in order not to hang on broken disks
+func timeoutWipe(path string) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	_, err := processExec.RunCommandContext(
+		ctx,
+		"dd", "if=/dev/zero",
+		fmt.Sprintf("of=%s", path),
+		"bs=4M", "count=10", "status=none",
+	)
+	return err
+}
+
+func doRemoveOSD(ctx context.Context, s common.StateInterface, osd int64, bypassSafety bool) error {
+	var err error
+
+	// general sanity
+	err = sanityCheck(s, osd)
+	if err != nil {
+		return err
+	}
+
+	if !bypassSafety {
+		// check: at least 3 OSDs
+		err = checkMinOSDs(s, osd)
 		if err != nil {
-			return fmt.Errorf("Failed to fetch disks: %w", err)
+			return err
+		}
+	}
+
+	err = scaleDownFailureDomain(s, osd)
+	if err != nil {
+		return err
+	}
+
+	// check if the osd is still in the cluster -- if we're being re-run, it might not be
+	isPresent, err := haveOSDInCeph(osd)
+	if err != nil {
+		return fmt.Errorf("Failed to check if osd.%d is present in Ceph: %w", osd, err)
+	}
+	// reweight/drain data
+	if isPresent {
+		reweightOSD(ctx, osd, 0)
+	}
+	// perform safety check for stopping
+	if isPresent && !bypassSafety {
+		err = safetyCheckStop(osd)
+		if err != nil {
+			return err
+		}
+	}
+	// take the OSD out and down
+	if isPresent {
+		err = outDownOSD(osd)
+		if err != nil {
+			return err
+		}
+	}
+	// stop the OSD service
+	if isPresent {
+		err = killOSD(osd)
+	}
+	if err != nil {
+		return err
+	}
+	// perform safety check for destroying
+	if isPresent && !bypassSafety {
+		err = safetyCheckDestroy(osd)
+		if err != nil {
+			return err
+		}
+	}
+	// purge the OSD
+	if isPresent {
+		err = purgeOSD(osd)
+		if err != nil {
+			return err
+		}
+	}
+	// Wipe the underlying block device
+	wipeDevice(s, osd)
+	// Remove osd config
+	err = removeOSDConfig(osd)
+	if err != nil {
+		return err
+	}
+	// Remove db entry
+	err = database.OSDQuery.Delete(s.ClusterState(), osd)
+	if err != nil {
+		logger.Errorf("Failed to remove osd.%d from database: %v", osd, err)
+		return fmt.Errorf("Failed to remove osd.%d from database: %w", osd, err)
+	}
+	return nil
+}
+
+func checkMinOSDs(s common.StateInterface, osd int64) error {
+	// check if we have at least 3 OSDs post-removal
+	disks, err := database.OSDQuery.List(s.ClusterState())
+	if err != nil {
+		return err
+	}
+	if len(disks) <= 3 {
+		return fmt.Errorf("Cannot remove osd.%d: we need at least 3 OSDs, have %d", osd, len(disks))
+	}
+	return nil
+}
+
+func outDownOSD(osd int64) error {
+	_, err := processExec.RunCommand("ceph", "osd",
"out", fmt.Sprintf("osd.%d", osd)) + if err != nil { + logger.Errorf("Failed to take osd.%d out: %v", osd, err) + return fmt.Errorf("Failed to take osd.%d out: %w", osd, err) + } + _, err = processExec.RunCommand("ceph", "osd", "down", fmt.Sprintf("osd.%d", osd)) + if err != nil { + logger.Errorf("Failed to take osd.%d down: %v", osd, err) + return fmt.Errorf("Failed to take osd.%d down: %w", osd, err) + } + return nil +} + +func safetyCheckStop(osd int64) error { + var safeStop bool + + retries := 12 + var backoff time.Duration + + for i := 0; i < retries; i++ { + safeStop = testSafeStop(osd) + if safeStop { + // Success: break the retry loop + break } + backoff = time.Duration(math.Pow(2, float64(i))) * time.Millisecond * 100 + logger.Infof("osd.%d not ok to stop, retrying in %v", osd, backoff) + time.Sleep(backoff) + } + if !safeStop { + logger.Errorf("osd.%d failed to reach ok-to-stop", osd) + return fmt.Errorf("osd.%d failed to reach ok-to-stop", osd) + } + logger.Infof("osd.%d ok to stop", osd) + return nil +} + +func safetyCheckDestroy(osd int64) error { + var safeDestroy bool - for _, disk := range records { - disks = append(disks, types.Disk{ - Location: disk.Member, - OSD: int64(disk.OSD), - Path: disk.Path, - }) + retries := 12 + var backoff time.Duration + + for i := 0; i < retries; i++ { + safeDestroy = testSafeDestroy(osd) + if safeDestroy { + // Success: break the retry loop + break } + backoff = time.Duration(math.Pow(2, float64(i))) * time.Millisecond * 100 + logger.Infof("osd.%d not safe to destroy, retrying in %v", osd, backoff) + time.Sleep(backoff) + } + if !safeDestroy { + logger.Errorf("osd.%d failed to reach safe-to-destroy", osd) + return fmt.Errorf("osd.%d failed to reach safe-to-destroy", osd) + } + logger.Infof("osd.%d safe to destroy", osd) + return nil +} - return nil - }) +func testSafeDestroy(osd int64) bool { + // run ceph osd safe-to-destroy + _, err := processExec.RunCommand("ceph", "osd", "safe-to-destroy", fmt.Sprintf("osd.%d", osd)) + return err == nil +} + +func testSafeStop(osd int64) bool { + // run ceph osd ok-to-stop + _, err := processExec.RunCommand("ceph", "osd", "ok-to-stop", fmt.Sprintf("osd.%d", osd)) + return err == nil +} + +func removeOSDConfig(osd int64) error { + dataPath := filepath.Join(os.Getenv("SNAP_COMMON"), "data") + osdDataPath := filepath.Join(dataPath, "osd", fmt.Sprintf("ceph-%d", osd)) + err := os.RemoveAll(osdDataPath) + if err != nil { + logger.Errorf("Failed to remove osd.%d config: %v", osd, err) + return fmt.Errorf("Failed to remove osd.%d config: %w", osd, err) + } + return nil +} + +type Node struct { + ID int64 `json:"id"` + Type string `json:"type"` +} + +type JSONData struct { + Nodes []Node `json:"nodes"` +} + +// haveOSDInCeph checks if the given OSD is present in the ceph cluster +func haveOSDInCeph(osd int64) (bool, error) { + // run ceph osd tree + out, err := processExec.RunCommand("ceph", "osd", "tree", "-f", "json") if err != nil { - return nil, err + logger.Errorf("Failed to get ceph osd tree: %v", err) + return false, fmt.Errorf("Failed to get ceph osd tree: %w", err) } + // parse the json output + var tree JSONData + err = json.Unmarshal([]byte(out), &tree) + if err != nil { + logger.Errorf("Failed to parse ceph osd tree: %v", err) + return false, fmt.Errorf("Failed to parse ceph osd tree: %w", err) + } + // query the tree for the given OSD + for _, node := range tree.Nodes { + if node.Type == "osd" && node.ID == osd { + return true, nil + } + } + return false, nil +} - return disks, nil +// killOSD 
terminates the osd process for an osd.id +func killOSD(osd int64) error { + cmdline := fmt.Sprintf("ceph-osd .* --id %d", osd) + _, err := processExec.RunCommand("pkill", "-f", cmdline) + if err != nil { + logger.Errorf("Failed to kill osd.%d: %v", osd, err) + return fmt.Errorf("Failed to kill osd.%d: %w", osd, err) + } + return nil } diff --git a/microceph/ceph/osd_test.go b/microceph/ceph/osd_test.go index feefd858..00f1f6bb 100644 --- a/microceph/ceph/osd_test.go +++ b/microceph/ceph/osd_test.go @@ -53,9 +53,54 @@ func addOsdPoolSetExpectations(r *mocks.Runner) { r.On("RunCommand", cmdAny("ceph", 6)...).Return("ok", nil).Once() } -// Expect: run ceph osd crush rule rm -func addCrushRuleRmExpectations(r *mocks.Runner) { - r.On("RunCommand", cmdAny("ceph", 5)...).Return("ok", nil).Once() +// Expect: run ceph config set +func addSetDefaultRuleExpectations(r *mocks.Runner) { + r.On("RunCommand", cmdAny("ceph", 7)...).Return("ok", nil).Once() +} + +// Expect: run ceph osd tree +func addOsdTreeExpectations(r *mocks.Runner) { + json := `{ + "nodes" : [ + { + "children" : [ + -4, + -3, + -2 + ], + "id" : -1, + "name" : "default", + "type" : "root", + "type_id" : 11 + }, + { + "children" : [ + 0 + ], + "id" : -2, + "name" : "m-0", + "pool_weights" : {}, + "type" : "host", + "type_id" : 1 + }, + { + "crush_weight" : 0.0035858154296875, + "depth" : 2, + "exists" : 1, + "id" : 0, + "name" : "osd.0", + "pool_weights" : {}, + "primary_affinity" : 1, + "reweight" : 1, + "status" : "up", + "type" : "osd", + "type_id" : 0 + } + ], "stray" : [{ "id": 77, + "name": "osd.77", + "exists": 1} ]}` + r.On("RunCommand", cmdAny("ceph", 4)...).Return(json, nil).Once() + } func (s *osdSuite) SetupTest() { @@ -65,12 +110,17 @@ func (s *osdSuite) SetupTest() { } -// TestSetHostFailureDomain tests the setHostFailureDomain function -func (s *osdSuite) TestSetHostFailureDomain() { +// TestSwitchHostFailureDomain tests the switchFailureDomain function +func (s *osdSuite) TestSwitchHostFailureDomain() { r := mocks.NewRunner(s.T()) - // list and create crush rule + // list and create two crush rules addCrushRuleLsExpectations(r) addCrushRuleCreateExpectations(r) + addCrushRuleCreateExpectations(r) + // dump crush rules + addCrushRuleDumpExpectations(r) + // set default crush rule + addSetDefaultRuleExpectations(r) // list and dump crush rule addCrushRuleLsExpectations(r) addCrushRuleDumpExpectations(r) @@ -81,7 +131,7 @@ func (s *osdSuite) TestSetHostFailureDomain() { processExec = r - err := setHostFailureDomain() + err := switchFailureDomain("osd", "host") assert.NoError(s.T(), err) } @@ -99,9 +149,14 @@ func (s *osdSuite) TestUpdateFailureDomain() { } r := mocks.NewRunner(s.T()) - // list and create crush rule + // list and create two crush rules addCrushRuleLsExpectations(r) addCrushRuleCreateExpectations(r) + addCrushRuleCreateExpectations(r) + // dump crush rules + addCrushRuleDumpExpectations(r) + // set default crush rule + addSetDefaultRuleExpectations(r) // list and dump crush rule addCrushRuleLsExpectations(r) addCrushRuleDumpExpectations(r) @@ -109,8 +164,6 @@ func (s *osdSuite) TestUpdateFailureDomain() { addCrushRuleLsJsonExpectations(r) // set osd pool addOsdPoolSetExpectations(r) - // remove crush rule - addCrushRuleRmExpectations(r) processExec = r @@ -122,3 +175,22 @@ func (s *osdSuite) TestUpdateFailureDomain() { assert.NoError(s.T(), err) } + +// TestHaveOSDInCeph tests the haveOSDInCeph function +func (s *osdSuite) TestHaveOSDInCeph() { + r := mocks.NewRunner(s.T()) + // add osd tree expectations + 
addOsdTreeExpectations(r) + addOsdTreeExpectations(r) + + processExec = r + + res, err := haveOSDInCeph(0) + assert.NoError(s.T(), err) + assert.Equal(s.T(), res, true) + + res, err = haveOSDInCeph(77) + assert.NoError(s.T(), err) + assert.Equal(s.T(), res, false) + +} diff --git a/microceph/ceph/subprocess.go b/microceph/ceph/subprocess.go index 4754e1f5..bbd8f87e 100644 --- a/microceph/ceph/subprocess.go +++ b/microceph/ceph/subprocess.go @@ -1,10 +1,14 @@ package ceph -import "github.com/canonical/lxd/shared" +import ( + "context" + "github.com/canonical/lxd/shared" +) // Runner launches processes type Runner interface { RunCommand(name string, arg ...string) (string, error) + RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) } // RunnerImpl for launching processes @@ -15,6 +19,11 @@ func (c RunnerImpl) RunCommand(name string, arg ...string) (string, error) { return shared.RunCommand(name, arg...) } +// RunCommandContext runs a process given a context, a path to a binary and a list of args +func (c RunnerImpl) RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) { + return shared.RunCommandContext(ctx, name, arg...) +} + // Singleton runner: make this patch-able for testing purposes. // By default executes via shared.RunCommand() var processExec Runner = RunnerImpl{} diff --git a/microceph/client/client.go b/microceph/client/client.go index 5abf20f9..e2d1f14b 100644 --- a/microceph/client/client.go +++ b/microceph/client/client.go @@ -3,7 +3,9 @@ package client import ( "context" + "errors" "fmt" + "strconv" "time" "github.com/canonical/lxd/shared/api" @@ -92,3 +94,37 @@ func GetResources(ctx context.Context, c *client.Client) (*api.ResourcesStorage, return &storage, nil } + +// RemoveDisk requests Ceph removes an OSD. +func RemoveDisk(ctx context.Context, c *client.Client, data *types.DisksDelete) error { + timeout := time.Second * time.Duration(data.Timeout+5) // wait a bit longer than the operation timeout + queryCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // get disks and determine osd location + disks, err := GetDisks(ctx, c) + if err != nil { + return fmt.Errorf("Failed to get disks: %w", err) + } + var location string + for _, disk := range disks { + if disk.OSD == data.OSD { + location = disk.Location + break + } + } + if location == "" { + return fmt.Errorf("Failed to find location for osd.%d", data.OSD) + } + c = c.UseTarget(location) + + err = c.Query(queryCtx, "DELETE", api.NewURL().Path("disks", strconv.FormatInt(data.OSD, 10)), data, nil) + if err != nil { + // Checking if the error is a context deadline exceeded error + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("Failed to remove disk, timeout (%ds) reached - abort", data.Timeout) + } + return fmt.Errorf("Failed to remove disk: %w", err) + } + return nil +} diff --git a/microceph/cmd/microceph/disk.go b/microceph/cmd/microceph/disk.go index 7928df5c..7b0a1e6b 100644 --- a/microceph/cmd/microceph/disk.go +++ b/microceph/cmd/microceph/disk.go @@ -22,6 +22,10 @@ func (c *cmdDisk) Command() *cobra.Command { diskListCmd := cmdDiskList{common: c.common, disk: c} cmd.AddCommand(diskListCmd.Command()) + // Remove + diskRemoveCmd := cmdDiskRemove{common: c.common, disk: c} + cmd.AddCommand(diskRemoveCmd.Command()) + // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs cmd.Run = func(cmd *cobra.Command, args []string) { _ = cmd.Usage() } diff --git a/microceph/cmd/microceph/disk_remove.go b/microceph/cmd/microceph/disk_remove.go new file mode 100644 index 00000000..a6fe5b19 --- /dev/null +++ b/microceph/cmd/microceph/disk_remove.go @@ -0,0 +1,80 @@ +package main + +import ( + "context" + "fmt" + "strconv" + + "github.com/canonical/microcluster/microcluster" + "github.com/spf13/cobra" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/client" +) + +type cmdDiskRemove struct { + common *CmdControl + disk *cmdDisk + + flagBypassSafety bool + flagConfirmDowngrade bool + flagTimeout int64 +} + +func (c *cmdDiskRemove) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "remove [--timeout=300] [--bypass-safety-checks=false] [--confirm-failure-domain-downgrade=false]", + Short: "Remove a Ceph disk (OSD) given an osd.$id.", + RunE: c.Run, + } + + cmd.PersistentFlags().Int64Var(&c.flagTimeout, "timeout", 300, "Timeout to wait for safe removal (seconds), default=300") + cmd.PersistentFlags().BoolVar(&c.flagBypassSafety, "bypass-safety-checks", false, "Bypass safety checks") + cmd.PersistentFlags().BoolVar(&c.flagConfirmDowngrade, "confirm-failure-domain-downgrade", false, "Confirm failure domain downgrade if required") + + return cmd +} + +func (c *cmdDiskRemove) Run(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } + + m, err := microcluster.App(context.Background(), microcluster.Args{StateDir: c.common.FlagStateDir, Verbose: c.common.FlagLogVerbose, Debug: c.common.FlagLogDebug}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + // parse as int + osd, err := strconv.ParseInt(args[0], 10, 64) + if err != nil { + // check arg is of osd.$id form + if len(args[0]) < 4 || args[0][:4] != "osd." 
{ + return fmt.Errorf("Error: osd input must be either in the form $id or osd.$id, got %v", args[0]) + } + osd, err = strconv.ParseInt(args[0][4:], 10, 64) + if err != nil { + return fmt.Errorf("Error: osd input must be either in the form $id or osd.$id: got %v", args[0]) + } + } + + req := &types.DisksDelete{ + OSD: osd, + BypassSafety: c.flagBypassSafety, + ConfirmDowngrade: c.flagConfirmDowngrade, + Timeout: c.flagTimeout, + } + + fmt.Printf("Removing osd.%d, timeout %ds\n", osd, req.Timeout) + err = client.RemoveDisk(context.Background(), cli, req) + if err != nil { + return err + } + + return nil +} diff --git a/microceph/database/disk_extras.go b/microceph/database/disk_extras.go index e05b1112..91fda831 100644 --- a/microceph/database/disk_extras.go +++ b/microceph/database/disk_extras.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "github.com/canonical/microceph/microceph/api/types" "github.com/canonical/lxd/lxd/db/query" "github.com/canonical/lxd/shared/api" @@ -16,6 +17,7 @@ import ( //go:generate mockery --name MemberCounterInterface type MemberCounterInterface interface { Count(s *state.State) (int, error) + CountExclude(s *state.State, exclude int64) (int, error) } type MemberCounterImpl struct{} @@ -34,18 +36,21 @@ SELECT internal_cluster_members.name AS member, count(disks.id) AS num_disks GROUP BY internal_cluster_members.id `) -// MembersDiskCnt returns the number of disks per member for all members that have at least one disk -func MembersDiskCnt(ctx context.Context, tx *sql.Tx) ([]MemberDisk, error) { +var membersDiskCntExclude = cluster.RegisterStmt(` +SELECT internal_cluster_members.name AS member, count(disks.id) AS num_disks +FROM disks +JOIN internal_cluster_members ON disks.member_id = internal_cluster_members.id +WHERE disks.OSD != ? 
+GROUP BY internal_cluster_members.id +`) + +// MembersDiskCnt returns the number of disks per member for all members that have at least one disk excluding the given OSD +func MembersDiskCnt(ctx context.Context, tx *sql.Tx, exclude int64) ([]MemberDisk, error) { var err error var sqlStmt *sql.Stmt objects := make([]MemberDisk, 0) - sqlStmt, err = cluster.Stmt(tx, membersDiskCnt) - if err != nil { - return nil, fmt.Errorf("Failed to get \"membersDiskCnt\" prepared statement: %w", err) - } - dest := func(scan func(dest ...any) error) error { m := MemberDisk{} err := scan(&m.Member, &m.NumDisks) @@ -56,11 +61,27 @@ func MembersDiskCnt(ctx context.Context, tx *sql.Tx) ([]MemberDisk, error) { return nil } - err = query.SelectObjects(ctx, sqlStmt, dest) - if err != nil { - return nil, fmt.Errorf("Failed to get \"membersDiskCnt\" objects: %w", err) - } + if exclude == -1 { + sqlStmt, err = cluster.Stmt(tx, membersDiskCnt) + if err != nil { + return nil, fmt.Errorf("Failed to get \"membersDiskCnt\" prepared statement: %w", err) + } + + err = query.SelectObjects(ctx, sqlStmt, dest) + if err != nil { + return nil, fmt.Errorf("Failed to get \"membersDiskCnt\" objects: %w", err) + } + } else { + sqlStmt, err = cluster.Stmt(tx, membersDiskCntExclude) + if err != nil { + return nil, fmt.Errorf("Failed to get \"membersDiskCntExclude\" prepared statement: %w", err) + } + err = query.SelectObjects(ctx, sqlStmt, dest, exclude) + if err != nil { + return nil, fmt.Errorf("Failed to get \"membersDiskCntExclude\" objects: %w", err) + } + } return objects, err } @@ -69,7 +90,25 @@ func (m MemberCounterImpl) Count(s *state.State) (int, error) { var numNodes int err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { - records, err := MembersDiskCnt(ctx, tx) + records, err := MembersDiskCnt(ctx, tx, -1) + if err != nil { + return fmt.Errorf("Failed to fetch disks: %w", err) + } + numNodes = len(records) + return nil + }) + if err != nil { + return 0, err + } + return numNodes, nil +} + +// CountExclude returns the number of nodes in the cluster with at least one disk, excluding the given OSD +func (m MemberCounterImpl) CountExclude(s *state.State, exclude int64) (int, error) { + var numNodes int + + err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + records, err := MembersDiskCnt(ctx, tx, exclude) if err != nil { return fmt.Errorf("Failed to fetch disks: %w", err) } @@ -84,3 +123,112 @@ func (m MemberCounterImpl) Count(s *state.State) (int, error) { // Singleton for the MemberCounterImpl, to be mocked in unit testing var MemberCounter MemberCounterInterface = MemberCounterImpl{} + +// OSDQueryInterface is for querying OSDs. Introduced for mocking. +type OSDQueryInterface interface { + HaveOSD(s *state.State, osd int64) (bool, error) + Path(s *state.State, osd int64) (string, error) + Delete(s *state.State, osd int64) error + List(s *state.State) (types.Disks, error) +} + +type OSDQueryImpl struct{} + +var osdCount = cluster.RegisterStmt(` +SELECT count(disks.id) AS num_disks +FROM disks +WHERE disks.OSD = ? +`) + +var osdPath = cluster.RegisterStmt(` +SELECT disks.path +FROM disks +WHERE disks.OSD = ? 
+`) + +// HaveOSD returns either false or true depending on whether the given OSD is present in the cluster +func (o OSDQueryImpl) HaveOSD(s *state.State, osd int64) (bool, error) { + var numDisks int + + err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + sqlStmt, err := cluster.Stmt(tx, osdCount) + if err != nil { + return fmt.Errorf("Failed to get \"osdCount\" prepared statement: %w", err) + } + + err = sqlStmt.QueryRow(osd).Scan(&numDisks) + if err != nil { + return fmt.Errorf("Failed to get \"osdCount\" objects: %w", err) + } + return nil + }) + if err != nil { + return false, err + } + return numDisks > 0, nil +} + +// Path returns the path of the given OSD +func (o OSDQueryImpl) Path(s *state.State, osd int64) (string, error) { + var path string + + err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + sqlStmt, err := cluster.Stmt(tx, osdPath) + if err != nil { + return fmt.Errorf("Failed to get \"osdPath\" prepared statement: %w", err) + } + + err = sqlStmt.QueryRow(osd).Scan(&path) + if err != nil { + return fmt.Errorf("Failed to get \"osdPath\" objects: %w", err) + } + return nil + }) + if err != nil { + return "", err + } + return path, nil +} + +// Delete OSD records for the given OSD +func (o OSDQueryImpl) Delete(s *state.State, osd int64) error { + path, err := o.Path(s, osd) + if err != nil { + return err + } + err = s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + return DeleteDisk(ctx, tx, s.Name(), path) + }) + return err +} + +// List OSD records +func (o OSDQueryImpl) List(s *state.State) (types.Disks, error) { + disks := types.Disks{} + + // Get the OSDs from the database. + err := s.Database.Transaction(s.Context, func(ctx context.Context, tx *sql.Tx) error { + records, err := GetDisks(ctx, tx) + if err != nil { + return fmt.Errorf("Failed to fetch disks: %w", err) + } + + for _, disk := range records { + disks = append(disks, types.Disk{ + Location: disk.Member, + OSD: int64(disk.OSD), + Path: disk.Path, + }) + } + + return nil + }) + if err != nil { + return nil, err + } + + return disks, nil +} + +// Singleton for the OSDQueryImpl, to be mocked in unit testing +var OSDQuery OSDQueryInterface = OSDQueryImpl{} diff --git a/microceph/go.mod b/microceph/go.mod index bf189f4e..9fdce47e 100644 --- a/microceph/go.mod +++ b/microceph/go.mod @@ -4,13 +4,14 @@ go 1.18 require ( github.com/Rican7/retry v0.3.1 - github.com/canonical/lxd v0.0.0-20230705090120-570f7071eeb2 + github.com/canonical/lxd v0.0.0-20230829115710-2146784688fc github.com/canonical/microcluster v0.0.0-20230705140256-7726061a60bb + github.com/gorilla/mux v1.8.0 github.com/olekukonko/tablewriter v0.0.5 github.com/pborman/uuid v1.2.1 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 - github.com/tidwall/gjson v1.14.4 + github.com/tidwall/gjson v1.16.0 ) require ( @@ -25,8 +26,7 @@ require ( github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/renameio v1.0.1 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/schema v1.2.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -37,12 +37,12 @@ require ( github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kr/fs 
v0.1.0 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-sqlite3 v1.14.17 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/muhlemmer/gu v0.3.1 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/sftp v1.13.5 // indirect + github.com/pkg/sftp v1.13.6 // indirect github.com/pkg/xattr v0.4.9 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rivo/uniseg v0.4.4 // indirect @@ -50,19 +50,19 @@ require ( github.com/rogpeppe/fastuuid v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.1 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect - github.com/zitadel/oidc/v2 v2.6.3 // indirect - golang.org/x/crypto v0.10.0 // indirect - golang.org/x/net v0.11.0 // indirect - golang.org/x/oauth2 v0.9.0 // indirect + github.com/zitadel/oidc/v2 v2.8.3 // indirect + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect + golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.9.0 // indirect - golang.org/x/term v0.9.0 // indirect - golang.org/x/text v0.10.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/term v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/errgo.v1 v1.0.1 // indirect gopkg.in/httprequest.v1 v1.2.1 // indirect gopkg.in/macaroon.v2 v2.1.0 // indirect diff --git a/microceph/go.sum b/microceph/go.sum index fa7b9f90..857a75c6 100644 --- a/microceph/go.sum +++ b/microceph/go.sum @@ -52,8 +52,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/canonical/go-dqlite v1.20.0 h1:pnkn0oS0hPXWeODjvjWONKGb5KYh8kK0aruDPzZLwmU= github.com/canonical/go-dqlite v1.20.0/go.mod h1:Uvy943N8R4CFUAs59A1NVaziWY9nJ686lScY7ywurfg= -github.com/canonical/lxd v0.0.0-20230705090120-570f7071eeb2 h1:pOkOFlpAzO+aHKTQ2igNNLUX7wVZ35SXgj76edsP8TQ= -github.com/canonical/lxd v0.0.0-20230705090120-570f7071eeb2/go.mod h1:iRepjqZoVVmu2NXI55FgUAnCgoqggPTKTYMqTA4P6Qk= +github.com/canonical/lxd v0.0.0-20230829115710-2146784688fc h1:MiWDbnXBlaTrqAXWYw82ybM4R/I94fqs/QljeZ8qwLM= +github.com/canonical/lxd v0.0.0-20230829115710-2146784688fc/go.mod h1:OfCg6z25zfVH3vTq5G2Uq0/YCEmNQkNo0C0hoPWMWMQ= github.com/canonical/microcluster v0.0.0-20230705140256-7726061a60bb h1:rTBytbpzzHmWkptalBUDt5xtaA+Wi2TCNtuzJectBC0= github.com/canonical/microcluster v0.0.0-20230705140256-7726061a60bb/go.mod h1:vrMOrNO0Iu0lgGaFlKBwZdLXYNdogOgk6WqD4SneSDI= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -169,8 +169,8 @@ github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -241,8 +241,8 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= @@ -274,8 +274,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= -github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -312,8 +312,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= +github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -322,11 +323,12 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= -github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg= +github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= @@ -337,8 +339,9 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zitadel/oidc/v2 v2.6.3 h1:YY87cAcdI+3voZqcRU2RGz3Pxky/2KsjDmYDVb6EgWw= -github.com/zitadel/oidc/v2 v2.6.3/go.mod h1:2LrbdKYLSgKxXBfct56ev4e186J7TXotlZxb6tExOO4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zitadel/oidc/v2 v2.8.3 h1:CRVlPytgjXvemh27YkjbAtHmbyk1OUSa2EMKHYvUsJo= +github.com/zitadel/oidc/v2 v2.8.3/go.mod h1:YYpB9yPa13wHB6NAS9J1GkZtSEktakM6r6LbJDFVOiE= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -360,9 +363,10 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -398,6 +402,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -435,9 +440,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -450,8 +456,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs= -golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -463,6 +469,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -505,19 +512,22 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -525,9 +535,10 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -583,6 +594,7 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -690,8 +702,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/microceph/mocks/ConfigWriter.go b/microceph/mocks/ConfigWriter.go new file mode 100644 index 00000000..b9a13a31 --- /dev/null +++ b/microceph/mocks/ConfigWriter.go @@ -0,0 +1,38 @@ +// Code generated by mockery v2.30.10. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// ConfigWriter is an autogenerated mock type for the ConfigWriter type +type ConfigWriter struct { + mock.Mock +} + +// WriteConfig provides a mock function with given fields: _a0 +func (_m *ConfigWriter) WriteConfig(_a0 interface{}) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewConfigWriter creates a new instance of ConfigWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewConfigWriter(t interface { + mock.TestingT + Cleanup(func()) +}) *ConfigWriter { + mock := &ConfigWriter{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/microceph/mocks/MemberCounterInterface.go b/microceph/mocks/MemberCounterInterface.go index 83701722..405aa548 100644 --- a/microceph/mocks/MemberCounterInterface.go +++ b/microceph/mocks/MemberCounterInterface.go @@ -35,6 +35,30 @@ func (_m *MemberCounterInterface) Count(s *state.State) (int, error) { return r0, r1 } +// CountExclude provides a mock function with given fields: s, exclude +func (_m *MemberCounterInterface) CountExclude(s *state.State, exclude int64) (int, error) { + ret := _m.Called(s, exclude) + + var r0 int + var r1 error + if rf, ok := ret.Get(0).(func(*state.State, int64) (int, error)); ok { + return rf(s, exclude) + } + if rf, ok := ret.Get(0).(func(*state.State, int64) int); ok { + r0 = rf(s, exclude) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(*state.State, int64) error); ok { + r1 = rf(s, exclude) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // NewMemberCounterInterface creates a new instance of MemberCounterInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewMemberCounterInterface(t interface { diff --git a/microceph/mocks/OSDQueryInterface.go b/microceph/mocks/OSDQueryInterface.go new file mode 100644 index 00000000..189bae8a --- /dev/null +++ b/microceph/mocks/OSDQueryInterface.go @@ -0,0 +1,105 @@ +// Generated by mockery with a minor update as mockery confuses import paths +package mocks + +import ( + types "github.com/canonical/microceph/microceph/api/types" + state "github.com/canonical/microcluster/state" // mockery gets confused about import paths here + mock "github.com/stretchr/testify/mock" +) + +// OSDQueryInterface is an autogenerated mock type for the OSDQueryInterface type +type OSDQueryInterface struct { + mock.Mock +} + +// Delete provides a mock function with given fields: s, osd +func (_m *OSDQueryInterface) Delete(s *state.State, osd int64) error { + ret := _m.Called(s, osd) + + var r0 error + if rf, ok := ret.Get(0).(func(*state.State, int64) error); ok { + r0 = rf(s, osd) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// HaveOSD provides a mock function with given fields: s, osd +func (_m *OSDQueryInterface) HaveOSD(s *state.State, osd int64) bool { + ret := _m.Called(s, osd) + + var r0 bool + if rf, ok := ret.Get(0).(func(*state.State, int64) bool); ok { + r0 = rf(s, osd) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// List provides a mock function with given fields: s +func (_m *OSDQueryInterface) List(s *state.State) (types.Disks, error) { + ret := _m.Called(s) + + var r0 types.Disks + var r1 error + if rf, ok := ret.Get(0).(func(*state.State) (types.Disks, error)); ok { + return rf(s) + } + if rf, ok := ret.Get(0).(func(*state.State) types.Disks); ok { + r0 = rf(s) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Disks) + } + } + + if rf, ok := ret.Get(1).(func(*state.State) error); ok { + r1 = rf(s) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Path provides a mock function with given fields: s, osd +func (_m *OSDQueryInterface) Path(s *state.State, osd int64) (string, error) { + ret := _m.Called(s, osd) + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(*state.State, int64) (string, error)); ok { + return rf(s, osd) + } + if rf, ok := ret.Get(0).(func(*state.State, int64) string); ok { + r0 = rf(s, osd) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(*state.State, int64) error); ok { + r1 = rf(s, osd) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewOSDQueryInterface creates a new instance of OSDQueryInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewOSDQueryInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *OSDQueryInterface { + mock := &OSDQueryInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/microceph/mocks/PlacementIntf.go b/microceph/mocks/PlacementIntf.go index 8172c097..7a173aa8 100644 --- a/microceph/mocks/PlacementIntf.go +++ b/microceph/mocks/PlacementIntf.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.30.16. DO NOT EDIT. +// Code generated by mockery v2.30.10. DO NOT EDIT. package mocks diff --git a/microceph/mocks/Runner.go b/microceph/mocks/Runner.go index f3365e32..ff217d41 100644 --- a/microceph/mocks/Runner.go +++ b/microceph/mocks/Runner.go @@ -1,8 +1,12 @@ -// Code generated by mockery v2.14.1. DO NOT EDIT. +// Code generated by mockery v2.30.10. DO NOT EDIT. 
package mocks -import mock "github.com/stretchr/testify/mock" +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) // Runner is an autogenerated mock type for the Runner type type Runner struct { @@ -21,13 +25,16 @@ func (_m *Runner) RunCommand(name string, arg ...string) (string, error) { ret := _m.Called(_ca...) var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(string, ...string) (string, error)); ok { + return rf(name, arg...) + } if rf, ok := ret.Get(0).(func(string, ...string) string); ok { r0 = rf(name, arg...) } else { r0 = ret.Get(0).(string) } - var r1 error if rf, ok := ret.Get(1).(func(string, ...string) error); ok { r1 = rf(name, arg...) } else { @@ -37,13 +44,43 @@ func (_m *Runner) RunCommand(name string, arg ...string) (string, error) { return r0, r1 } -type mockConstructorTestingTNewRunner interface { - mock.TestingT - Cleanup(func()) +// RunCommandContext provides a mock function with given fields: ctx, name, arg +func (_m *Runner) RunCommandContext(ctx context.Context, name string, arg ...string) (string, error) { + _va := make([]interface{}, len(arg)) + for _i := range arg { + _va[_i] = arg[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, name) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...string) (string, error)); ok { + return rf(ctx, name, arg...) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ...string) string); ok { + r0 = rf(ctx, name, arg...) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ...string) error); ok { + r1 = rf(ctx, name, arg...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // NewRunner creates a new instance of Runner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewRunner(t mockConstructorTestingTNewRunner) *Runner { +// The first argument is typically a *testing.T value. +func NewRunner(t interface { + mock.TestingT + Cleanup(func()) +}) *Runner { mock := &Runner{} mock.Mock.Test(t) diff --git a/microceph/mocks/StateInterface.go b/microceph/mocks/StateInterface.go index 644abf4d..ea03fe29 100644 --- a/microceph/mocks/StateInterface.go +++ b/microceph/mocks/StateInterface.go @@ -27,13 +27,12 @@ func (_m *StateInterface) ClusterState() *state.State { return r0 } -type mockConstructorTestingTNewStateInterface interface { +// NewStateInterface creates a new instance of StateInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStateInterface(t interface { mock.TestingT Cleanup(func()) -} - -// NewStateInterface creates a new instance of StateInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStateInterface(t mockConstructorTestingTNewStateInterface) *StateInterface { +}) *StateInterface { mock := &StateInterface{} mock.Mock.Test(t)
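As a usage note for the regenerated mocks above: the mockery v2.30.x constructors (NewRunner, NewOSDQueryInterface, etc.) register a Cleanup hook that asserts all expectations when the test finishes, so tests no longer need to call AssertExpectations themselves. The sketch below is only an illustration of that pattern against the new Runner.RunCommandContext method; the test file location, package name, and the ceph command and output strings are assumptions for the example, not part of this change.

package mocks_test

import (
	"context"
	"testing"

	"github.com/canonical/microceph/microceph/mocks"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

// TestRunnerMock is a minimal sketch showing how the regenerated Runner mock
// can stub the newly added RunCommandContext method. Expectations are checked
// automatically by the Cleanup hook that mocks.NewRunner registers on t.
func TestRunnerMock(t *testing.T) {
	r := mocks.NewRunner(t)

	// Match any context, then the exact command and arguments.
	// The command and canned output are illustrative only.
	r.On("RunCommandContext", mock.Anything, "ceph", "osd", "safe-to-destroy", "osd.3").
		Return("OSD(s) 3 are safe to destroy", nil).
		Once()

	out, err := r.RunCommandContext(context.Background(), "ceph", "osd", "safe-to-destroy", "osd.3")
	assert.NoError(t, err)
	assert.Contains(t, out, "safe to destroy")
}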