From f1e0421977f86f224efd129e095cb1bd4e4814ab Mon Sep 17 00:00:00 2001 From: Utkarsh Bhatt Date: Thu, 17 Oct 2024 15:36:04 +0530 Subject: [PATCH] MicroCeph Remote Replication (2/3): RBD Mirroring (#437) # Description Adds implementation for enabling/disabling rbd mirroring (remote replication) to a configured MicroCeph remote cluster. ## Type of change Please delete options that are not relevant. - [ ] Bug fix (non-breaking change which fixes an issue) - [X] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] CleanCode (Code refactor, test updates, does not introduce functional changes) - [X] Documentation update (Contains Doc change) ## How Has This Been Tested? - [X] CI Tests - [X] Unit Tests ## Contributor's Checklist Please check that you have: - [ ] self-reviewed the code in this PR. - [X] added code comments, particularly in hard-to-understand areas. - [X] updated the user documentation with corresponding changes. - [X] added tests to verify effectiveness of this change. 
--------- Signed-off-by: Utkarsh Bhatt --- .github/workflows/tests.yml | 15 + docs/how-to/configure-rbd-mirroring.rst | 96 +++ docs/how-to/index.rst | 12 +- .../commands/remote-replication-rbd.rst | 98 +++ microceph/api/ops_replication.go | 145 ++++ microceph/api/remote.go | 10 +- microceph/api/servers.go | 5 + microceph/api/types/replication.go | 37 + microceph/api/types/replication_rbd.go | 197 ++++++ microceph/ceph/osd.go | 48 ++ microceph/ceph/rbd_mirror.go | 631 ++++++++++++++++++ microceph/ceph/rbd_mirror_test.go | 95 +++ microceph/ceph/replication.go | 125 ++++ microceph/ceph/replication_rbd.go | 433 ++++++++++++ .../test_assets/rbd_mirror_image_status.json | 22 + .../test_assets/rbd_mirror_pool_info.json | 13 + .../test_assets/rbd_mirror_pool_status.json | 10 + .../rbd_mirror_verbose_pool_status.json | 67 ++ microceph/client/remote.go | 2 +- microceph/client/remote_replication.go | 41 ++ microceph/cmd/microceph/remote.go | 3 + microceph/cmd/microceph/remote_list.go | 6 +- microceph/cmd/microceph/remote_replication.go | 22 + .../cmd/microceph/remote_replication_rbd.go | 38 ++ .../remote_replication_rbd_configure.go | 71 ++ .../remote_replication_rbd_disable.go | 67 ++ .../remote_replication_rbd_enable.go | 81 +++ .../microceph/remote_replication_rbd_list.go | 104 +++ .../remote_replication_rbd_status.go | 179 +++++ microceph/cmd/microcephd/main.go | 4 +- microceph/constants/constants.go | 22 +- microceph/go.mod | 33 +- microceph/go.sum | 66 +- snapcraft/commands/rbd-mirror.start | 1 - tests/scripts/actionutils.sh | 87 +++ 35 files changed, 2826 insertions(+), 60 deletions(-) create mode 100644 docs/how-to/configure-rbd-mirroring.rst create mode 100644 docs/reference/commands/remote-replication-rbd.rst create mode 100644 microceph/api/ops_replication.go create mode 100644 microceph/api/types/replication.go create mode 100644 microceph/api/types/replication_rbd.go create mode 100644 microceph/ceph/rbd_mirror.go create mode 100644 
microceph/ceph/rbd_mirror_test.go create mode 100644 microceph/ceph/replication.go create mode 100644 microceph/ceph/replication_rbd.go create mode 100644 microceph/ceph/test_assets/rbd_mirror_image_status.json create mode 100644 microceph/ceph/test_assets/rbd_mirror_pool_info.json create mode 100644 microceph/ceph/test_assets/rbd_mirror_pool_status.json create mode 100644 microceph/ceph/test_assets/rbd_mirror_verbose_pool_status.json create mode 100644 microceph/client/remote_replication.go create mode 100644 microceph/cmd/microceph/remote_replication.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd_configure.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd_disable.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd_enable.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd_list.go create mode 100644 microceph/cmd/microceph/remote_replication_rbd_status.go diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 645af1ed..80beea31 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -706,5 +706,20 @@ jobs: - name: Verify Remote authentication run: ~/actionutils.sh remote_perform_remote_ops_check + - name: Enable RBD Mirror Daemon + run : ~/actionutils.sh remote_enable_rbd_mirror_daemon + + - name: Configure RBD mirror + run : ~/actionutils.sh remote_configure_rbd_mirroring + + - name: Wait for RBD mirror to sync images + run : ~/actionutils.sh remote_wait_for_secondary_to_sync + + - name: Verify RBD mirror + run : ~/actionutils.sh remote_verify_rbd_mirroring + + - name: Disable RBD mirror + run : ~/actionutils.sh remote_disable_rbd_mirroring + - name: Verify Remote removal run: ~/actionutils.sh remote_remove_and_verify diff --git a/docs/how-to/configure-rbd-mirroring.rst b/docs/how-to/configure-rbd-mirroring.rst new file mode 100644 index 00000000..1629d904 --- /dev/null +++ 
b/docs/how-to/configure-rbd-mirroring.rst @@ -0,0 +1,96 @@ +================================== +Configure RBD remote replication +================================== + +MicroCeph supports asynchronously replicating (mirroring) RBD images to a remote cluster. + +An operator can enable this on any rbd image, or a whole pool. Enabling it on a pool enables it for all the images in the pool. + +Prerequisites +-------------- +1. A primary and a secondary MicroCeph cluster, for example named "primary_cluster" and "secondary_cluster" +2. primary_cluster has imported configurations from secondary_cluster and vice versa. refer to :doc:`import remote <./import-remote-cluster>` +3. Both clusters have 2 rbd pools: pool_one and pool_two. +4. Both pools at cluster "primary_cluster" have 2 images each (image_one and image_two) while the pools at cluster "secondary_cluster" are empty. + +Enable RBD remote replication +------------------------------- + +An operator can enable replication for a given rbd pool which is present at both clusters as + +.. code-block:: none + + sudo microceph remote replication rbd enable pool_one --remote secondary_cluster + +Here, pool_one is the name of the rbd pool and it is expected to be present at both the clusters. + +Check RBD remote replication status +------------------------------------ + +The above command will enable replication for ALL the images inside pool_one, it can be checked as: + +.. 
code-block:: none + + sudo microceph remote replication rbd status pool_one + +------------------------+----------------------+ + | SUMMARY | HEALTH | + +-------------+----------+-------------+--------+ + | Name | pool_one | Replication | OK | + | Mode | pool | Daemon | OK | + | Image Count | 2 | Image | OK | + +-------------+----------+-------------+--------+ + + +-------------------+-----------+--------------------------------------+ + | REMOTE NAME | DIRECTION | UUID | + +-------------------+-----------+--------------------------------------+ + | secondary_cluster | rx-tx | f25af3c3-f405-4159-a5c4-220c01d27507 | + +-------------------+-----------+--------------------------------------+ + +The status shows that there are 2 images in the pool which are enabled for mirroring. + +Listing all RBD remote replication images +------------------------------------------ + +An operator can list all the images that have replication (mirroring) enabled as follows: + +.. code-block:: none + + sudo microceph remote replication rbd list + +-----------+------------+------------+---------------------+ + | POOL NAME | IMAGE NAME | IS PRIMARY | LAST LOCAL UPDATE | + +-----------+------------+------------+---------------------+ + | pool_one | image_one | true | 2024-10-08 13:54:49 | + | pool_one | image_two | true | 2024-10-08 13:55:19 | + | pool_two | image_one | true | 2024-10-08 13:55:12 | + | pool_two | image_two | true | 2024-10-08 13:55:07 | + +-----------+------------+------------+---------------------+ + +Disabling RBD remote replication +--------------------------------- + +In some cases, it may be desired to disable replication. A single image ($pool/$image) or +a whole pool ($pool) can be disabled in a single command as follows: + +Disable Pool replication: +.. 
code-block:: none + + sudo microceph remote replication rbd disable pool_one + sudo microceph remote replication rbd list + +-----------+------------+------------+---------------------+ + | POOL NAME | IMAGE NAME | IS PRIMARY | LAST LOCAL UPDATE | + +-----------+------------+------------+---------------------+ + | pool_two | image_one | true | 2024-10-08 13:55:12 | + | pool_two | image_two | true | 2024-10-08 13:55:07 | + +-----------+------------+------------+---------------------+ + +Disable Image replication: +.. code-block:: none + + sudo microceph remote replication rbd disable pool_two/image_two + sudo microceph remote replication rbd list + +-----------+------------+------------+---------------------+ + | POOL NAME | IMAGE NAME | IS PRIMARY | LAST LOCAL UPDATE | + +-----------+------------+------------+---------------------+ + | pool_two | image_one | true | 2024-10-08 13:55:12 | + +-----------+------------+------------+---------------------+ + diff --git a/docs/how-to/index.rst b/docs/how-to/index.rst index 9f6c1b45..ae7c5cee 100644 --- a/docs/how-to/index.rst +++ b/docs/how-to/index.rst @@ -43,8 +43,18 @@ migrate services and more. change-log-level migrate-auto-services remove-disk - import-remote-cluster +Managing a remote cluster +------------------------- + +Make MicroCeph aware of a remote cluster and configure remote replication for +RBD pools and images. + +.. toctree:: + :maxdepth: 1 + + import-remote-cluster + configure-rbd-mirroring Upgrading your cluster ---------------------- diff --git a/docs/reference/commands/remote-replication-rbd.rst b/docs/reference/commands/remote-replication-rbd.rst new file mode 100644 index 00000000..7d3db611 --- /dev/null +++ b/docs/reference/commands/remote-replication-rbd.rst @@ -0,0 +1,98 @@ +============================= +``remote replication rbd`` +============================= + +Usage: + +.. code-block:: none + + microceph remote replication rbd [command] + +Available commands: + +..
code-block:: none + + configure Configure remote replication parameters for RBD resource (Pool or Image) + disable Disable remote replication for RBD resource (Pool or Image) + enable Enable remote replication for RBD resource (Pool or Image) + list List all configured remotes replication pairs. + status Show RBD resource (Pool or Image) replication status + +Global options: + +.. code-block:: none + + -d, --debug Show all debug messages + -h, --help Print help + --state-dir Path to store state information + -v, --verbose Show all information messages + --version Print version number + +``enable`` +---------- + +Enable remote replication for RBD resource (Pool or Image) + +Usage: + +.. code-block:: none + + microceph remote replication rbd enable [flags] + +Flags: + +.. code-block:: none + + --remote string remote MicroCeph cluster name + --schedule string snapshot schedule in days, hours, or minutes using d, h, m suffix respectively + --skip-auto-enable do not auto enable rbd mirroring for all images in the pool. + --type string 'journal' or 'snapshot', defaults to journal (default "journal") + +``status`` +---------- + +Show RBD resource (Pool or Image) replication status + +Usage: + +.. code-block:: none + + microceph remote replication rbd status [flags] + +Flags: + +.. code-block:: none + + --json output as json string + +``list`` +---------- + +List all configured remotes replication pairs. + +Usage: + +.. code-block:: none + + microceph remote replication rbd list [flags] + +.. code-block:: none + + --json output as json string + --pool string RBD pool name + +``disable`` +------------ + +Disable remote replication for RBD resource (Pool or Image) + +Usage: + +.. code-block:: none + + microceph remote replication rbd disable [flags] + +.. 
code-block:: none + + --force forcefully disable replication for rbd resource + diff --git a/microceph/api/ops_replication.go b/microceph/api/ops_replication.go new file mode 100644 index 00000000..406ff148 --- /dev/null +++ b/microceph/api/ops_replication.go @@ -0,0 +1,145 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/canonical/lxd/lxd/response" + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/ceph" + "github.com/canonical/microceph/microceph/interfaces" + "github.com/canonical/microcluster/v2/rest" + "github.com/canonical/microcluster/v2/state" + "github.com/gorilla/mux" +) + +// Top level ops API +var opsCmd = rest.Endpoint{ + Path: "ops", +} + +// replication ops API +var opsReplicationCmd = rest.Endpoint{ + Path: "ops/replication/", +} + +// List Replications +var opsReplicationWorkloadCmd = rest.Endpoint{ + Path: "ops/replication/{wl}", + Get: rest.EndpointAction{Handler: getOpsReplicationWorkload, ProxyTarget: false}, +} + +// CRUD Replication +var opsReplicationResourceCmd = rest.Endpoint{ + Path: "ops/replication/{wl}/{name}", + Get: rest.EndpointAction{Handler: getOpsReplicationResource, ProxyTarget: false}, + Post: rest.EndpointAction{Handler: postOpsReplicationResource, ProxyTarget: false}, + Put: rest.EndpointAction{Handler: putOpsReplicationResource, ProxyTarget: false}, + Delete: rest.EndpointAction{Handler: deleteOpsReplicationResource, ProxyTarget: false}, +} + +// getOpsReplicationWorkload handles list operation +func getOpsReplicationWorkload(s state.State, r *http.Request) response.Response { + return cmdOpsReplication(s, r, types.ListReplicationRequest) +} + +// getOpsReplicationResource handles status operation for a certain resource. 
+func getOpsReplicationResource(s state.State, r *http.Request) response.Response { + return cmdOpsReplication(s, r, types.StatusReplicationRequest) +} + +// postOpsReplicationResource handles rep enablement for the requested resource +func postOpsReplicationResource(s state.State, r *http.Request) response.Response { + return cmdOpsReplication(s, r, types.EnableReplicationRequest) +} + +// putOpsReplicationResource handles configuration of the requested resource +func putOpsReplicationResource(s state.State, r *http.Request) response.Response { + return cmdOpsReplication(s, r, types.ConfigureReplicationRequest) +} + +// deleteOpsReplicationResource handles rep disablement for the requested resource +func deleteOpsReplicationResource(s state.State, r *http.Request) response.Response { + return cmdOpsReplication(s, r, types.DisableReplicationRequest) +} + +// cmdOpsReplication is the common handler for all requests on replication endpoint. +func cmdOpsReplication(s state.State, r *http.Request, patchRequest types.ReplicationRequestType) response.Response { + // Get workload name from API + wl, err := url.PathUnescape(mux.Vars(r)["wl"]) + if err != nil { + logger.Errorf("REP: %v", err.Error()) + return response.InternalError(err) + } + + // Get resource name from API + resource, err := url.PathUnescape(mux.Vars(r)["name"]) + if err != nil { + logger.Errorf("REP: %v", err.Error()) + return response.InternalError(err) + } + + // Populate the replication request with necessary information for RESTfullnes + var req types.ReplicationRequest + if wl == string(types.RbdWorkload) { + var data types.RbdReplicationRequest + err := json.NewDecoder(r.Body).Decode(&data) + if err != nil { + logger.Errorf("REP: failed to decode request data: %v", err.Error()) + return response.InternalError(err) + } + + // carry RbdReplicationRequest in interface object. + data.SetAPIObjectId(resource) + // Patch request type. 
+ if len(patchRequest) != 0 { + data.RequestType = patchRequest + } + + req = data + } else { + return response.SmartError(fmt.Errorf("unknown workload %s, resource %s", wl, resource)) + } + + return handleReplicationRequest(s, r.Context(), req) +} + +// handleReplicationRequest parses the replication request and feeds it to the corresponding state machine. +func handleReplicationRequest(s state.State, ctx context.Context, req types.ReplicationRequest) response.Response { + // Fetch replication handler + wl := string(req.GetWorkloadType()) + rh := ceph.GetReplicationHandler(wl) + if rh == nil { + return response.SmartError(fmt.Errorf("no replication handler for %s workload", wl)) + } + + // Populate resource info + err := rh.PreFill(ctx, req) + if err != nil { + return response.SmartError(err) + } + + // Get FSM + repFsm := ceph.GetReplicationStateMachine(rh.GetResourceState()) + + var resp string + event := req.GetWorkloadRequestType() + // Each event is provided with, replication handler, response object and state. + err = repFsm.FireCtx(ctx, event, rh, &resp, interfaces.CephState{State: s}) + if err != nil { + return response.SmartError(err) + } + + logger.Debugf("REPFSM: Check FSM response: %s", resp) + + // If non-empty response + if len(resp) > 0 { + return response.SyncResponse(true, resp) + } + + return response.SyncResponse(true, "") +} diff --git a/microceph/api/remote.go b/microceph/api/remote.go index 9f1cf130..06379b4d 100644 --- a/microceph/api/remote.go +++ b/microceph/api/remote.go @@ -106,8 +106,9 @@ func cmdRemoteDelete(state state.State, r *http.Request) response.Response { return response.BadRequest(err) } - // Note(utkarshbhatthere): TODO for when remote replication is implemented. - // [ ] add check for remote replication pairs before deleting remotes. + if isRemoteConfigured(remoteName) { + return response.SmartError(fmt.Errorf("cannot remove remote(%s), disable RBD mirroring", remoteName)) + } // Remove remote record. 
err = database.DeleteRemoteDb(r.Context(), state, remoteName) @@ -126,6 +127,11 @@ func cmdRemoteDelete(state state.State, r *http.Request) response.Response { /*****************HELPER FUNCTIONS**************************/ +func isRemoteConfigured(remoteName string) bool { + // check remote configured for RBD mirroring + return ceph.IsRemoteConfiguredForRbdMirror(remoteName) +} + // renderConfAndKeyringFiles generates the $cluster.conf and $cluster.keyring files on the host. func renderConfAndKeyringFiles(remoteName string, localName string, configs map[string]string) error { monHosts := []string{} diff --git a/microceph/api/servers.go b/microceph/api/servers.go index 1169f2fc..b26f44bd 100644 --- a/microceph/api/servers.go +++ b/microceph/api/servers.go @@ -36,6 +36,11 @@ var Servers = map[string]rest.Server{ clusterCmd, remoteCmd, remoteNameCmd, + opsCmd, + // Remote Replication APIs + opsReplicationCmd, + opsReplicationWorkloadCmd, + opsReplicationResourceCmd, }, }, }, diff --git a/microceph/api/types/replication.go b/microceph/api/types/replication.go new file mode 100644 index 00000000..fb512c17 --- /dev/null +++ b/microceph/api/types/replication.go @@ -0,0 +1,37 @@ +package types + +import ( + "github.com/canonical/microceph/microceph/constants" +) + +// ################################## Generic Replication Request ################################## +// ReplicationRequestType defines the various events replication request types. +type ReplicationRequestType string + +// This value is split till '-' to get the API request type and the event name encoded in one string. 
+const ( + EnableReplicationRequest ReplicationRequestType = "POST-" + constants.EventEnableReplication + ConfigureReplicationRequest ReplicationRequestType = "PUT-" + constants.EventConfigureReplication + DisableReplicationRequest ReplicationRequestType = "DELETE-" + constants.EventDisableReplication + StatusReplicationRequest ReplicationRequestType = "GET-" + constants.EventStatusReplication + ListReplicationRequest ReplicationRequestType = "GET-" + constants.EventListReplication +) + +type CephWorkloadType string + +const ( + RbdWorkload CephWorkloadType = "rbd" + FsWorkload CephWorkloadType = "cephfs" + RgwWorkload CephWorkloadType = "rgw" +) + +// ReplicationRequest is interface for all Replication implementations (rbd, cephfs, rgw). +// It defines methods used by: +// 1. client code to make the API request +// 2. Replication state machine to feed the correct event trigger. +type ReplicationRequest interface { + GetWorkloadType() CephWorkloadType + GetAPIObjectId() string + GetAPIRequestType() string + GetWorkloadRequestType() string +} diff --git a/microceph/api/types/replication_rbd.go b/microceph/api/types/replication_rbd.go new file mode 100644 index 00000000..f45a1be2 --- /dev/null +++ b/microceph/api/types/replication_rbd.go @@ -0,0 +1,197 @@ +package types + +import ( + "fmt" + "net/url" + "strings" + + "github.com/canonical/lxd/shared/logger" +) + +// Types for RBD Pool status table. 
+type RbdPoolStatusImageBrief struct { + Name string `json:"name" yaml:"name"` + IsPrimary bool `json:"is_primary" yaml:"is_primary"` + LastLocalUpdate string `json:"last_local_update" yaml:"last_local_update"` +} + +type RbdPoolStatusRemoteBrief struct { + Name string `json:"name" yaml:"name"` + UUID string `json:"uuid" yaml:"uuid"` + Direction string `json:"direction" yaml:"direction"` +} + +type RbdPoolStatus struct { + Name string `json:"name" yaml:"name"` + Type string `json:"type" yaml:"type"` + HealthReplication string `json:"rep_health" yaml:"rep_health"` + HealthDaemon string `json:"daemon_health" yaml:"daemon_health"` + HealthImages string `json:"image_health" yaml:"image_health"` + ImageCount int `json:"image_count" yaml:"image_count"` + Images []RbdPoolStatusImageBrief `json:"images" yaml:"images"` + Remotes []RbdPoolStatusRemoteBrief `json:"remotes" yaml:"remotes"` +} + +// Types for RBD Image status table. +type RbdImageStatusRemoteBrief struct { + Name string `json:"name" yaml:"name"` + Status string `json:"status" yaml:"status"` + LastRemoteUpdate string `json:"last_remote_update" yaml:"last_remote_update"` +} + +type RbdImageStatus struct { + Name string `json:"name" yaml:"name"` + ID string `json:"id" yaml:"id"` + Type string `json:"type" yaml:"type"` + IsPrimary bool `json:"is_primary" yaml:"is_primary"` + Status string `json:"status" yaml:"status"` + LastLocalUpdate string `json:"last_local_update" yaml:"last_local_update"` + Remotes []RbdImageStatusRemoteBrief `json:"remotes" yaml:"remotes"` +} + +// Types for Rbd List + +type RbdPoolListImageBrief struct { + Name string `json:"name" yaml:"name"` + Type string `json:"Type" yaml:"Type"` + IsPrimary bool `json:"is_primary" yaml:"is_primary"` + LastLocalUpdate string `json:"last_local_update" yaml:"last_local_update"` +} + +type RbdPoolBrief struct { + Name string `json:"name" yaml:"name"` + Images []RbdPoolListImageBrief +} + +type RbdPoolList []RbdPoolBrief + +// 
################################## RBD Replication Request ################################## +// RbdReplicationDirection defines Rbd mirror direction +type RbdReplicationDirection string + +const ( + RbdReplicationDirectionRXOnly RbdReplicationDirection = "rx-only" + RbdReplicationDirectionRXTX RbdReplicationDirection = "rx-tx" +) + +// RbdResourceType defines request resource type +type RbdResourceType string + +const ( + RbdResourceDisabled RbdResourceType = "disabled" + RbdResourcePool RbdResourceType = "pool" + RbdResourceImage RbdResourceType = "image" +) + +// RbdReplicationType defines mode of rbd mirroring +type RbdReplicationType string + +const ( + RbdReplicationDisabled RbdReplicationType = "disable" + RbdReplicationJournaling RbdReplicationType = "journal" + RbdReplicationSnapshot RbdReplicationType = "snapshot" +) + +// RbdReplicationRequest implements ReplicationRequest for RBD replication. +type RbdReplicationRequest struct { + SourcePool string `json:"source_pool" yaml:"source_pool"` + SourceImage string `json:"source_image" yaml:"source_image"` + RemoteName string `json:"remote" yaml:"remote"` + // snapshot in d,h,m format + Schedule string `json:"schedule" yaml:"schedule"` + ReplicationType RbdReplicationType `json:"replication_type" yaml:"replication_type"` + ResourceType RbdResourceType `json:"resource_type" yaml:"resource_type"` + RequestType ReplicationRequestType `json:"request_type" yaml:"request_type"` + IsForceOp bool `json:"force" yaml:"force"` + SkipAutoEnable bool `json:"skipAutoEnable" yaml:"skipAutoEnable"` +} + +// GetWorkloadType provides the workload name for replication request +func (req RbdReplicationRequest) GetWorkloadType() CephWorkloadType { + return RbdWorkload +} + +// GetAPIObjectId provides the API object id i.e. /replication/rbd/ +func (req RbdReplicationRequest) GetAPIObjectId() string { + // If both Pool and Image values are present encode for query. 
+ if len(req.SourceImage) != 0 && len(req.SourcePool) != 0 { + resource := url.QueryEscape(fmt.Sprintf("%s/%s", req.SourcePool, req.SourceImage)) + logger.Debugf("REPAPI: Resource: %s", resource) + return resource + } + + return req.SourcePool +} + +// SetAPIObjectId provides the API object id i.e. /replication/rbd/ +func (req *RbdReplicationRequest) SetAPIObjectId(id string) error { + // unescape object string + object, err := url.PathUnescape(id) + if err != nil { + return err + } + + frags := strings.Split(string(object), "/") + if len(frags) > 1 { + req.SourcePool = frags[0] + req.SourceImage = frags[1] + } else { + req.SourcePool = object + } + + return nil +} + +// GetAPIRequestType provides the REST method for the request +func (req RbdReplicationRequest) GetAPIRequestType() string { + frags := strings.Split(string(req.RequestType), "-") + logger.Debugf("REPAPI: API frags: %v", frags) + if len(frags) == 0 { + return "" + } + + return frags[0] +} + +// GetWorkloadRequestType provides the event used as the FSM trigger. +func (req RbdReplicationRequest) GetWorkloadRequestType() string { + frags := strings.Split(string(req.RequestType), "-") + logger.Debugf("REPAPI: Workload frags: %v", frags) + if len(frags) < 2 { + return "" + } + + return frags[1] +} + +// ################### Helpers ############################ +// GetRbdResourceType gets the resource type of the said request +func GetRbdResourceType(poolName string, imageName string) RbdResourceType { + if len(poolName) != 0 && len(imageName) != 0 { + return RbdResourceImage + } else { + return RbdResourcePool + } +} + +func GetPoolAndImageFromResource(resource string) (string, string, error) { + var pool string + var image string + resourceFrags := strings.Split(resource, "/") + if len(resourceFrags) < 1 || len(resourceFrags) > 2 { + return "", "", fmt.Errorf("check resource name %s, should be in $pool/$image format", resource) + } + + // If only pool name is provided. 
+ if len(resourceFrags) == 1 { + pool = resourceFrags[0] + image = "" + } else + // if both pool and image names are provided. + if len(resourceFrags) == 2 { + pool = resourceFrags[0] + image = resourceFrags[1] + } + + return pool, image, nil +} diff --git a/microceph/ceph/osd.go b/microceph/ceph/osd.go index 9b17746e..0344a045 100644 --- a/microceph/ceph/osd.go +++ b/microceph/ceph/osd.go @@ -1151,3 +1151,51 @@ func GetOSDPools() ([]types.Pool, error) { return pools, nil } + +// CephPool abstracts the parameters of a ceph pool as provided by `osd pool ls detail` +type CephPool struct { + Id int `json:"pool_id" yaml:"pool_id"` + Name string `json:"pool_name" yaml:"pool_name"` + Application map[string]interface{} `json:"application_metadata" yaml:"application_metadata"` +} + +// ListPools lists the current pools on the ceph cluster, +// Additionally filtered for requested application name. +func ListPools(application string) []CephPool { + args := []string{"osd", "pool", "ls", "detail", "--format", "json"} + + output, err := processExec.RunCommand("ceph", args...) + if err != nil { + return []CephPool{} + } + + logger.Infof("OSD: Pool list %s", output) + + ret := []CephPool{} + err = json.Unmarshal([]byte(output), &ret) + if err != nil { + logger.Warnf("Failed to Unmarshal pool details: %v", err) + return []CephPool{} + } + + // if no application filter provided. + if len(application) == 0 { + return ret + } + + // filtered return slice of maximum needed size. + filterdRet := make([]CephPool, len(ret)) + counter := 0 + for _, cephPool := range ret { + _, ok := cephPool.Application[application] + if ok { + // append to the filter slice. 
+ logger.Infof("OSD: Found match(%s) for application(%s)", cephPool.Name, application) + filterdRet[counter] = cephPool + counter++ + } + } + + logger.Infof("OSD: Filtered Pool list %v", filterdRet) + return filterdRet +} diff --git a/microceph/ceph/rbd_mirror.go b/microceph/ceph/rbd_mirror.go new file mode 100644 index 00000000..6fc2b85f --- /dev/null +++ b/microceph/ceph/rbd_mirror.go @@ -0,0 +1,631 @@ +package ceph + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/constants" + "github.com/tidwall/gjson" +) + +type imageSnapshotSchedule struct { + Schedule string `json:"interval" yaml:"interval"` + StartTime string `json:"start_time" yaml:"start_time"` +} + +// Ceph Commands + +// GetRbdMirrorPoolInfo fetches the mirroring info for the requested pool +func GetRbdMirrorPoolInfo(pool string, cluster string, client string) (RbdReplicationPoolInfo, error) { + response := RbdReplicationPoolInfo{} + args := []string{"mirror", "pool", "info", pool, "--format", "json"} + + // add --cluster and --id args + args = appendRemoteClusterArgs(args, cluster, client) + + output, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Warnf("REPRBD: failed pool info operation on res(%s): %v", pool, err) + return RbdReplicationPoolInfo{Mode: types.RbdResourceDisabled}, nil + } + + err = json.Unmarshal([]byte(output), &response) + if err != nil { + ne := fmt.Errorf("cannot unmarshal rbd response: %v", err) + logger.Errorf("REPRBD: %s", ne.Error()) + return RbdReplicationPoolInfo{Mode: types.RbdResourceDisabled}, ne + } + + logger.Debugf("REPRBD: Pool Info: %v", response) + + return response, nil +} + +// populatePoolStatus unmarshals the pool info json response into a structure. 
+func populatePoolStatus(status string) (RbdReplicationPoolStatus, error) { + summary := RbdReplicationPoolStatusCmdOutput{} + + err := json.Unmarshal([]byte(status), &summary) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return RbdReplicationPoolStatus{}, err + } + + return summary.Summary, nil +} + +// GetRbdMirrorPoolStatus fetches mirroring status for requested pool +func GetRbdMirrorPoolStatus(pool string, cluster string, client string) (RbdReplicationPoolStatus, error) { + args := []string{"mirror", "pool", "status", pool, "--format", "json"} + + output, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Warnf("failed pool status operation on res(%s): %v", pool, err) + return RbdReplicationPoolStatus{State: StateDisabledReplication}, nil + } + + logger.Infof("REPRBD: Raw Pool Status Output: %s", output) + + response, err := populatePoolStatus(output) + if err != nil { + ne := fmt.Errorf("cannot unmarshal rbd response: %v", err) + logger.Errorf(ne.Error()) + return RbdReplicationPoolStatus{State: StateDisabledReplication}, ne + } + + logger.Debugf("REPRBD: Pool Status: %v", response) + + // Count Images + count := 0 + for _, v := range response.Description { + count += v + } + + // Patch required values + response.State = StateEnabledReplication + response.ImageCount = count + + return response, nil +} + +// GetRbdMirrorVerbosePoolStatus fetches mirroring status for requested pool +func GetRbdMirrorVerbosePoolStatus(pool string, cluster string, client string) (RbdReplicationVerbosePoolStatus, error) { + response := RbdReplicationVerbosePoolStatus{Name: pool} + args := []string{"mirror", "pool", "status", pool, "--verbose", "--format", "json"} + + // Get verbose pool status + output, err := processExec.RunCommand("rbd", args...) 
+ if err != nil { + logger.Warnf("REPRBD: failed verbose pool status operation on res(%s): %v", pool, err) + return RbdReplicationVerbosePoolStatus{Summary: RbdReplicationPoolStatus{State: StateDisabledReplication}}, nil + } + + logger.Debugf("REPRBD: Raw Pool Verbose Status: %s", string(output)) + + // Unmarshal Summary into the structure. + summary := gjson.Get(string(output), "summary") + err = json.Unmarshal([]byte(summary.String()), &response.Summary) + if err != nil { + ne := fmt.Errorf("cannot unmarshal rbd response: %v", err) + logger.Errorf(ne.Error()) + return RbdReplicationVerbosePoolStatus{Summary: RbdReplicationPoolStatus{State: StateDisabledReplication}}, ne + } + + images := gjson.Get(string(output), "images") + response.Images = make([]RbdReplicationImageStatus, len(images.Array())) + + // populate images + for index, image := range images.Array() { + err := json.Unmarshal([]byte(image.String()), &response.Images[index]) + if err != nil { + name := gjson.Get(image.String(), "name") + logger.Warnf("failed to parse the image data for (%s/%s)", pool, name) + } + + response.Images[index].State = StateEnabledReplication + response.Images[index].IsPrimary = strings.Contains(response.Images[index].Description, "local image is primary") + } + + logger.Debugf("REPRBD: Pool Verbose Status: %v", response) + + // Patch required values + response.Summary.State = StateEnabledReplication + response.Summary.ImageCount = len(response.Images) + + return response, nil +} + +// GetRbdMirrorImageStatus fetches mirroring status for reqeusted image +func GetRbdMirrorImageStatus(pool string, image string, cluster string, client string) (RbdReplicationImageStatus, error) { + resource := fmt.Sprintf("%s/%s", pool, image) + response := RbdReplicationImageStatus{} + args := []string{"mirror", "image", "status", resource, "--format", "json"} + + output, err := processExec.RunCommand("rbd", args...) 
+ if err != nil { + logger.Warnf("failed image status operation on res(%s): %v", resource, err) + return RbdReplicationImageStatus{State: StateDisabledReplication}, nil + } + + err = json.Unmarshal([]byte(output), &response) + if err != nil { + ne := fmt.Errorf("cannot unmarshal rbd response: %v", err) + logger.Errorf(ne.Error()) + return RbdReplicationImageStatus{State: StateDisabledReplication}, ne + } + + logger.Debugf("REPRBD: Image Status: %v", response) + + // Patch required values + response.State = StateEnabledReplication + response.IsPrimary = strings.Contains(response.Description, "local image is primary") + + return response, nil +} + +// EnablePoolMirroring enables mirroring for an rbd pool in pool mirroring or image mirroring mode. +func EnablePoolMirroring(pool string, mode types.RbdResourceType, localName string, remoteName string) error { + // Enable pool mirroring on the local cluster. + err := configurePoolMirroring(pool, mode, "", "") + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // Enable pool mirroring on the remote cluster. + err = configurePoolMirroring(pool, mode, localName, remoteName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // bootstrap peer + return BootstrapPeer(pool, localName, remoteName) +} + +// DisablePoolMirroring disables mirroring for an rbd pool. +func DisablePoolMirroring(pool string, peer RbdReplicationPeer, localName string, remoteName string) error { + // remove peer permissions + err := RemovePeer(pool, localName, remoteName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // Disable pool mirroring on the local cluster. + err = configurePoolMirroring(pool, types.RbdResourceDisabled, "", "") + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // Disable pool mirroring on the remote cluster. 
+ err = configurePoolMirroring(pool, types.RbdResourceDisabled, localName, remoteName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + return nil +} + +// DisableMirroringAllImagesInPool disables mirroring for all images for a pool enabled in pool mirroring mode. +func DisableMirroringAllImagesInPool(poolName string) error { + poolStatus, err := GetRbdMirrorVerbosePoolStatus(poolName, "", "") + if err != nil { + err := fmt.Errorf("failed to fetch status for %s pool: %v", poolName, err) + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + disabledImages := []string{} + for _, image := range poolStatus.Images { + err := disableRbdImageFeatures(poolName, image.Name, []string{"journaling"}) + if err != nil { + return fmt.Errorf("failed to disable journaling on %s/%s", poolName, image.Name) + } + disabledImages = append(disabledImages, image.Name) + } + + logger.Infof("REPRBD: Disabled %v images in %s pool.", disabledImages, poolName) + return nil +} + +// getPeerUUID returns the peer ID for the requested peer name. +func getPeerUUID(pool string, peerName string, client string, cluster string) (string, error) { + poolInfo, err := GetRbdMirrorPoolInfo(pool, cluster, client) + if err != nil { + logger.Error(err.Error()) + return "", err + } + + for _, peer := range poolInfo.Peers { + if peer.RemoteName == peerName { + return peer.Id, nil + } + } + + return "", fmt.Errorf("no peer found") +} + +// RemovePeer removes the rbd-mirror peer permissions for requested pool. 
+func RemovePeer(pool string, localName string, remoteName string) error { + // find local site's peer with name $remoteName + localPeer, err := getPeerUUID(pool, remoteName, "", "") + if err != nil { + return err + } + + remotePeer, err := getPeerUUID(pool, localName, localName, remoteName) + if err != nil { + return err + } + + // Remove local cluster's peer + err = peerRemove(pool, localPeer, "", "") + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // Remove remote's peer + err = peerRemove(pool, remotePeer, localName, remoteName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + return nil +} + +// BootstrapPeer bootstraps the rbd-mirror peer permissions for requested pool. +func BootstrapPeer(pool string, localName string, remoteName string) error { + // create bootstrap token on local site. + token, err := peerBootstrapCreate(pool, "", localName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // persist the peer token + err = writeRemotePeerToken(token, remoteName) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + // import peer token on remote site + return peerBootstrapImport(pool, localName, remoteName) +} + +// ############################# Ceph Commands ############################# +func configurePoolMirroring(pool string, mode types.RbdResourceType, localName string, remoteName string) error { + var args []string + if mode == types.RbdResourceDisabled { + args = []string{"mirror", "pool", "disable", pool} + } else { + args = []string{"mirror", "pool", "enable", pool, string(mode)} + } + + // add --cluster and --id args + args = appendRemoteClusterArgs(args, remoteName, localName) + + _, err := processExec.RunCommand("rbd", args...) 
+ if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to execute rbd command: %v", err) + } + + return nil +} + +// configureImageMirroring disables or enables image mirroring in requested mode. +func configureImageMirroring(req types.RbdReplicationRequest) error { + pool := req.SourcePool + image := req.SourceImage + mode := req.ReplicationType + schedule := req.Schedule + var args []string + + if mode == types.RbdReplicationDisabled { + args = []string{"mirror", "image", "disable", fmt.Sprintf("%s/%s", pool, image)} + } else { + args = []string{"mirror", "image", "enable", fmt.Sprintf("%s/%s", pool, image), string(mode)} + } + + _, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to configure rbd image feature: %v", err) + } + + if mode == types.RbdReplicationSnapshot { + err = createSnapshot(pool, image) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to create image(%s/%s) snapshot : %v", pool, image, err) + } + + err = configureSnapshotSchedule(pool, image, schedule, "") + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to create image(%s/%s) snapshot schedule(%s) : %v", pool, image, schedule, err) + } + } + + return nil +} + +func getSnapshotSchedule(pool string, image string) (imageSnapshotSchedule, error) { + if len(pool) == 0 || len(image) == 0 { + return imageSnapshotSchedule{}, fmt.Errorf("ImageName(%s/%s) not complete", pool, image) + } + + output, err := listSnapshotSchedule(pool, image) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return imageSnapshotSchedule{}, err + } + + ret := []imageSnapshotSchedule{} + err = json.Unmarshal(output, &ret) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return imageSnapshotSchedule{}, nil + } + + return ret[0], nil +} + +func listSnapshotSchedule(pool string, image string) ([]byte, 
error) { + args := []string{"mirror", "snapshot", "schedule", "list"} + + if len(pool) != 0 { + args = append(args, "--pool") + args = append(args, pool) + } + + if len(image) != 0 { + args = append(args, "--image") + args = append(args, image) + } + + output, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return []byte(""), err + } + + return []byte(output), nil +} + +func listAllImagesInPool(pool string, localName string, remoteName string) []string { + args := []string{"ls", pool, "--format", "json"} + + // add --cluster and --id args + args = appendRemoteClusterArgs(args, remoteName, localName) + + output, err := processExec.RunCommand("rbd", args...) + if err != nil { + return []string{} + } + + var ret []string + err = json.Unmarshal([]byte(output), &ret) + if err != nil { + logger.Errorf("REPRBD: unexpected error encountered while parsing json output %s", output) + return []string{} + } + + return ret +} + +func configureSnapshotSchedule(pool string, image string, schedule string, startTime string) error { + var args []string + if len(schedule) == 0 { + logger.Debugf("Empty schedule, no-op for (%s/%s)", pool, image) + return nil + } + + args = []string{"mirror", "snapshot", "schedule", "add", "--pool", pool} + + if len(image) != 0 { + args = append(args, "--image") + args = append(args, image) + } + + if len(schedule) != 0 { + args = append(args, schedule) + + // Also add start-time param if provided. + if len(startTime) != 0 { + args = append(args, startTime) + } + } + + _, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + return nil +} + +// createSnapshot creates a snapshot of the requested image +func createSnapshot(pool string, image string) error { + args := []string{"mirror", "image", "snapshot", fmt.Sprintf("%s/%s", pool, image)} + + _, err := processExec.RunCommand("rbd", args...) 
+ if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return err + } + + return nil +} + +// configureImageFeatures disables or enables requested feature on rbd image. +func configureImageFeatures(pool string, image string, op string, feature string) error { + // op is enable or disable + args := []string{"feature", op, fmt.Sprintf("%s/%s", pool, image), feature} + + _, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to configure rbd image feature: %v", err) + } + + return nil +} + +// peerBootstrapCreate generates peer bootstrap token on remote ceph cluster. +func peerBootstrapCreate(pool string, client string, cluster string) (string, error) { + args := []string{ + "mirror", "pool", "peer", "bootstrap", "create", "--site-name", cluster, pool, + } + + // add --cluster and --id args if remote op. + if len(client) != 0 { + args = appendRemoteClusterArgs(args, cluster, client) + } + + output, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return "", fmt.Errorf("failed to bootstrap peer token: %v", err) + } + + return output, nil +} + +// peerBootstrapImport imports the bootstrap peer on the local cluster using a tokenfile. +func peerBootstrapImport(pool string, client string, cluster string) error { + tokenPath := filepath.Join( + constants.GetPathConst().ConfPath, + "rbd_mirror", + fmt.Sprintf("%s_peer_keyring", cluster), + ) + + args := []string{ + "mirror", "pool", "peer", "bootstrap", "import", "--site-name", cluster, "--direction", "rx-tx", pool, tokenPath, + } + + // add --cluster and --id args if remote op. + if len(client) != 0 { + args = appendRemoteClusterArgs(args, cluster, client) + } + + _, err := processExec.RunCommand("rbd", args...) 
+ if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to import peer bootstrap token: %v", err) + } + + return nil +} + +// peerRemove imports the bootstrap peer on the local cluster using a tokenfile. +func peerRemove(pool string, peerId string, localName string, remoteName string) error { + args := []string{ + "mirror", "pool", "peer", "remove", pool, peerId, + } + + // add --cluster and --id args + args = appendRemoteClusterArgs(args, remoteName, localName) + + _, err := processExec.RunCommand("rbd", args...) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("failed to remove peer(%s) for pool(%s): %v", peerId, pool, err) + } + + return nil +} + +// ########################### HELPERS ########################### + +func IsRemoteConfiguredForRbdMirror(remoteName string) bool { + pools := ListPools("rbd") + for _, pool := range pools { + poolInfo, err := GetRbdMirrorPoolInfo(pool.Name, "", "") + if err != nil { + return false + } + + for _, peer := range poolInfo.Peers { + if peer.RemoteName == remoteName { + return true + } + } + } + + return false +} + +// appendRemoteClusterArgs appends the cluster and client arguments to ceph commands +func appendRemoteClusterArgs(args []string, cluster string, client string) []string { + logger.Debugf("RBD Replication: old args are %v", args) + // check if appendage is needed + if len(cluster) == 0 && len(client) == 0 { + // return as is + return args + } + + if len(cluster) > 0 { + args = append(args, "--cluster") + args = append(args, cluster) + } + + if len(client) > 0 { + args = append(args, "--id") + args = append(args, client) + } + + logger.Debugf("RBD Replication: new args are %v", args) + + // return modified args + return args +} + +// writeRemotePeerToken writes the provided string to a newly created token file. 
+func writeRemotePeerToken(token string, remoteName string) error { + // create token Dir + tokenDirPath := filepath.Join( + constants.GetPathConst().ConfPath, + "rbd_mirror", + ) + + err := os.MkdirAll(tokenDirPath, constants.PermissionWorldNoAccess) + if err != nil { + logger.Errorf("REPRBD: %s", err.Error()) + return fmt.Errorf("unable to create %q: %w", tokenDirPath, err) + } + + // create token file + tokenFilePath := filepath.Join( + tokenDirPath, + fmt.Sprintf("%s_peer_keyring", remoteName), + ) + file, err := os.Create(tokenFilePath) + if err != nil { + ne := fmt.Errorf("failed to create the token file(%s): %w", tokenFilePath, err) + logger.Errorf("REPRBD: %s", ne.Error()) + return ne + } + + // write to file + _, err = file.WriteString(token) + if err != nil { + ne := fmt.Errorf("failed to write the token file(%s): %w", tokenFilePath, err) + logger.Errorf("REPRBD: %s", ne.Error()) + return ne + } + + return nil +} diff --git a/microceph/ceph/rbd_mirror_test.go b/microceph/ceph/rbd_mirror_test.go new file mode 100644 index 00000000..b609bf9c --- /dev/null +++ b/microceph/ceph/rbd_mirror_test.go @@ -0,0 +1,95 @@ +package ceph + +import ( + "os" + "testing" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/mocks" + "github.com/canonical/microceph/microceph/tests" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +type RbdMirrorSuite struct { + tests.BaseSuite + TestStateInterface *mocks.StateInterface +} + +func TestRbdMirror(t *testing.T) { + suite.Run(t, new(RbdMirrorSuite)) +} + +func (ks *RbdMirrorSuite) SetupTest() { + ks.BaseSuite.SetupTest() + ks.CopyCephConfigs() +} + +func (ks *RbdMirrorSuite) TestVerbosePoolStatus() { + r := mocks.NewRunner(ks.T()) + + output, _ := os.ReadFile("./test_assets/rbd_mirror_verbose_pool_status.json") + + // mocks and expectations + r.On("RunCommand", []interface{}{ + "rbd", "mirror", "pool", "status", "pool", "--verbose", "--format", 
"json"}...).Return(string(output), nil).Once() + processExec = r + + // Method call + resp, err := GetRbdMirrorVerbosePoolStatus("pool", "", "") + assert.NoError(ks.T(), err) + assert.Equal(ks.T(), resp.Name, "pool") +} + +func (ks *RbdMirrorSuite) TestPoolStatus() { + r := mocks.NewRunner(ks.T()) + + output, _ := os.ReadFile("./test_assets/rbd_mirror_pool_status.json") + + // mocks and expectations + r.On("RunCommand", []interface{}{ + "rbd", "mirror", "pool", "status", "pool", "--format", "json"}...).Return(string(output), nil).Once() + processExec = r + + // Method call + resp, err := GetRbdMirrorPoolStatus("pool", "", "") + assert.NoError(ks.T(), err) + assert.Equal(ks.T(), resp.Health, RbdReplicationHealth("OK")) + assert.Equal(ks.T(), resp.DaemonHealth, RbdReplicationHealth("OK")) + assert.Equal(ks.T(), resp.ImageHealth, RbdReplicationHealth("OK")) +} + +func (ks *RbdMirrorSuite) TestImageStatus() { + r := mocks.NewRunner(ks.T()) + + output, _ := os.ReadFile("./test_assets/rbd_mirror_image_status.json") + + // mocks and expectations + r.On("RunCommand", []interface{}{ + "rbd", "mirror", "image", "status", "pool/image_one", "--format", "json"}...).Return(string(output), nil).Once() + processExec = r + + // Method call + resp, err := GetRbdMirrorImageStatus("pool", "image_one", "", "") + assert.NoError(ks.T(), err) + assert.Equal(ks.T(), resp.Name, "image_one") + assert.Equal(ks.T(), resp.IsPrimary, true) +} + +func (ks *RbdMirrorSuite) TestPoolInfo() { + r := mocks.NewRunner(ks.T()) + + output, _ := os.ReadFile("./test_assets/rbd_mirror_pool_info.json") + + // mocks and expectations + r.On("RunCommand", []interface{}{ + "rbd", "mirror", "pool", "info", "pool", "--format", "json"}...).Return(string(output), nil).Once() + processExec = r + + // Method call + resp, err := GetRbdMirrorPoolInfo("pool", "", "") + assert.NoError(ks.T(), err) + assert.Equal(ks.T(), resp.Mode, types.RbdResourcePool) + assert.Equal(ks.T(), resp.LocalSiteName, "magical") + 
assert.Equal(ks.T(), resp.Peers[0].RemoteName, "simple") +} diff --git a/microceph/ceph/replication.go b/microceph/ceph/replication.go new file mode 100644 index 00000000..829e7eff --- /dev/null +++ b/microceph/ceph/replication.go @@ -0,0 +1,125 @@ +package ceph + +import ( + "context" + "fmt" + "reflect" + + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/constants" + "github.com/canonical/microceph/microceph/interfaces" + "github.com/qmuntal/stateless" +) + +type repArgIndex int +type ReplicationState string + +const ( + StateDisabledReplication ReplicationState = "replication_disabled" + StateEnabledReplication ReplicationState = "replication_enabled" +) + +const ( + repArgHandler repArgIndex = 0 + repArgResponse repArgIndex = 1 + repArgState repArgIndex = 2 +) + +type ReplicationHandlerInterface interface { + PreFill(ctx context.Context, request types.ReplicationRequest) error + GetResourceState() ReplicationState + EnableHandler(ctx context.Context, args ...any) error + DisableHandler(ctx context.Context, args ...any) error + ConfigureHandler(ctx context.Context, args ...any) error + ListHandler(ctx context.Context, args ...any) error + StatusHandler(ctx context.Context, args ...any) error +} + +func GetReplicationHandler(name string) ReplicationHandlerInterface { + // Add RGW and CephFs Replication handlers here. 
+ table := map[string]ReplicationHandlerInterface{ + "rbd": &RbdReplicationHandler{}, + } + + rh, ok := table[name] + if !ok { + return nil + } + + return rh +} + +func getAllEvents() []stateless.Trigger { + return []stateless.Trigger{ + constants.EventEnableReplication, + constants.EventDisableReplication, + constants.EventConfigureReplication, + constants.EventListReplication, + constants.EventStatusReplication, + } +} + +func GetReplicationStateMachine(initialState ReplicationState) *stateless.StateMachine { + newFsm := stateless.NewStateMachine(initialState) + // Configure transitions for disabled state. + newFsm.Configure(StateDisabledReplication). + Permit(constants.EventEnableReplication, StateEnabledReplication). + OnEntryFrom(constants.EventDisableReplication, disableHandler). + InternalTransition(constants.EventListReplication, listHandler). + InternalTransition(constants.EventDisableReplication, disableHandler) + + // Configure transitions for enabled state. + newFsm.Configure(StateEnabledReplication). + Permit(constants.EventDisableReplication, StateDisabledReplication). + OnEntryFrom(constants.EventEnableReplication, enableHandler). + InternalTransition(constants.EventConfigureReplication, configureHandler). + InternalTransition(constants.EventListReplication, listHandler). + InternalTransition(constants.EventStatusReplication, statusHandler) + + // Check Event params type. + var outputType *string + var stateType interfaces.CephState + var inputType ReplicationHandlerInterface + for _, event := range getAllEvents() { + newFsm.SetTriggerParameters(event, reflect.TypeOf(&inputType).Elem(), reflect.TypeOf(outputType), reflect.TypeOf(stateType)) + } + + // Add logger callback for all transitions + newFsm.OnTransitioning(logTransitionHandler) + + // Add handler for unhandled transitions. 
+ newFsm.OnUnhandledTrigger(unhandledTransitionHandler) + + logger.Debugf("REPFSM: Created from state: %s", initialState) + return newFsm +} + +func logTransitionHandler(_ context.Context, t stateless.Transition) { + logger.Infof("REPFSM: Event(%s), SrcState(%s), DstState(%s)", t.Trigger, t.Source, t.Destination) +} + +func unhandledTransitionHandler(_ context.Context, state stateless.State, trigger stateless.Trigger, _ []string) error { + return fmt.Errorf("REPFSM: operation: %s is not permitted at %s state", trigger, state) +} + +func enableHandler(ctx context.Context, args ...any) error { + rh := args[repArgHandler].(ReplicationHandlerInterface) + return rh.EnableHandler(ctx, args...) +} +func disableHandler(ctx context.Context, args ...any) error { + rh := args[repArgHandler].(ReplicationHandlerInterface) + return rh.DisableHandler(ctx, args...) +} +func configureHandler(ctx context.Context, args ...any) error { + rh := args[repArgHandler].(ReplicationHandlerInterface) + return rh.ConfigureHandler(ctx, args...) +} +func listHandler(ctx context.Context, args ...any) error { + rh := args[repArgHandler].(ReplicationHandlerInterface) + return rh.ListHandler(ctx, args...) +} +func statusHandler(ctx context.Context, args ...any) error { + rh := args[repArgHandler].(ReplicationHandlerInterface) + return rh.StatusHandler(ctx, args...) 
+} diff --git a/microceph/ceph/replication_rbd.go b/microceph/ceph/replication_rbd.go new file mode 100644 index 00000000..70fba9b1 --- /dev/null +++ b/microceph/ceph/replication_rbd.go @@ -0,0 +1,433 @@ +package ceph + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/canonical/lxd/shared/logger" + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/constants" + "github.com/canonical/microceph/microceph/database" + "github.com/canonical/microceph/microceph/interfaces" +) + +type RbdReplicationPeer struct { + Id string `json:"uuid"` + MirrorId string `json:"mirror_uuid"` + RemoteName string `json:"site_name"` + Direction types.RbdReplicationDirection `json:"direction"` +} + +type RbdReplicationPoolInfo struct { + Mode types.RbdResourceType `json:"mode"` + LocalSiteName string `json:"site_name"` + Peers []RbdReplicationPeer `json:"peers"` +} + +type RbdReplicationHealth string + +const ( + RbdReplicationHealthOK RbdReplicationHealth = "OK" + RbdReplicationHealthWarn RbdReplicationHealth = "WARNING" + RbdReplicationHealthErr RbdReplicationHealth = "Error" +) + +type RbdReplicationPoolStatusCmdOutput struct { + Summary RbdReplicationPoolStatus `json:"summary"` +} + +// RbdReplicationPoolStatus does not have tags defined for jason because it needs custom logic. 
+type RbdReplicationPoolStatus struct { + State ReplicationState + ImageCount int + Health RbdReplicationHealth `json:"health" yaml:"health"` + DaemonHealth RbdReplicationHealth `json:"daemon_health" yaml:"daemon health"` + ImageHealth RbdReplicationHealth `json:"image_health" yaml:"image health"` + Description map[string]int `json:"states" yaml:"images"` +} + +type RbdReplicationVerbosePoolStatus struct { + Name string `json:"name"` + Summary RbdReplicationPoolStatus `json:"summary"` + Images []RbdReplicationImageStatus `json:"images"` +} + +type RbdReplicationImagePeer struct { + MirrorId string `json:"mirror_uuids"` + RemoteName string `json:"site_name"` + State string `json:"state"` + Status string `json:"description"` + LastUpdate string `json:"last_update"` +} + +type RbdReplicationImageStatus struct { + Name string `json:"name"` + State ReplicationState // whether replication is enabled or disabled + IsPrimary bool // not fetched from json field hence no tag for json. + ID string `json:"global_id"` + Status string `json:"state"` + LastUpdate string `json:"last_update"` + Peers []RbdReplicationImagePeer `json:"peer_sites"` + Description string `json:"description"` +} + +type RbdReplicationHandler struct { + // Resource Info + PoolInfo RbdReplicationPoolInfo `json:"pool_info"` + PoolStatus RbdReplicationPoolStatus `json:"pool_status"` + ImageStatus RbdReplicationImageStatus `json:"image_status"` + // Request Info + Request types.RbdReplicationRequest +} + +// PreFill populates the handler struct with requested rbd pool/image information. 
+func (rh *RbdReplicationHandler) PreFill(ctx context.Context, request types.ReplicationRequest) error { + var err error + req := request.(types.RbdReplicationRequest) + rh.Request = req + // Populate pool Info + rh.PoolInfo, err = GetRbdMirrorPoolInfo(req.SourcePool, "", "") + if err != nil { + return err + } + + // Populate pool status + rh.PoolStatus, err = GetRbdMirrorPoolStatus(req.SourcePool, "", "") + if err != nil { + return err + } + + if req.ResourceType == types.RbdResourceImage { + // Populate image status + rh.ImageStatus, err = GetRbdMirrorImageStatus(req.SourcePool, req.SourceImage, "", "") + return err + } + + return nil +} + +// GetResourceState fetches the mirroring state for requested rbd pool/image. +func (rh *RbdReplicationHandler) GetResourceState() ReplicationState { + // Image request but mirroring is disabled on image. + if rh.Request.ResourceType == types.RbdResourceImage { + return rh.ImageStatus.State + } + + // Pool request + return rh.PoolStatus.State +} + +// EnableHandler enables mirroring for requested rbd pool/image. 
+func (rh *RbdReplicationHandler) EnableHandler(ctx context.Context, args ...any) error { + logger.Debugf("REPFSM: Enable handler, Req %v", rh.Request) + + st := args[repArgState].(interfaces.CephState).ClusterState() + dbRec, err := database.GetRemoteDb(ctx, st, rh.Request.RemoteName) + if err != nil { + errNew := fmt.Errorf("remote (%s) does not exist: %w", rh.Request.RemoteName, err) + return errNew + } + + logger.Infof("REPRBD: Local(%s) Remote(%s)", dbRec[0].LocalName, dbRec[0].Name) + if rh.Request.ResourceType == types.RbdResourcePool { + return handlePoolEnablement(rh, dbRec[0].LocalName, dbRec[0].Name) + } else if rh.Request.ResourceType == types.RbdResourceImage { + return handleImageEnablement(rh, dbRec[0].LocalName, dbRec[0].Name) + } + + return fmt.Errorf("unknown enable request for rbd mirroring %s", rh.Request.ResourceType) +} + +// DisableHandler disables mirroring configured for requested rbd pool/image. +func (rh *RbdReplicationHandler) DisableHandler(ctx context.Context, args ...any) error { + logger.Debugf("REPFSM: Disable handler, Req %v", rh.Request) + + st := args[repArgState].(interfaces.CephState).ClusterState() + dbRec, err := database.GetRemoteDb(ctx, st, rh.Request.RemoteName) + if err != nil { + errNew := fmt.Errorf("remote (%s) does not exist: %w", rh.Request.RemoteName, err) + return errNew + } + + logger.Infof("REPRBD: Entered RBD Disable Handler Local(%s) Remote(%s)", dbRec[0].LocalName, dbRec[0].Name) + if rh.Request.ResourceType == types.RbdResourcePool { + return handlePoolDisablement(rh, dbRec[0].LocalName, dbRec[0].Name) + } else if rh.Request.ResourceType == types.RbdResourceImage { + return handleImageDisablement(rh) + } + + return fmt.Errorf("unknown disable request for rbd mirroring %s", rh.Request.ResourceType) +} + +// ConfigureHandler configures replication properties for requested rbd pool/image. 
+func (rh *RbdReplicationHandler) ConfigureHandler(ctx context.Context, args ...any) error { + logger.Debugf("REPFSM: Configure handler, Req %v", rh.Request) + + schedule, err := getSnapshotSchedule(rh.Request.SourcePool, rh.Request.SourceImage) + if err != nil { + return err + } + + if rh.Request.Schedule != schedule.Schedule { + return configureSnapshotSchedule(rh.Request.SourcePool, rh.Request.SourceImage, rh.Request.Schedule, "") + } + + return nil +} + +// ListHandler fetches a list of rbd pools/images configured for mirroring. +func (rh *RbdReplicationHandler) ListHandler(ctx context.Context, args ...any) error { + logger.Debugf("REPFSM: List handler, Req %v", rh.Request) + + // fetch all ceph pools initialised with rbd application. + pools := ListPools("rbd") + + logger.Debugf("REPRBD: Scan active pools %v", pools) + + // fetch verbose pool status for each pool + statusList := types.RbdPoolList{} + for _, pool := range pools { + poolStatus, err := GetRbdMirrorVerbosePoolStatus(pool.Name, "", "") + if err != nil { + logger.Warnf("failed to fetch status for %s pool: %v", pool.Name, err) + continue + } + + images := make([]types.RbdPoolListImageBrief, len(poolStatus.Images)) + for id, image := range poolStatus.Images { + var rep_type string + if strings.Contains(image.Description, "snapshot") { + rep_type = "snapshot" + } else { + rep_type = "journaling" + } + images[id] = types.RbdPoolListImageBrief{ + Name: image.Name, + Type: rep_type, + IsPrimary: image.IsPrimary, + LastLocalUpdate: image.LastUpdate, + } + } + + statusList = append(statusList, types.RbdPoolBrief{ + Name: pool.Name, + Images: images, + }) + } + + logger.Debugf("REPRBD: List Verbose Pool status: %v", statusList) + + resp, err := json.Marshal(statusList) + if err != nil { + return fmt.Errorf("failed to marshal response(%v): %v", statusList, err) + } + + // pass response for API + *args[repArgResponse].(*string) = string(resp) + return nil +} + +// StatusHandler fetches the status of requested 
rbd pool/image resource. +func (rh *RbdReplicationHandler) StatusHandler(ctx context.Context, args ...any) error { + logger.Debugf("REPFSM: Status handler, Req %v", rh.Request) + + var resp any + + // Populate Status resp. + if rh.Request.ResourceType == types.RbdResourcePool { + // handle pool status + remotes := make([]types.RbdPoolStatusRemoteBrief, len(rh.PoolInfo.Peers)) + for id, remote := range rh.PoolInfo.Peers { + remotes[id] = types.RbdPoolStatusRemoteBrief{ + Name: remote.RemoteName, + Direction: string(remote.Direction), + UUID: remote.Id, + } + } + + // Also add image info + + resp = types.RbdPoolStatus{ + Name: rh.Request.SourcePool, + Type: string(rh.PoolInfo.Mode), + HealthReplication: string(rh.PoolStatus.Health), + HealthImages: string(rh.PoolStatus.ImageHealth), + HealthDaemon: string(rh.PoolStatus.DaemonHealth), + ImageCount: rh.PoolStatus.ImageCount, + Remotes: remotes, + } + } else if rh.Request.ResourceType == types.RbdResourceImage { + // handle image status + remotes := make([]types.RbdImageStatusRemoteBrief, len(rh.ImageStatus.Peers)) + for id, remote := range rh.ImageStatus.Peers { + remotes[id] = types.RbdImageStatusRemoteBrief{ + Name: remote.RemoteName, + Status: remote.Status, + LastRemoteUpdate: remote.LastUpdate, + } + } + + var rep_type string + if strings.Contains(rh.ImageStatus.Status, "snapshot") { + rep_type = "snapshot" + } else { + rep_type = "journaling" + } + + resp = types.RbdImageStatus{ + Name: fmt.Sprintf("%s/%s", rh.Request.SourcePool, rh.Request.SourceImage), + ID: rh.ImageStatus.ID, + Type: rep_type, + Status: rh.ImageStatus.Status, + LastLocalUpdate: rh.ImageStatus.LastUpdate, + IsPrimary: rh.ImageStatus.IsPrimary, + Remotes: remotes, + } + } else { + return fmt.Errorf("REPRBD: Unable resource type(%s), cannot find status", rh.Request.ResourceType) + } + + // Marshal to json string + data, err := json.Marshal(resp) + if err != nil { + err := fmt.Errorf("failed to marshal resource status: %w", err) + 
logger.Error(err.Error()) + return err + } + + // pass response for API + *args[repArgResponse].(*string) = string(data) + return nil +} + +// ################### Helper Functions ################### +// Enable handler for pool resource. +func handlePoolEnablement(rh *RbdReplicationHandler, localSite string, remoteSite string) error { + if rh.PoolInfo.Mode == types.RbdResourcePool { + return nil // already in pool mirroring mode + } else + + // Fail if in Image mirroring mode with Mirroring Images > 0 + if rh.PoolInfo.Mode == types.RbdResourceImage { + enabledImageCount := rh.PoolStatus.ImageCount + if enabledImageCount != 0 { + return fmt.Errorf("pool (%s) in Image mirroring mode, Disable %d mirroring Images", rh.Request.SourcePool, enabledImageCount) + } + } + + err := EnablePoolMirroring(rh.Request.SourcePool, types.RbdResourcePool, localSite, remoteSite) + if err != nil { + return err + } + + if !rh.Request.SkipAutoEnable { + // Enable mirroring for all images in pool. + images := listAllImagesInPool(rh.Request.SourcePool, "", "") + for _, image := range images { + err := enableRbdImageFeatures(rh.Request.SourcePool, image, constants.RbdJournalingEnableFeatureSet[:]) + if err != nil { + return err + } + } + } + + return nil +} + +// Enable handler for image resource. 
+func handleImageEnablement(rh *RbdReplicationHandler, localSite string, remoteSite string) error { + if rh.PoolInfo.Mode == types.RbdResourceDisabled { + // Enable pool mirroring in Image mirroring mode + err := EnablePoolMirroring(rh.Request.SourcePool, types.RbdResourceImage, localSite, remoteSite) + if err != nil { + logger.Error(err.Error()) + return err + } + // continue for Image enablement + } else if rh.PoolInfo.Mode == types.RbdResourcePool { + if rh.Request.ReplicationType == types.RbdReplicationJournaling { + return enableRbdImageFeatures(rh.Request.SourcePool, rh.Request.SourceImage, constants.RbdJournalingEnableFeatureSet[:]) + } else { + return fmt.Errorf("parent pool (%s) enabled in Journaling mode, Image(%s) requested in Snapshot mode", rh.Request.SourcePool, rh.Request.SourceImage) + } + } + + // pool in Image mirroring mode, Enable Image in requested mode. + return configureImageMirroring(rh.Request) +} + +// Disable handler for pool resource. +func handlePoolDisablement(rh *RbdReplicationHandler, localSite string, remoteSite string) error { + // Handle Pool already disabled + if rh.PoolInfo.Mode == types.RbdResourceDisabled { + return nil + } + + // Fail if both sites not healthy and not a forced operation. + if rh.PoolStatus.Health != RbdReplicationHealthOK && !rh.Request.IsForceOp { + return fmt.Errorf("pool replication status not OK(%s), Can't proceed", rh.PoolStatus.Health) + } + + // Fail if in Image mirroring mode with Mirroring Images > 0 + if rh.PoolInfo.Mode == types.RbdResourceImage { + enabledImageCount := rh.PoolStatus.ImageCount + if enabledImageCount != 0 { + return fmt.Errorf("pool (%s) in Image mirroring mode, has %d images mirroring", rh.Request.SourcePool, enabledImageCount) + } + } else + + // If pool in pool mirroring mode, disable all images. 
+ if rh.PoolInfo.Mode == types.RbdResourcePool { + err := DisableMirroringAllImagesInPool(rh.Request.SourcePool) + if err != nil { + return err + } + } + + return DisablePoolMirroring(rh.Request.SourcePool, rh.PoolInfo.Peers[0], localSite, remoteSite) +} + +// Disable handler for image resource. +func handleImageDisablement(rh *RbdReplicationHandler) error { + // Pool already disabled + if rh.PoolInfo.Mode == types.RbdResourceDisabled { + return nil + } + + // Image already disabled + if rh.ImageStatus.State == StateDisabledReplication { + return nil + } + + if rh.PoolInfo.Mode == types.RbdResourcePool { + return disableRbdImageFeatures(rh.Request.SourcePool, rh.Request.SourceImage, []string{"journaling"}) + } + + // patch replication type + rh.Request.ReplicationType = types.RbdReplicationDisabled + return configureImageMirroring(rh.Request) +} + +// enableImageFeatures enables the list of rbd features on the requested resource. +func enableRbdImageFeatures(poolName string, imageName string, features []string) error { + for _, feature := range features { + err := configureImageFeatures(poolName, imageName, "enable", feature) + if err != nil && !strings.Contains(err.Error(), "one or more requested features are already enabled") { + return err + } + } + return nil +} + +// disableRbdImageFeatures disables the list of rbd features on the requested resource. 
+func disableRbdImageFeatures(poolName string, imageName string, features []string) error { + for _, feature := range features { + err := configureImageFeatures(poolName, imageName, "disable", feature) + if err != nil { + return err + } + } + return nil +} diff --git a/microceph/ceph/test_assets/rbd_mirror_image_status.json b/microceph/ceph/test_assets/rbd_mirror_image_status.json new file mode 100644 index 00000000..3086994c --- /dev/null +++ b/microceph/ceph/test_assets/rbd_mirror_image_status.json @@ -0,0 +1,22 @@ +{ + "name": "image_one", + "global_id": "c6e4ea14-be4a-41f4-ba76-dbb64595e600", + "state": "up+stopped", + "description": "local image is primary", + "daemon_service": { + "service_id": "14177", + "instance_id": "14221", + "daemon_id": "magical-reindeer", + "hostname": "magical-reindeer" + }, + "last_update": "2024-10-09 07:56:24", + "peer_sites": [ + { + "site_name": "simple", + "mirror_uuids": "84f58bda-4eea-45b1-9a5a-296cf1b82a65", + "state": "up+replaying", + "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", + "last_update": "2024-10-09 07:56:28" + } + ] +} diff --git a/microceph/ceph/test_assets/rbd_mirror_pool_info.json b/microceph/ceph/test_assets/rbd_mirror_pool_info.json new file mode 100644 index 00000000..f2063b51 --- /dev/null +++ b/microceph/ceph/test_assets/rbd_mirror_pool_info.json @@ -0,0 +1,13 @@ +{ + "mode": "pool", + "site_name": "magical", + "peers": [ + { + "uuid": "f3ee5939-66a6-494f-849a-a4402ddb4d18", + "direction": "rx-tx", + "site_name": "simple", + "mirror_uuid": "84f58bda-4eea-45b1-9a5a-296cf1b82a65", + "client_name": "client.rbd-mirror-peer" + } + ] +} diff --git a/microceph/ceph/test_assets/rbd_mirror_pool_status.json b/microceph/ceph/test_assets/rbd_mirror_pool_status.json new file mode 100644 index 
00000000..399c3166 --- /dev/null +++ b/microceph/ceph/test_assets/rbd_mirror_pool_status.json @@ -0,0 +1,10 @@ +{ + "summary": { + "health": "OK", + "daemon_health": "OK", + "image_health": "OK", + "states": { + "replaying": 2 + } + } +} diff --git a/microceph/ceph/test_assets/rbd_mirror_verbose_pool_status.json b/microceph/ceph/test_assets/rbd_mirror_verbose_pool_status.json new file mode 100644 index 00000000..0c325eeb --- /dev/null +++ b/microceph/ceph/test_assets/rbd_mirror_verbose_pool_status.json @@ -0,0 +1,67 @@ +{ + "summary": { + "health": "OK", + "daemon_health": "OK", + "image_health": "OK", + "states": { + "replaying": 2 + } + }, + "daemons": [ + { + "service_id": "14173", + "instance_id": "14198", + "client_id": "magical-reindeer", + "hostname": "magical-reindeer", + "ceph_version": "19.2.0~git20240301.4c76c50", + "leader": true, + "health": "OK" + } + ], + "images": [ + { + "name": "image_one", + "global_id": "ebbea3fc-78c5-41e7-a796-d2fc59c691c6", + "state": "up+stopped", + "description": "local image is primary", + "daemon_service": { + "service_id": "14173", + "instance_id": "14198", + "daemon_id": "magical-reindeer", + "hostname": "magical-reindeer" + }, + "last_update": "2024-10-09 05:55:27", + "peer_sites": [ + { + "site_name": "simple", + "mirror_uuids": "ced68f5f-f982-4ca2-b823-c68be7b86c93", + "state": "up+replaying", + "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", + "last_update": "2024-10-09 05:55:27" + } + ] + }, + { + "name": "image_two", + "global_id": "0f35d44b-60fd-4294-adc9-eb7a65815db9", + "state": "up+stopped", + "description": "local image is primary", + "daemon_service": { + "service_id": "14173", + "instance_id": "14198", + "daemon_id": "magical-reindeer", + "hostname": "magical-reindeer" + }, + "last_update": 
"2024-10-09 05:55:27", + "peer_sites": [ + { + "site_name": "simple", + "mirror_uuids": "ced68f5f-f982-4ca2-b823-c68be7b86c93", + "state": "up+replaying", + "description": "replaying, {\"bytes_per_second\":0.0,\"entries_behind_primary\":0,\"entries_per_second\":0.0,\"non_primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1},\"primary_position\":{\"entry_tid\":3,\"object_number\":3,\"tag_tid\":1}}", + "last_update": "2024-10-09 05:55:27" + } + ] + } + ] +} diff --git a/microceph/client/remote.go b/microceph/client/remote.go index 84257322..604e3c8e 100644 --- a/microceph/client/remote.go +++ b/microceph/client/remote.go @@ -61,7 +61,7 @@ func SendRemoteImportToClusterMembers(ctx context.Context, s state.State, data t return nil } -// FetchAllRemotes pulls all remote records from MicroCeph. +// FetchAllRemotes queries the remote API and returns a slice of configured remote. func FetchAllRemotes(ctx context.Context, c *microCli.Client) ([]types.RemoteRecord, error) { queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) defer cancel() diff --git a/microceph/client/remote_replication.go b/microceph/client/remote_replication.go new file mode 100644 index 00000000..cd15f807 --- /dev/null +++ b/microceph/client/remote_replication.go @@ -0,0 +1,41 @@ +package client + +import ( + "context" + "fmt" + "time" + + "github.com/canonical/lxd/shared/api" + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/constants" + microCli "github.com/canonical/microcluster/v2/client" +) + +// Sends replication request for creating, deleting, getting, and listing remote replication. 
+func SendRemoteReplicationRequest(ctx context.Context, c *microCli.Client, data types.ReplicationRequest) (string, error) { + var err error + var resp string + queryCtx, cancel := context.WithTimeout(ctx, time.Second*120) + defer cancel() + + if data.GetWorkloadRequestType() == constants.EventListReplication { + // list request uses replication/$workload endpoint + err = c.Query( + queryCtx, data.GetAPIRequestType(), types.ExtendedPathPrefix, + api.NewURL().Path("ops", "replication", string(data.GetWorkloadType())), + data, &resp, + ) + } else { + // Other requests use replication/$workload/$resource endpoint + err = c.Query( + queryCtx, data.GetAPIRequestType(), types.ExtendedPathPrefix, + api.NewURL().Path("ops", "replication", string(data.GetWorkloadType()), data.GetAPIObjectId()), + data, &resp, + ) + } + if err != nil { + return "", fmt.Errorf("failed to process %s request for %s: %w", data.GetWorkloadRequestType(), data.GetWorkloadType(), err) + } + + return resp, nil +} diff --git a/microceph/cmd/microceph/remote.go b/microceph/cmd/microceph/remote.go index 5c8c1ab1..b634d1d1 100644 --- a/microceph/cmd/microceph/remote.go +++ b/microceph/cmd/microceph/remote.go @@ -23,6 +23,9 @@ func (c *cmdRemote) Command() *cobra.Command { // Remove subcommand remoteRemoveCmd := cmdRemoteRemove{common: c.common} cmd.AddCommand(remoteRemoveCmd.Command()) + // Replication subcommand + remoteReplicationCmd := cmdRemoteReplication{common: c.common} + cmd.AddCommand(remoteReplicationCmd.Command()) // Workaround for subcommand usage errors. 
See: https://github.com/spf13/cobra/issues/706 cmd.Args = cobra.NoArgs diff --git a/microceph/cmd/microceph/remote_list.go b/microceph/cmd/microceph/remote_list.go index 16f607b1..a164475d 100644 --- a/microceph/cmd/microceph/remote_list.go +++ b/microceph/cmd/microceph/remote_list.go @@ -11,6 +11,7 @@ import ( "github.com/canonical/microcluster/v2/microcluster" "github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" ) type cmdRemoteList struct { @@ -74,7 +75,10 @@ func printRemoteTable(remotes []types.RemoteRecord) error { for _, remote := range remotes { t.AppendRow(table.Row{remote.ID, remote.Name, remote.LocalName}) } - t.SetStyle(table.StyleColoredBright) + if terminal.IsTerminal(0) && terminal.IsTerminal(1) { + // Set style if interactive shell. + t.SetStyle(table.StyleColoredBright) + } t.Render() return nil } diff --git a/microceph/cmd/microceph/remote_replication.go b/microceph/cmd/microceph/remote_replication.go new file mode 100644 index 00000000..0b9c3ab7 --- /dev/null +++ b/microceph/cmd/microceph/remote_replication.go @@ -0,0 +1,22 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +type cmdRemoteReplication struct { + common *CmdControl +} + +func (c *cmdRemoteReplication) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "replication", + Short: "manage remote replication", + } + + // Replication RBD commands + replicationRbdCmd := cmdRemoteReplicationRbd{common: c.common} + cmd.AddCommand(replicationRbdCmd.Command()) + + return cmd +} diff --git a/microceph/cmd/microceph/remote_replication_rbd.go b/microceph/cmd/microceph/remote_replication_rbd.go new file mode 100644 index 00000000..4a595c0a --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd.go @@ -0,0 +1,38 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +type cmdRemoteReplicationRbd struct { + common *CmdControl +} + +func (c *cmdRemoteReplicationRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + 
Use: "rbd", + Short: "manage RBD remote replication", + } + + // Replication enable command + remoteReplicationRbdEnableCmd := cmdRemoteReplicationEnableRbd{common: c.common} + cmd.AddCommand(remoteReplicationRbdEnableCmd.Command()) + + // Replication disable command + remoteReplicationRbdDisableCmd := cmdRemoteReplicationDisableRbd{common: c.common} + cmd.AddCommand(remoteReplicationRbdDisableCmd.Command()) + + // Replication list command + remoteReplicationRbdListCmd := cmdRemoteReplicationListRbd{common: c.common} + cmd.AddCommand(remoteReplicationRbdListCmd.Command()) + + // Replication status command + remoteReplicationRbdStatusCmd := cmdRemoteReplicationStatusRbd{common: c.common} + cmd.AddCommand(remoteReplicationRbdStatusCmd.Command()) + + // Replication configure command + remoteReplicationRbdConfigureCmd := cmdRemoteReplicationConfigureRbd{common: c.common} + cmd.AddCommand(remoteReplicationRbdConfigureCmd.Command()) + + return cmd +} diff --git a/microceph/cmd/microceph/remote_replication_rbd_configure.go b/microceph/cmd/microceph/remote_replication_rbd_configure.go new file mode 100644 index 00000000..c3b1bb70 --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd_configure.go @@ -0,0 +1,71 @@ +package main + +import ( + "context" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/client" + "github.com/canonical/microcluster/v2/microcluster" + "github.com/spf13/cobra" +) + +type cmdRemoteReplicationConfigureRbd struct { + common *CmdControl + schedule string +} + +func (c *cmdRemoteReplicationConfigureRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "configure ", + Short: "Configure remote replication parameters for RBD resource (Pool or Image)", + RunE: c.Run, + } + + cmd.Flags().StringVar(&c.schedule, "schedule", "", "snapshot schedule in days, hours, or minutes using d, h, m suffix respectively") + return cmd +} + +func (c *cmdRemoteReplicationConfigureRbd) Run(cmd 
*cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } + + m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + payload, err := c.prepareRbdPayload(types.ConfigureReplicationRequest, args) + if err != nil { + return err + } + + _, err = client.SendRemoteReplicationRequest(context.Background(), cli, payload) + if err != nil { + return err + } + + return nil +} + +func (c *cmdRemoteReplicationConfigureRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { + pool, image, err := types.GetPoolAndImageFromResource(args[0]) + if err != nil { + return types.RbdReplicationRequest{}, err + } + + retReq := types.RbdReplicationRequest{ + SourcePool: pool, + SourceImage: image, + Schedule: c.schedule, + RequestType: requestType, + ResourceType: types.GetRbdResourceType(pool, image), + } + + return retReq, nil +} diff --git a/microceph/cmd/microceph/remote_replication_rbd_disable.go b/microceph/cmd/microceph/remote_replication_rbd_disable.go new file mode 100644 index 00000000..89c56f7a --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd_disable.go @@ -0,0 +1,67 @@ +package main + +import ( + "context" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/client" + "github.com/canonical/microcluster/v2/microcluster" + "github.com/spf13/cobra" +) + +type cmdRemoteReplicationDisableRbd struct { + common *CmdControl + isForce bool +} + +func (c *cmdRemoteReplicationDisableRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "disable ", + Short: "Disable remote replication for RBD resource (Pool or Image)", + RunE: c.Run, + } + + cmd.Flags().BoolVar(&c.isForce, "force", false, "forcefully disable replication for rbd resource") + return cmd +} + +func (c *cmdRemoteReplicationDisableRbd) 
Run(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } + + m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + payload, err := c.prepareRbdPayload(types.DisableReplicationRequest, args) + if err != nil { + return err + } + + _, err = client.SendRemoteReplicationRequest(context.Background(), cli, payload) + return err +} + +func (c *cmdRemoteReplicationDisableRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { + pool, image, err := types.GetPoolAndImageFromResource(args[0]) + if err != nil { + return types.RbdReplicationRequest{}, err + } + + retReq := types.RbdReplicationRequest{ + SourcePool: pool, + SourceImage: image, + RequestType: requestType, + IsForceOp: c.isForce, + ResourceType: types.GetRbdResourceType(pool, image), + } + + return retReq, nil +} diff --git a/microceph/cmd/microceph/remote_replication_rbd_enable.go b/microceph/cmd/microceph/remote_replication_rbd_enable.go new file mode 100644 index 00000000..e6fae002 --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd_enable.go @@ -0,0 +1,81 @@ +package main + +import ( + "context" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/client" + "github.com/canonical/microcluster/v2/microcluster" + "github.com/spf13/cobra" +) + +type cmdRemoteReplicationEnableRbd struct { + common *CmdControl + remoteName string + repType string + schedule string + skipAutoEnable bool +} + +func (c *cmdRemoteReplicationEnableRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "enable ", + Short: "Enable remote replication for RBD resource (Pool or Image)", + RunE: c.Run, + } + + cmd.Flags().StringVar(&c.remoteName, "remote", "", "remote MicroCeph cluster name") + cmd.MarkFlagRequired("remote") + 
cmd.Flags().BoolVar(&c.skipAutoEnable, "skip-auto-enable", false, "do not auto enable rbd mirroring for all images in the pool.") + cmd.Flags().StringVar(&c.repType, "type", "journal", "'journal' or 'snapshot', defaults to journal") + cmd.Flags().StringVar(&c.schedule, "schedule", "", "snapshot schedule in days, hours, or minutes using d, h, m suffix respectively") + return cmd +} + +func (c *cmdRemoteReplicationEnableRbd) Run(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } + + m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + payload, err := c.prepareRbdPayload(types.EnableReplicationRequest, args) + if err != nil { + return err + } + + _, err = client.SendRemoteReplicationRequest(context.Background(), cli, payload) + if err != nil { + return err + } + + return nil +} + +func (c *cmdRemoteReplicationEnableRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { + pool, image, err := types.GetPoolAndImageFromResource(args[0]) + if err != nil { + return types.RbdReplicationRequest{}, err + } + + retReq := types.RbdReplicationRequest{ + RemoteName: c.remoteName, + SourcePool: pool, + SourceImage: image, + Schedule: c.schedule, + ReplicationType: types.RbdReplicationType(c.repType), + RequestType: requestType, + ResourceType: types.GetRbdResourceType(pool, image), + SkipAutoEnable: c.skipAutoEnable, + } + + return retReq, nil +} diff --git a/microceph/cmd/microceph/remote_replication_rbd_list.go b/microceph/cmd/microceph/remote_replication_rbd_list.go new file mode 100644 index 00000000..6b7f5d8f --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd_list.go @@ -0,0 +1,104 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/canonical/microceph/microceph/api/types" + 
"github.com/canonical/microceph/microceph/client" + "github.com/canonical/microcluster/v2/microcluster" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" +) + +type cmdRemoteReplicationListRbd struct { + common *CmdControl + poolName string + json bool +} + +func (c *cmdRemoteReplicationListRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List all configured remotes replication pairs.", + RunE: c.Run, + } + + cmd.Flags().StringVar(&c.poolName, "pool", "", "RBD pool name") + cmd.Flags().BoolVar(&c.json, "json", false, "output as json string") + return cmd +} + +func (c *cmdRemoteReplicationListRbd) Run(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmd.Help() + } + + m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + payload, err := c.prepareRbdPayload(types.ListReplicationRequest) + if err != nil { + return err + } + + resp, err := client.SendRemoteReplicationRequest(context.Background(), cli, payload) + if err != nil { + return err + } + + if c.json { + fmt.Println(resp) + return nil + } + + return printRemoteReplicationList(resp) +} + +func (c *cmdRemoteReplicationListRbd) prepareRbdPayload(requestType types.ReplicationRequestType) (types.RbdReplicationRequest, error) { + // list fetches ALL POOLS if pool name is empty. 
+ retReq := types.RbdReplicationRequest{ + SourcePool: c.poolName, + RequestType: requestType, + ResourceType: types.RbdResourcePool, + } + + return retReq, nil +} + +func printRemoteReplicationList(response string) error { + var resp types.RbdPoolList + err := json.Unmarshal([]byte(response), &resp) + if err != nil { + return nil + } + + // start table object + rowConfigAutoMerge := table.RowConfig{AutoMerge: true, AutoMergeAlign: text.AlignCenter} + + t := table.NewWriter() + t.SetOutputMirror(os.Stdout) + t.AppendHeader(table.Row{"Pool Name", "Image Name", "Is Primary", "Last Local Update"}, rowConfigAutoMerge) + for _, pool := range resp { + for _, image := range pool.Images { + t.AppendRow(table.Row{pool.Name, image.Name, image.IsPrimary, image.LastLocalUpdate}, rowConfigAutoMerge) + } + } + if terminal.IsTerminal(0) && terminal.IsTerminal(1) { + // Set style if interactive shell. + t.SetStyle(table.StyleColoredBright) + } + t.Render() + return nil +} diff --git a/microceph/cmd/microceph/remote_replication_rbd_status.go b/microceph/cmd/microceph/remote_replication_rbd_status.go new file mode 100644 index 00000000..17e0ccb7 --- /dev/null +++ b/microceph/cmd/microceph/remote_replication_rbd_status.go @@ -0,0 +1,179 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/canonical/microceph/microceph/api/types" + "github.com/canonical/microceph/microceph/client" + "github.com/canonical/microcluster/v2/microcluster" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" + "github.com/spf13/cobra" + "golang.org/x/crypto/ssh/terminal" +) + +type cmdRemoteReplicationStatusRbd struct { + common *CmdControl + json bool +} + +func (c *cmdRemoteReplicationStatusRbd) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "status ", + Short: "Show RBD resource (Pool or Image) replication status", + RunE: c.Run, + } + + cmd.Flags().BoolVar(&c.json, "json", false, "output as json string") + return 
cmd +} + +func (c *cmdRemoteReplicationStatusRbd) Run(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return cmd.Help() + } + + m, err := microcluster.App(microcluster.Args{StateDir: c.common.FlagStateDir}) + if err != nil { + return err + } + + cli, err := m.LocalClient() + if err != nil { + return err + } + + payload, err := c.prepareRbdPayload(types.StatusReplicationRequest, args) + if err != nil { + return err + } + + resp, err := client.SendRemoteReplicationRequest(context.Background(), cli, payload) + if err != nil { + return err + } + + if c.json { + fmt.Println(resp) + return nil + } + + return printRemoteReplicationStatusTable(payload.ResourceType, resp) +} + +func (c *cmdRemoteReplicationStatusRbd) prepareRbdPayload(requestType types.ReplicationRequestType, args []string) (types.RbdReplicationRequest, error) { + pool, image, err := types.GetPoolAndImageFromResource(args[0]) + if err != nil { + return types.RbdReplicationRequest{}, err + } + + retReq := types.RbdReplicationRequest{ + SourcePool: pool, + SourceImage: image, + RequestType: requestType, + ResourceType: types.GetRbdResourceType(pool, image), + } + + return retReq, nil +} + +func printRemoteReplicationStatusTable(ResourceType types.RbdResourceType, response string) error { + var err error + + // start table object + rowConfigAutoMerge := table.RowConfig{AutoMerge: true, AutoMergeAlign: text.AlignCenter} + + if ResourceType == types.RbdResourcePool { + var resp types.RbdPoolStatus + err = json.Unmarshal([]byte(response), &resp) + if err != nil { + return err + } + + // Summary Section. 
+ t_summary := table.NewWriter() + t_summary.SetOutputMirror(os.Stdout) + t_summary.AppendHeader(table.Row{"Summary", "Summary", "Health", "Health"}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Name", resp.Name, "Replication", resp.HealthReplication}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Mode", resp.Type, "Daemon", resp.HealthDaemon}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Image Count", resp.ImageCount, "Image", resp.HealthImages}, rowConfigAutoMerge) + if terminal.IsTerminal(0) && terminal.IsTerminal(1) { + // Set style if interactive shell. + t_summary.SetStyle(table.StyleColoredBright) + } + t_summary.Render() + fmt.Println() + + // Images Section. + // t_images := table.NewWriter() + // t_images.SetOutputMirror(os.Stdout) + // t_images.AppendHeader(table.Row{"Image Name", "Is Primary", "Last Local Update"}) + // for _, image := range resp.Images { + // t_images.AppendRow(table.Row{image.Name, image.IsPrimary, image.LastLocalUpdate}) + // } + // t_images.SetStyle(table.StyleColoredBright) + // t_images.Render() + // fmt.Println() + + // Remotes Section + t_remotes := table.NewWriter() + t_remotes.SetOutputMirror(os.Stdout) + t_remotes.AppendHeader(table.Row{"Remote Name", "Direction", "UUID"}) + for _, remote := range resp.Remotes { + t_remotes.AppendRow(table.Row{remote.Name, remote.Direction, remote.UUID}) + } + if terminal.IsTerminal(0) && terminal.IsTerminal(1) { + // Set style if interactive shell. + t_remotes.SetStyle(table.StyleColoredBright) + } + t_remotes.Render() + fmt.Println() + + } else if ResourceType == types.RbdResourceImage { + var resp types.RbdImageStatus + err = json.Unmarshal([]byte(response), &resp) + if err != nil { + return err + } + + // Summary Section. 
+ t_summary := table.NewWriter() + t_summary.SetOutputMirror(os.Stdout) + t_summary.AppendHeader(table.Row{"Summary", "Summary"}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Name", resp.Name}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"ID", resp.ID}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Mode", resp.Type}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Is Primary", resp.IsPrimary}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Status", resp.Status}, rowConfigAutoMerge) + t_summary.AppendRow(table.Row{"Last Local Update", resp.LastLocalUpdate}, rowConfigAutoMerge) + t_summary.SetStyle(table.StyleColoredBright) + t_summary.Render() + fmt.Println() + + // Images Section. + t_images := table.NewWriter() + t_images.SetOutputMirror(os.Stdout) + t_images.AppendHeader(table.Row{"Remote Name", "Status", "Last Remote Update"}, rowConfigAutoMerge) + for _, remote := range resp.Remotes { + var status string + statusList := strings.Split(remote.Status, ",") + if len(statusList) < 1 { + status = "" + } else { + status = statusList[0] + } + t_images.AppendRow(table.Row{remote.Name, status, remote.LastRemoteUpdate}) + } + if terminal.IsTerminal(0) && terminal.IsTerminal(1) { + // Set style if interactive shell. 
+ t_images.SetStyle(table.StyleColoredBright) + } + t_images.Render() + fmt.Println() + } + return nil +} diff --git a/microceph/cmd/microcephd/main.go b/microceph/cmd/microcephd/main.go index 4d25a9fe..a6f004f7 100644 --- a/microceph/cmd/microcephd/main.go +++ b/microceph/cmd/microcephd/main.go @@ -113,8 +113,8 @@ func main() { app.PersistentFlags().BoolVarP(&daemonCmd.global.flagHelp, "help", "h", false, "Print help") app.PersistentFlags().BoolVar(&daemonCmd.global.flagVersion, "version", false, "Print version number") - app.PersistentFlags().BoolVarP(&daemonCmd.global.flagLogDebug, "debug", "d", false, "Show all debug messages") - app.PersistentFlags().BoolVarP(&daemonCmd.global.flagLogVerbose, "verbose", "v", false, "Show all information messages") + app.PersistentFlags().BoolVarP(&daemonCmd.global.flagLogDebug, "debug", "d", true, "Show all debug messages") + app.PersistentFlags().BoolVarP(&daemonCmd.global.flagLogVerbose, "verbose", "v", true, "Show all information messages") app.PersistentFlags().StringVar(&daemonCmd.flagStateDir, "state-dir", "", "Path to store state information"+"``") diff --git a/microceph/constants/constants.go b/microceph/constants/constants.go index 9437fb5c..4fe2b4a8 100644 --- a/microceph/constants/constants.go +++ b/microceph/constants/constants.go @@ -53,15 +53,29 @@ var GetPathConst = func() PathConst { } } +// File Modes +const PermissionWorldNoAccess = 0750 +const PermissionOnlyUserAccess = 0700 + func GetPathFileMode() PathFileMode { pathConsts := GetPathConst() return PathFileMode{ - pathConsts.ConfPath: 0750, - pathConsts.RunPath: 0700, - pathConsts.DataPath: 0700, - pathConsts.LogPath: 0700, + pathConsts.ConfPath: PermissionWorldNoAccess, + pathConsts.RunPath: PermissionOnlyUserAccess, + pathConsts.DataPath: PermissionOnlyUserAccess, + pathConsts.LogPath: PermissionOnlyUserAccess, } } // Regexes const ClusterNameRegex = "^[a-z0-9]+$" + +// Replication Events +const EventEnableReplication = "enable_replication" +const 
EventDisableReplication = "disable_replication" +const EventListReplication = "list_replication" +const EventStatusReplication = "status_replication" +const EventConfigureReplication = "configure_replication" + +// Rbd features +var RbdJournalingEnableFeatureSet = [...]string{"exclusive-lock", "journaling"} diff --git a/microceph/go.mod b/microceph/go.mod index d4f549c4..43c47cfe 100644 --- a/microceph/go.mod +++ b/microceph/go.mod @@ -1,6 +1,6 @@ module github.com/canonical/microceph/microceph -go 1.22.5 +go 1.22.7 require ( github.com/Rican7/retry v0.3.1 @@ -9,14 +9,16 @@ require ( github.com/djherbis/times v1.6.0 github.com/google/go-cmp v0.6.0 github.com/gorilla/mux v1.8.1 - github.com/jedib0t/go-pretty/v6 v6.5.9 + github.com/jedib0t/go-pretty/v6 v6.6.0 github.com/olekukonko/tablewriter v0.0.5 github.com/pborman/uuid v1.2.1 + github.com/qmuntal/stateless v1.7.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 - github.com/tidwall/gjson v1.17.1 + github.com/tidwall/gjson v1.18.0 github.com/tidwall/sjson v1.2.5 + gopkg.in/yaml.v2 v2.4.0 ) require ( @@ -41,7 +43,7 @@ require ( github.com/kr/fs v0.1.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/mattn/go-sqlite3 v1.14.23 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/muhlemmer/gu v0.3.1 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -54,18 +56,17 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect - github.com/zitadel/logging v0.6.0 // indirect - github.com/zitadel/oidc/v3 v3.26.0 // indirect + github.com/zitadel/logging v0.6.1 // indirect + github.com/zitadel/oidc/v3 v3.30.0 // indirect github.com/zitadel/schema v1.3.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // 
indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/microceph/go.sum b/microceph/go.sum index 957c0f41..45544a73 100644 --- a/microceph/go.sum +++ b/microceph/go.sum @@ -216,8 +216,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jaypipes/pcidb v1.0.1 h1:WB2zh27T3nwg8AE8ei81sNRb9yWBii3JGNJtT7K9Oic= github.com/jaypipes/pcidb v1.0.1/go.mod h1:6xYUz/yYEyOkIkUt2t2J2folIuZ4Yg6uByCGFXMCeE4= -github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= -github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= +github.com/jedib0t/go-pretty/v6 v6.6.0 h1:wmZVuAcEkZRT+Aq1xXpE8IGat4vE5WXOMmBpbQqERXw= +github.com/jedib0t/go-pretty/v6 v6.6.0/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jeremija/gosubmit v0.2.7 h1:At0OhGCFGPXyjPYAsCchoBUhE099pcBXmsb4iZqROIc= github.com/jeremija/gosubmit v0.2.7/go.mod h1:Ui+HS073lCFREXBbdfrJzMB57OI/bdxTiLtrDHHhFPI= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -246,8 +246,8 @@ github.com/mattn/go-runewidth v0.0.13/go.mod 
h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -287,6 +287,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/qmuntal/stateless v1.7.1 h1:dI+BtLHq/nD6u46POkOINTDjY9uE33/4auEzfX3TWp0= +github.com/qmuntal/stateless v1.7.1/go.mod h1:n1HjRBM/cq4uCr3rfUjaMkgeGcd+ykAZwkjLje6jGBM= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -296,8 +298,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po= -github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -332,8 +334,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= -github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= @@ -347,10 +349,10 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zitadel/logging v0.6.0 h1:t5Nnt//r+m2ZhhoTmoPX+c96pbMarqJvW1Vq6xFTank= -github.com/zitadel/logging v0.6.0/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= -github.com/zitadel/oidc/v3 v3.26.0 h1:BG3OUK+JpuKz7YHJIyUxL5Sl2JV6ePkG42UP4Xv3J2w= -github.com/zitadel/oidc/v3 v3.26.0/go.mod h1:Cx6AYPTJO5q2mjqF3jaknbKOUjpq1Xui0SYvVhkKuXU= +github.com/zitadel/logging v0.6.1 h1:Vyzk1rl9Kq9RCevcpX6ujUaTYFX43aa4LkvV1TvUk+Y= +github.com/zitadel/logging v0.6.1/go.mod h1:Y4CyAXHpl3Mig6JOszcV5Rqqsojj+3n7y2F591Mp/ow= +github.com/zitadel/oidc/v3 v3.30.0 h1:1IuZlK+X+JLExEA2PYgRlVvWHBhz/cMwT7VL/YrQabw= +github.com/zitadel/oidc/v3 v3.30.0/go.mod h1:+I5BgvGO5C2ZJrQRjV34EjkyA7P3GXyYGZgXI8Sdw18= github.com/zitadel/schema v1.3.0 h1:kQ9W9tvIwZICCKWcMvCEweXET1OcOyGEuFbHs4o5kg0= github.com/zitadel/schema v1.3.0/go.mod h1:NptN6mkBDFvERUCvZHlvWmmME+gmZ44xzwRXwhzsbtc= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -363,12 +365,12 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= 
+go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= @@ -381,8 +383,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -457,8 +459,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -471,8 +473,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -485,8 +487,8 @@ golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -536,13 +538,13 @@ golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -552,8 +554,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/snapcraft/commands/rbd-mirror.start b/snapcraft/commands/rbd-mirror.start index 720fbc38..8a36abe2 100755 --- a/snapcraft/commands/rbd-mirror.start +++ b/snapcraft/commands/rbd-mirror.start @@ -1,3 +1,2 @@ #!/bin/sh -# TODO: change it to an rbd-mirror user key exec rbd-mirror -f --cluster ceph --id "rbd-mirror.$(hostname)" diff --git 
a/tests/scripts/actionutils.sh b/tests/scripts/actionutils.sh index 59fca2c0..e495c1a2 100755 --- a/tests/scripts/actionutils.sh +++ b/tests/scripts/actionutils.sh @@ -218,6 +218,93 @@ function remote_perform_remote_ops_check() { lxc exec node-wrk3 -- sh -c "microceph remote list --json | grep '\"local_name\":\"siteb\"'" } +function remote_configure_rbd_mirroring() { + set -eux + # create two new rbd pools on sitea + lxc exec node-wrk0 -- sh -c "microceph.ceph osd pool create pool_one" + lxc exec node-wrk1 -- sh -c "microceph.ceph osd pool create pool_two" + # and siteb + lxc exec node-wrk2 -- sh -c "microceph.ceph osd pool create pool_one" + lxc exec node-wrk3 -- sh -c "microceph.ceph osd pool create pool_two" + + # enable both pools for rbd on site a + lxc exec node-wrk0 -- sh -c "microceph.rbd pool init pool_one" + lxc exec node-wrk1 -- sh -c "microceph.rbd pool init pool_two" + # and siteb + lxc exec node-wrk2 -- sh -c "microceph.rbd pool init pool_one" + lxc exec node-wrk3 -- sh -c "microceph.rbd pool init pool_two" + + # create 2 images on both pools on primary site. 
+ lxc exec node-wrk0 -- sh -c "microceph.rbd create --size 512 pool_one/image_one" + lxc exec node-wrk0 -- sh -c "microceph.rbd create --size 512 pool_one/image_two" + lxc exec node-wrk1 -- sh -c "microceph.rbd create --size 512 pool_two/image_one" + lxc exec node-wrk1 -- sh -c "microceph.rbd create --size 512 pool_two/image_two" + + # enable mirroring on pool_one + lxc exec node-wrk0 -- sh -c "microceph remote replication rbd enable pool_one --remote siteb" + + # enable mirroring on pool_two images + lxc exec node-wrk0 -- sh -c "microceph remote replication rbd enable pool_two/image_one --type journal --remote siteb" + lxc exec node-wrk0 -- sh -c "microceph remote replication rbd enable pool_two/image_two --type snapshot --remote siteb" +} + +function remote_enable_rbd_mirror_daemon() { + lxc exec node-wrk0 -- sh -c "microceph enable rbd-mirror" + lxc exec node-wrk2 -- sh -c "microceph enable rbd-mirror" +} + +function remote_wait_for_secondary_to_sync() { + set -eux + + # wait till images are synchronised + count=0 + for index in {1..100}; do + echo "Check run #$index" + list_output=$(lxc exec node-wrk2 -- sh -c "sudo microceph remote replication rbd list --json") + echo $list_output + images=$(echo $list_output | jq .[].Images) + echo $images + img_one_count=$(echo $images | grep -c "image_one" || true) + echo $img_one_count + if [[ $img_one_count -gt 0 ]] ; then + break + fi + + count=$index + echo "#################" + sleep 30 + done + + # count reaches 100 only if the loop above exhausted every retry without a break + if [ "$count" -eq 100 ] ; then + echo "Remote replication sync check timed out" + exit 1 + fi +} + +function remote_verify_rbd_mirroring() { + set -eux + + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd list" + lxc exec node-wrk2 -- sh -c "sudo microceph remote replication rbd list" + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd list" | grep "pool_one.*image_one" + lxc exec node-wrk1 -- sh -c "sudo microceph remote replication rbd list" | grep "pool_one.*image_two" + lxc exec node-wrk2 -- 
sh -c "sudo microceph remote replication rbd list" | grep "pool_two.*image_one" + lxc exec node-wrk3 -- sh -c "sudo microceph remote replication rbd list" | grep "pool_two.*image_two" +} + +function remote_disable_rbd_mirroring() { + set -eux + # check disables fail for image mirroring pools with images currently being mirrored + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd disable pool_two 2>&1 || true" | grep "in Image mirroring mode" + # disable both images in pool_two and then disable pool_two + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd disable pool_two/image_one" + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd disable pool_two/image_two" + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd disable pool_two" + + # disable pool one + lxc exec node-wrk0 -- sh -c "sudo microceph remote replication rbd disable pool_one" +} + function remote_remove_and_verify() { set -eux