
Enable support for setting a Ceph public_network in the MicroCloud initialization #314

Closed · 2 tasks

gabrielmougard opened this issue on Jun 3, 2024 · 3 comments
Assignees: gabrielmougard
Labels: Feature (New feature, not a bug)

@gabrielmougard (Contributor) commented:

Description

This is a direct follow-up task to #274.

We did not implement the public_network setting for Ceph in MicroCloud yet because the MicroCeph API first needs a change to expose the public_network of a remote cluster: unlike cluster_network, it is not returned when fetching a ClusterConfig from MicroCeph.

DoD

  • New (or updated) API endpoint in MicroCeph that systematically exposes the public_network (like cluster_network)
  • MicroCloud logic (already done; see the attached diffs below and the illustrative sketch that follows)
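
For illustration, here is a minimal, self-contained Go sketch of the parsing MicroCloud would need once MicroCeph exposes public_network in its cluster config, mirroring how ask.go already handles cluster_network. The parseCephNetworks helper and the sample values are hypothetical, not part of either code base:

// Minimal sketch (not MicroCloud code): assuming the MicroCeph cluster config
// API eventually returns "public_network" alongside "cluster_network",
// MicroCloud could parse both values the same way it parses cluster_network today.
package main

import (
	"fmt"
	"net"
)

// parseCephNetworks extracts the public and cluster (internal) networks from a
// Ceph config map, where each value is expected in CIDR notation.
func parseCephNetworks(configs map[string]string) (publicNet *net.IPNet, internalNet *net.IPNet, err error) {
	for key, value := range configs {
		if value == "" {
			continue
		}

		switch key {
		case "cluster_network":
			_, parsed, err := net.ParseCIDR(value)
			if err != nil {
				return nil, nil, fmt.Errorf("failed to parse the Ceph cluster network %q: %v", value, err)
			}

			internalNet = parsed
		case "public_network":
			_, parsed, err := net.ParseCIDR(value)
			if err != nil {
				return nil, nil, fmt.Errorf("failed to parse the Ceph public network %q: %v", value, err)
			}

			publicNet = parsed
		}
	}

	return publicNet, internalNet, nil
}

func main() {
	// Hypothetical values standing in for what a remote MicroCeph cluster could report.
	configs := map[string]string{
		"cluster_network": "10.0.1.0/24",
		"public_network":  "10.0.2.0/24",
	}

	publicNet, internalNet, err := parseCephNetworks(configs)
	if err != nil {
		fmt.Println("error:", err)
		return
	}

	fmt.Printf("public_network: %s, cluster_network: %s\n", publicNet, internalNet)
}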

Diffs

diff1.patch

diff --git a/cmd/microcloud/ask.go b/cmd/microcloud/ask.go
index b63252f..f0db11d 100644
--- a/cmd/microcloud/ask.go
+++ b/cmd/microcloud/ask.go
@@ -412,10 +412,10 @@ func validateCephInterfacesForSubnet(lxdService *service.LXDService, systems map
 
 // getTargetCephNetworks fetches the Ceph network configuration from the existing Ceph cluster.
 // If the system passed as an argument is nil, we will fetch the local Ceph network configuration.
-func getTargetCephNetworks(sh *service.Handler, s *InitSystem) (publicCephNetwork *net.IPNet, internalCephNetwork *net.IPNet, err error) {
+func getTargetCephNetworks(sh *service.Handler, s *InitSystem) (internalCephNetwork *net.IPNet, err error) {
        microCephService := sh.Services[types.MicroCeph].(*service.CephService)
        if microCephService == nil {
-               return nil, nil, fmt.Errorf("failed to get MicroCeph service")
+               return nil, fmt.Errorf("failed to get MicroCeph service")
        }
 
        var cephAddr string
@@ -427,7 +427,7 @@ func getTargetCephNetworks(sh *service.Handler, s *InitSystem) (publicCephNetwor
 
        remoteCephConfigs, err := microCephService.ClusterConfig(context.Background(), cephAddr, cephAuthSecret)
        if err != nil {
-               return nil, nil, err
+               return nil, err
        }
 
        for key, value := range remoteCephConfigs {
@@ -436,23 +436,14 @@ func getTargetCephNetworks(sh *service.Handler, s *InitSystem) (publicCephNetwor
                        // is not a network range but a regular IP address. We need to extract the network range.
                        _, valueNet, err := net.ParseCIDR(value)
                        if err != nil {
-                               return nil, nil, fmt.Errorf("failed to parse the Ceph cluster network configuration from the existing Ceph cluster: %v", err)
+                               return nil, fmt.Errorf("failed to parse the Ceph cluster network configuration from the existing Ceph cluster: %v", err)
                        }
 
                        internalCephNetwork = valueNet
                }
-
-               if key == "public_network" && value != "" {
-                       _, valueNet, err := net.ParseCIDR(value)
-                       if err != nil {
-                               return nil, nil, fmt.Errorf("failed to parse the Ceph public network configuration from the existing Ceph cluster: %v", err)
-                       }
-
-                       publicCephNetwork = valueNet
-               }
        }
 
-       return publicCephNetwork, internalCephNetwork, nil
+       return internalCephNetwork, nil
 }
 
 func (c *CmdControl) askRemotePool(systems map[string]InitSystem, autoSetup bool, wipeAllDisks bool, sh *service.Handler) error {
@@ -695,19 +686,15 @@ func (c *CmdControl) askNetwork(sh *service.Handler, systems map[string]InitSyst
                                        }
                                }
 
-                               var customTargetCephPublicNetwork, customTargetCephInternalNetwork string
+                               var customTargetCephInternalNetwork string
                                if initializedMicroCephSystem != nil {
                                        // If there is at least one initialized system with MicroCeph (we consider that more than one initialized MicroCeph systems are part of the same cluster),
                                        // we need to fetch its Ceph configuration to validate against this to-be-bootstrapped cluster.
-                                       targetPublicCephNetwork, targetInternalCephNetwork, err := getTargetCephNetworks(sh, initializedMicroCephSystem)
+                                       targetInternalCephNetwork, err := getTargetCephNetworks(sh, initializedMicroCephSystem)
                                        if err != nil {
                                                return err
                                        }
 
-                                       if targetPublicCephNetwork.String() != microCloudInternalSubnet.String() {
-                                               customTargetCephPublicNetwork = targetPublicCephNetwork.String()
-                                       }
-
                                        if targetInternalCephNetwork.String() != microCloudInternalSubnet.String() {
                                                customTargetCephInternalNetwork = targetInternalCephNetwork.String()
                                        }
@@ -719,30 +706,7 @@ func (c *CmdControl) askNetwork(sh *service.Handler, systems map[string]InitSyst
 
                                // If there is no remote Ceph cluster or is an existing remote Ceph has no configured networks
                                // other than the default one (internal MicroCloud network), we ask the user to configure the Ceph networks.
-                               if customTargetCephPublicNetwork == "" && customTargetCephInternalNetwork == "" {
-                                       publicCephSubnet, err := c.asker.AskString(fmt.Sprintf("What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph public traffic on? [default: %s] ", microCloudInternalNetworkAddrCIDR), microCloudInternalNetworkAddrCIDR, validate.IsNetwork)
-                                       if err != nil {
-                                               return err
-                                       }
-
-                                       if publicCephSubnet != microCloudInternalNetworkAddrCIDR {
-                                               err = validateCephInterfacesForSubnet(lxdService, systems, availableCephNetworkInterfaces, publicCephSubnet)
-                                               if err != nil {
-                                                       return err
-                                               }
-
-                                               bootstrapSystem := systems[sh.Name]
-                                               bootstrapSystem.MicroCephPublicNetworkSubnet = publicCephSubnet
-                                               systems[sh.Name] = bootstrapSystem
-                                       }
-
-                                       // If a user chose a network different from the default one,
-                                       // we make the default for the internal network the same as the first one.
-                                       // This avoid entering two times the same network in order to have a partially disaggregated setup.
-                                       if publicCephSubnet != microCloudInternalNetworkAddrCIDR {
-                                               microCloudInternalNetworkAddrCIDR = publicCephSubnet
-                                       }
-
+                               if customTargetCephInternalNetwork == "" {
                                        internalCephSubnet, err := c.asker.AskString(fmt.Sprintf("What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph internal traffic on? [default: %s] ", microCloudInternalNetworkAddrCIDR), microCloudInternalNetworkAddrCIDR, validate.IsNetwork)
                                        if err != nil {
                                                return err
@@ -761,17 +725,6 @@ func (c *CmdControl) askNetwork(sh *service.Handler, systems map[string]InitSyst
                                } else {
                                        // Else, we validate that the systems to be bootstrapped comply with the network configuration of the existing remote Ceph cluster,
                                        // and set their Ceph network configuration accordingly.
-                                       if customTargetCephPublicNetwork != "" && customTargetCephPublicNetwork != microCloudInternalNetworkAddrCIDR {
-                                               err = validateCephInterfacesForSubnet(lxdService, systems, availableCephNetworkInterfaces, customTargetCephPublicNetwork)
-                                               if err != nil {
-                                                       return err
-                                               }
-
-                                               bootstrapSystem := systems[sh.Name]
-                                               bootstrapSystem.MicroCephPublicNetworkSubnet = customTargetCephPublicNetwork
-                                               systems[sh.Name] = bootstrapSystem
-                                       }
-
                                        if customTargetCephInternalNetwork != "" && customTargetCephInternalNetwork != microCloudInternalNetworkAddrCIDR {
                                                err = validateCephInterfacesForSubnet(lxdService, systems, availableCephNetworkInterfaces, customTargetCephInternalNetwork)
                                                if err != nil {
@@ -785,18 +738,11 @@ func (c *CmdControl) askNetwork(sh *service.Handler, systems map[string]InitSyst
                                }
                        } else {
                                // If we are not bootstrapping, we target the local MicroCeph and fetch its network cluster config.
-                               localPublicCephNetwork, localInternalCephNetwork, err := getTargetCephNetworks(sh, nil)
+                               localInternalCephNetwork, err := getTargetCephNetworks(sh, nil)
                                if err != nil {
                                        return err
                                }
 
-                               if localPublicCephNetwork.String() != "" && localPublicCephNetwork.String() != microCloudInternalSubnet.String() {
-                                       err = validateCephInterfacesForSubnet(lxd, systems, availableCephNetworkInterfaces, localPublicCephNetwork.String())
-                                       if err != nil {
-                                               return err
-                                       }
-                               }
-
                                if localInternalCephNetwork.String() != "" && localInternalCephNetwork.String() != microCloudInternalSubnet.String() {
                                        err = validateCephInterfacesForSubnet(lxd, systems, availableCephNetworkInterfaces, localInternalCephNetwork.String())
                                        if err != nil {
diff --git a/cmd/microcloud/main_init.go b/cmd/microcloud/main_init.go
index c25254d..1417abc 100644
--- a/cmd/microcloud/main_init.go
+++ b/cmd/microcloud/main_init.go
@@ -36,6 +36,8 @@ type InitSystem struct {
        AvailableDisks []lxdAPI.ResourcesStorageDisk
        // MicroCephDisks contains the disks intended to be passed to MicroCeph.
        MicroCephDisks []cephTypes.DisksPost
+       // MicroCephPublicNetworkSubnet is an optional the subnet (IPv4/IPv6 CIDR notation) for the Ceph public network.
+       MicroCephPublicNetworkSubnet string
        // MicroCephClusterNetworkSubnet is an optional the subnet (IPv4/IPv6 CIDR notation) for the Ceph cluster network.
        MicroCephInternalNetworkSubnet string
        // TargetNetworks contains the network configuration for the target system.

diff2.patch

diff --git a/cmd/microcloud/main_init_preseed.go b/cmd/microcloud/main_init_preseed.go
index 41d93e3..15ddac7 100644
--- a/cmd/microcloud/main_init_preseed.go
+++ b/cmd/microcloud/main_init_preseed.go
@@ -63,7 +63,6 @@ type InitNetwork struct {
 
 // CephOptions represents the structure of the ceph options in the preseed yaml.
 type CephOptions struct {
-       PublicNetwork   string `yaml:"public_network"`
        InternalNetwork string `yaml:"internal_network"`
 }
 
@@ -258,18 +257,6 @@ func (p *Preseed) validate(name string, bootstrap bool) error {
                return fmt.Errorf("Missing interface name for machine lookup")
        }
 
-       usingCephPublicNetwork := p.Ceph.PublicNetwork != ""
-       if !containsCephStorage && usingCephPublicNetwork {
-               return fmt.Errorf("Cannot specify a Ceph public network without Ceph storage disks")
-       }
-
-       if usingCephPublicNetwork {
-               err = validate.IsNetwork(p.Ceph.PublicNetwork)
-               if err != nil {
-                       return fmt.Errorf("Invalid Ceph public network subnet: %v", err)
-               }
-       }
-
        usingCephInternalNetwork := p.Ceph.InternalNetwork != ""
        if !containsCephStorage && usingCephInternalNetwork {
                return fmt.Errorf("Cannot specify a Ceph internal network without Ceph storage disks")
@@ -715,7 +702,7 @@ func (p *Preseed) Parse(s *service.Handler, bootstrap bool) (map[string]InitSyst
        }
 
        var cephInterfaces map[string][]service.CephDedicatedInterface
-       if p.Ceph.PublicNetwork != "" || p.Ceph.InternalNetwork != "" || !bootstrap {
+       if p.Ceph.InternalNetwork != "" || !bootstrap {
                cephInterfaces, err = lxd.GetCephInterfaces(context.Background(), bootstrap, infos)
                if err != nil {
                        return nil, err
@@ -732,45 +719,27 @@ func (p *Preseed) Parse(s *service.Handler, bootstrap bool) (map[string]InitSyst
                        }
                }
 
-               var customTargetCephPublicNetwork, customTargetCephInternalNetwork string
+               var customTargetCephInternalNetwork string
                if initializedMicroCephSystem != nil {
                        // If there is at least one initialized system with MicroCeph (we consider that more than one initialized MicroCeph systems are part of the same cluster),
                        // we need to fetch its Ceph configuration to validate against this to-be-bootstrapped cluster.
-                       targetPublicCephNetwork, targetInternalCephNetwork, err := getTargetCephNetworks(s, initializedMicroCephSystem)
+                       targetInternalCephNetwork, err := getTargetCephNetworks(s, initializedMicroCephSystem)
                        if err != nil {
                                return nil, err
                        }
 
-                       if targetPublicCephNetwork.String() != lookupSubnet.String() {
-                               customTargetCephPublicNetwork = targetPublicCephNetwork.String()
-                       }
-
                        if targetInternalCephNetwork.String() != lookupSubnet.String() {
                                customTargetCephInternalNetwork = targetInternalCephNetwork.String()
                        }
                }
 
-               var publicCephNetwork string
                var internalCephNetwork string
-               if customTargetCephPublicNetwork == "" && customTargetCephInternalNetwork == "" {
-                       publicCephNetwork = p.Ceph.PublicNetwork
+               if customTargetCephInternalNetwork == "" {
                        internalCephNetwork = p.Ceph.InternalNetwork
                } else {
-                       publicCephNetwork = customTargetCephPublicNetwork
                        internalCephNetwork = customTargetCephInternalNetwork
                }
 
-               if publicCephNetwork != "" {
-                       err = validateCephInterfacesForSubnet(lxd, systems, cephInterfaces, publicCephNetwork)
-                       if err != nil {
-                               return nil, err
-                       }
-
-                       bootstrapSystem := systems[s.Name]
-                       bootstrapSystem.MicroCephPublicNetworkSubnet = publicCephNetwork
-                       systems[s.Name] = bootstrapSystem
-               }
-
                if internalCephNetwork != "" {
                        err = validateCephInterfacesForSubnet(lxd, systems, cephInterfaces, internalCephNetwork)
                        if err != nil {
@@ -782,18 +751,11 @@ func (p *Preseed) Parse(s *service.Handler, bootstrap bool) (map[string]InitSyst
                        systems[s.Name] = bootstrapSystem
                }
        } else {
-               localPublicCephNetwork, localInternalCephNetwork, err := getTargetCephNetworks(s, nil)
+               localInternalCephNetwork, err := getTargetCephNetworks(s, nil)
                if err != nil {
                        return nil, err
                }
 
-               if localPublicCephNetwork.String() != "" && localPublicCephNetwork.String() != lookupSubnet.String() {
-                       err = validateCephInterfacesForSubnet(lxd, systems, cephInterfaces, localPublicCephNetwork.String())
-                       if err != nil {
-                               return nil, err
-                       }
-               }
-
                if localInternalCephNetwork.String() != "" && localInternalCephNetwork.String() != lookupSubnet.String() {
                        err = validateCephInterfacesForSubnet(lxd, systems, cephInterfaces, localInternalCephNetwork.String())
                        if err != nil {

diff3.patch

diff --git a/cmd/microcloud/main_init.go b/cmd/microcloud/main_init.go
index 00a4fe5..971c074 100644
--- a/cmd/microcloud/main_init.go
+++ b/cmd/microcloud/main_init.go
@@ -715,10 +715,6 @@ func setupCluster(s *service.Handler, systems map[string]InitSystem) error {
 
                        if s.Type() == types.MicroCeph {
                                microCephBootstrapConf := make(map[string]string)
-                               if bootstrapSystem.MicroCephPublicNetworkSubnet != "" {
-                                       microCephBootstrapConf["PublicNet"] = bootstrapSystem.MicroCephPublicNetworkSubnet
-                               }
-
                                if bootstrapSystem.MicroCephInternalNetworkSubnet != "" {
                                        microCephBootstrapConf["ClusterNet"] = bootstrapSystem.MicroCephInternalNetworkSubnet
                                }

diff4.patch

diff --git a/doc/explanation/microcloud.rst b/doc/explanation/microcloud.rst
index 28eaeef..327cd22 100644
--- a/doc/explanation/microcloud.rst
+++ b/doc/explanation/microcloud.rst
@@ -64,14 +64,11 @@ MicroCloud will still be usable, but you will see some limitations:
   As a result of this, network forwarding works at a basic level only, and external addresses must be forwarded to a specific cluster member and don't fail over.
 - There is no support for hardware acceleration, load balancers, or ACL functionality within the local network.
 
-Dedicated networks for Ceph
+Dedicated internal network for Ceph
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
-You can set up dedicated networks for Ceph to separate either or both public and internal Ceph traffic from the rest of the MicroCloud cluster traffic.
+You can set up a dedicated network for Ceph to separate the internal Ceph traffic from the rest of the MicroCloud cluster traffic.
 
-You can choose to separate Ceph public and internal traffic onto separate dedicated networks (fully disaggregated) or you can choose to have all Ceph traffic use a single separate network (partially disaggregated).
-In either setups all of your cluster members must have an IP address configured on a network interface for the Ceph network(s) specified.
-
-See :ref:`howto-ceph-networking` for how to set up dedicated networks for Ceph.
+See :ref:`howto-ceph-networking` for how to set up a dedicated internal network for Ceph.
 
 Storage
 -------
diff --git a/doc/how-to/ceph_networking.rst b/doc/how-to/ceph_networking.rst
index a2f8212..c4824d1 100644
--- a/doc/how-to/ceph_networking.rst
+++ b/doc/how-to/ceph_networking.rst
@@ -3,13 +3,12 @@
 How to configure Ceph networking
 ================================
 
-When running :command:`microcloud init`, you are asked if you want to provide custom subnets for the Ceph cluster.
-Here are the questions you will be asked:
+When running :command:`microcloud init`, you are asked if you want to provide a custom subnet for the Ceph cluster.
+Here is the question you will be asked:
 
-- ``What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph public traffic on? [default: 203.0.113.0/24]: <answer>``
 - ``What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph internal traffic on? [default: 203.0.113.0/24]: <answer>``
 
-You can choose to skip both questions (just hit ``Enter``) and use the default value which is the subnet used for the internal MicroCloud traffic.
+You can choose to skip this question (just hit ``Enter``) and use the default value which is the subnet used for the internal MicroCloud traffic.
 This is referred to as a *usual* Ceph networking setup.
 
 .. figure:: /images/ceph_network_usual_setup.svg
@@ -20,52 +19,35 @@ Sometimes, you want to be able to use different network interfaces for some Ceph
 Let's imagine you have machines with network interfaces that are tailored for high throughput and low latency data transfer,
 like 100 GbE+ QSFP links, and other ones that might be more suited for management traffic, like 1 GbE or 10 GbE links.
 
-In this case, it would probably be ideal to set your Ceph internal (or cluster) traffic on the high throughput network interface and the Ceph public traffic on the management network interface. This is referred to as a *fully disaggregated* Ceph networking setup.
-
-.. figure:: /images/ceph_network_full_setup.svg
-   :alt: Each type of Ceph traffic is on a different network interface, tailored for its usage.
-   :align: center
-
-
-You could also decide to put both types of traffic on the same high throughput and low latency network interface. This is referred to as a *partially disaggregated* Ceph networking setup.
+In this case, it would probably be ideal to set your Ceph internal (or cluster) traffic on the high throughput network interface. This is referred to as a *partially disaggregated* Ceph networking setup.
 
 .. figure:: /images/ceph_network_partial_setup.svg
-   :alt: Both the Ceph public and internal traffic are on the same high throughput network interface.
+   :alt: The Ceph internal traffic uses a dedicated high throughput network interface.
    :align: center
 
-To use a fully or partially disaggregated Ceph networking setup with your MicroCloud, specify the corresponding subnets during the MicroCloud initialisation process.
+To use a partially disaggregated Ceph networking setup with your MicroCloud, specify the corresponding subnets during the MicroCloud initialisation process.
 
 The following instructions build on the :ref:`get-started` tutorial and show how you can test setting up a MicroCloud with disaggregated Ceph networking inside a LXD setup.
 
-1. Create the dedicated networks for Ceph:
+1. Create the dedicated network for Ceph:
 
    #. First, just like when you created an uplink network for MicroCloud so that the cluster members could have external connectivity, you will need to create a dedicated network for the Ceph cluster members to communicate with each other. Let's call it ``cephbr0``::
 
         lxc network create cephbr0
 
-   #. Create a second network. Let's call it ``cephbr1``::
-
-        lxc network create cephbr1
-
    #. Enter the following commands to find out the assigned IPv4 and IPv6 addresses for the networks and note them down::
 
         lxc network get cephbr0 ipv4.address
         lxc network get cephbr0 ipv6.address
-        lxc network get cephbr1 ipv4.address
-        lxc network get cephbr1 ipv6.address
 
 2. Create the network interfaces that will be used for the Ceph networking setup for each VM:
 
-   #. Add the network device for the ``cephbr0`` and ``cephbr1`` network::
+   #. Add the network device for the ``cephbr0`` network::
 
         lxc config device add micro1 eth2 nic network=cephbr0 name=eth2
         lxc config device add micro2 eth2 nic network=cephbr0 name=eth2
         lxc config device add micro3 eth2 nic network=cephbr0 name=eth2
         lxc config device add micro4 eth2 nic network=cephbr0 name=eth2
-        lxc config device add micro1 eth3 nic network=cephbr1 name=eth3
-        lxc config device add micro2 eth3 nic network=cephbr1 name=eth3
-        lxc config device add micro3 eth3 nic network=cephbr1 name=eth3
-        lxc config device add micro4 eth3 nic network=cephbr1 name=eth3
 
 3. Now, just like in the tutorial, start the VMs.
 
@@ -78,24 +60,11 @@ The following instructions build on the :ref:`get-started` tutorial and show how
         # `X` should be a number between 2 and 254, different for each VM
         ip addr add 10.0.1.X/24 dev enp7s0
 
-   #. Do the same for ``cephbr1`` on each VM::
-
-        # If the `cephbr1` gateway address is `10.0.2.1/24` (subnet should be `10.0.2.0/24`)
-        ip link set enp8s0 up
-        # `X` should be a number between 2 and 254, different for each VM
-        ip addr add 10.0.2.X/24 dev enp8s0
-
 5. Now, you can start the MicroCloud initialisation process and provide the subnets you noted down in step 1.c when asked for the Ceph networking subnets.
 
-#. We will use ``cephbr0`` for the Ceph internal traffic and ``cephbr1`` for the Ceph public traffic. In a production setup, you'd choose the fast subnet for the internal Ceph traffic::
+#. We will use ``cephbr0`` for the Ceph internal traffic. In a production setup, you'd choose the fast subnet for the internal Ceph traffic::
 
-    What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph public traffic on? [default: 203.0.113.0/24]: 10.0.2.0/24
-
-    Interface "enp7s0" ("10.0.2.3") detected on cluster member "micro2"
-    Interface "enp7s0" ("10.0.2.4") detected on cluster member "micro3"
-    Interface "enp7s0" ("10.0.2.2") detected on cluster member "micro1"
-
-    What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph internal traffic on? [default: 10.0.2.0/24]: 10.0.1.0/24
+    What subnet (either IPv4 or IPv6 CIDR notation) would you like your Ceph internal traffic on? [default: 203.0.113.0/24]: 10.0.1.0/24
 
     Interface "enp7s0" ("10.0.1.3") detected on cluster member "micro2"
     Interface "enp7s0" ("10.0.1.4") detected on cluster member "micro3"
@@ -114,7 +83,6 @@ The following instructions build on the :ref:`get-started` tutorial and show how
 
        WHO     MASK  LEVEL     OPTION                       VALUE        RO
        global        advanced  cluster_network              10.0.1.0/24  *
-       global        advanced  public_network               10.0.2.0/24  *
        global        advanced  osd_pool_default_crush_rule  2
 
    b. Inspect your Ceph-related network traffic:
@@ -156,6 +124,4 @@ The following instructions build on the :ref:`get-started` tutorial and show how
        17:48:48.604733 IP micro1.33328 > 10.0.1.4.6803: Flags [P.], seq 359839675:359904835, ack 938811, win 1886, options [nop,nop,TS val 3647909568 ecr 3552095031], length 65160
        17:48:48.604751 IP 10.0.1.4.6803 > micro1.33328: Flags [.], ack 359709355, win 24317, options [nop,nop,TS val 3552095035 ecr 3647909568], length 0
        17:48:48.604757 IP micro1.33328 > 10.0.1.4.6803: Flags [P.], seq 359904835:359910746, ack 938811, win 1886, options [nop,nop,TS val 3647909568 ecr 3552095035], length 5911
-       17:48:48.604797 IP micro1.33328 > 10.0.1.4.6803: Flags [P.], seq 359910746:359975906, ack 938811, win 1886, options [nop,nop,TS val 3647909568 ecr 3552095035], length 65160
-
-This guide showed how to set up a MicroCloud with a fully disaggregated Ceph networking setup, but you can also set up a partially disaggregated Ceph networking setup by using the same network subnet for both the Ceph public and internal traffic.
\ No newline at end of file
+       17:48:48.604797 IP micro1.33328 > 10.0.1.4.6803: Flags [P.], seq 359910746:359975906, ack 938811, win 1886, options [nop,nop,TS val 3647909568 ecr 3552095035], length 65160
\ No newline at end of file
diff --git a/doc/how-to/initialise.rst b/doc/how-to/initialise.rst
index 2bc88a1..d71c4f2 100644
--- a/doc/how-to/initialise.rst
+++ b/doc/how-to/initialise.rst
@@ -73,7 +73,6 @@ Complete the following steps to initialise MicroCloud:
       Wiping a disk will destroy all data on it.
 
    #. You can choose to optionally set up a CephFS distributed file system.
-#. Select either an IPv4 or IPv6 CIDR subnet for the Ceph public traffic. You can leave it empty to use the default value, which is the MicroCloud internal network (see :ref:`howto-ceph-networking` for how to configure it).
 #. Select either an IPv4 or IPv6 CIDR subnet for the Ceph internal traffic. You can leave it empty to use the default value, which is the MicroCloud internal network (see :ref:`howto-ceph-networking` for how to configure it).
 #. Select whether you want to set up distributed networking (using MicroOVN).
 
diff --git a/doc/tutorial/get_started.rst b/doc/tutorial/get_started.rst
index cf34ee9..d4c649d 100644
--- a/doc/tutorial/get_started.rst
+++ b/doc/tutorial/get_started.rst
@@ -269,7 +269,6 @@ Complete the following steps:
    #. Select all listed disks (these should be ``remote1``, ``remote2``, and ``remote3``).
    #. You don't need to wipe any disks (because we just created them).
    #. Select ``yes`` to optionally configure the CephFS distributed file system.
-   #. Leave the question empty for the IPv4 or IPv6 CIDR subnet address used for the Ceph public network.
    #. Leave the question empty for the IPv4 or IPv6 CIDR subnet address used for the Ceph internal network.
    #. Select ``yes`` to configure distributed networking.
    #. Select all listed network interfaces (these should be ``enp6s0`` on the four different VMs).

diff5.patch

diff --git a/test/includes/microcloud.sh b/test/includes/microcloud.sh
index 8da9945..fc840ac 100644
--- a/test/includes/microcloud.sh
+++ b/test/includes/microcloud.sh
@@ -4,7 +4,7 @@
 unset_interactive_vars() {
   unset LOOKUP_IFACE LIMIT_SUBNET SKIP_SERVICE EXPECT_PEERS REUSE_EXISTING REUSE_EXISTING_COUNT \
     SETUP_ZFS ZFS_FILTER ZFS_WIPE \
-    SETUP_CEPH CEPH_WARNING CEPH_FILTER CEPH_WIPE SETUP_CEPHFS CEPH_PUBLIC_NETWORK CEPH_CLUSTER_NETWORK IGNORE_CEPH_NETWORKING \
+    SETUP_CEPH CEPH_WARNING CEPH_FILTER CEPH_WIPE SETUP_CEPHFS CEPH_CLUSTER_NETWORK IGNORE_CEPH_NETWORKING \
     SETUP_OVN OVN_WARNING OVN_FILTER IPV4_SUBNET IPV4_START IPV4_END DNS_ADDRESSES IPV6_SUBNET
 }
 
@@ -27,7 +27,6 @@ microcloud_interactive() {
   CEPH_WARNING=${CEPH_WARNING:-}                 # (yes/no) input for warning about eligible disk detection.
   CEPH_FILTER=${CEPH_FILTER:-}                   # filter string for CEPH disks.
   CEPH_WIPE=${CEPH_WIPE:-}                       # (yes/no) to wipe all disks.
-  CEPH_PUBLIC_NETWORK=${CEPH_PUBLIC_NETWORK:-}   # (default: MicroCloud internal subnet) input for setting up a public network.
   CEPH_CLUSTER_NETWORK=${CEPH_CLUSTER_NETWORK:-} # (default: MicroCloud internal subnet or Ceph public network if specified previously) input for setting up a cluster network.
   IGNORE_CEPH_NETWORKING=${IGNORE_CEPH_NETWORKING:-} # (yes/no) input for ignoring Ceph network setup. Set it to `yes` during `microcloud add` .
   SETUP_OVN=${SETUP_OVN:-}                       # (yes/no) input for initiating OVN network setup.
@@ -90,13 +89,6 @@ $(true)                                                 # workaround for set -e
 fi
 
 if [ -z "${IGNORE_CEPH_NETWORKING}" ]; then
-  if [ -n "${CEPH_PUBLIC_NETWORK}" ]; then
-    setup="${setup}
-${CEPH_PUBLIC_NETWORK}
-$(true)                                                 # workaround for set -e
-"
-  fi
-
   if [ -n "${CEPH_CLUSTER_NETWORK}" ]; then
     setup="${setup}
 ${CEPH_CLUSTER_NETWORK}
@@ -181,12 +173,6 @@ validate_system_microceph() {
       shift 1
     fi
 
-    public_ceph_subnet=""
-    if echo "${1}" | grep -Pq '^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$'; then
-      public_ceph_subnet="${1}"
-      shift 1
-    fi
-
     cluster_ceph_subnet=""
     if echo "${1}" | grep -Pq '^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$'; then
       cluster_ceph_subnet="${1}"
@@ -195,7 +181,7 @@ validate_system_microceph() {
 
     disks="${*}"
 
-    echo "==> ${name} Validating MicroCeph. Using disks: {${disks}}, Using CephFS: {${cephfs}}, Public Ceph Subnet: {${public_ceph_subnet}}, Cluster Ceph Subnet: {${cluster_ceph_subnet}}"
+    echo "==> ${name} Validating MicroCeph. Using disks: {${disks}}, Using CephFS: {${cephfs}}, Cluster Ceph Subnet: {${cluster_ceph_subnet}}"
 
     lxc remote switch local
     lxc exec "${name}" -- sh -ceu "
@@ -222,12 +208,6 @@ validate_system_microceph() {
       " > /dev/null
     fi
 
-    if [ -n "${public_ceph_subnet}" ]; then
-      lxc exec "${name}" -- sh -ceu "
-        microceph.ceph config show osd.1 public_network | grep -q ${public_ceph_subnet}
-      " > /dev/null
-    fi
-
     if [ -n "${cluster_ceph_subnet}" ]; then
       lxc exec "${name}" -- sh -ceu "
         microceph.ceph config show osd.1 cluster_network | grep -q ${cluster_ceph_subnet}
@@ -1172,4 +1152,4 @@ ip_config_to_netaddr () {
        net_addr="$((io1 & mo1)).$((io2 & mo2)).$((io3 & mo3)).$((io4 & mo4))"
 
        echo "${net_addr}$(ip_prefix_by_netmask "${mask}")"
-}
\ No newline at end of file
+}
diff --git a/test/suites/add.sh b/test/suites/add.sh
index c248890..350b2cf 100644
--- a/test/suites/add.sh
+++ b/test/suites/add.sh
@@ -101,17 +101,13 @@ test_add_auto() {
 }
 
 test_add_interactive() {
-  reset_systems 4 2 3
+  reset_systems 4 2 2
 
-  ceph_public_subnet_prefix="10.0.1"
-  ceph_public_subnet_iface="enp7s0"
-  ceph_cluster_subnet_prefix="10.0.2"
-  ceph_cluster_subnet_iface="enp8s0"
+  ceph_cluster_subnet_prefix="10.0.1"
+  ceph_cluster_subnet_iface="enp7s0"
 
   for n in $(seq 2 5); do
-    public_ip="${ceph_public_subnet_prefix}.${n}/24"
     cluster_ip="${ceph_cluster_subnet_prefix}.${n}/24"
-    lxc exec "micro0$((n-1))" -- ip addr add "${public_ip}" dev "${ceph_public_subnet_iface}"
     lxc exec "micro0$((n-1))" -- ip addr add "${cluster_ip}" dev "${ceph_cluster_subnet_iface}"
   done
 
@@ -132,7 +128,6 @@ test_add_interactive() {
   export SETUP_CEPH="yes"
   export SETUP_CEPHFS="yes"
   export CEPH_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${ceph_public_subnet_prefix}.0/24"
   export CEPH_CLUSTER_NETWORK="${ceph_cluster_subnet_prefix}.0/24"
   export SETUP_OVN="yes"
   export OVN_FILTER="enp6s0"
@@ -167,7 +162,7 @@ test_add_interactive() {
 
   for m in micro01 micro02 micro03 micro04 ; do
     validate_system_lxd "${m}" 4 disk1 1 1 enp6s0 10.1.123.1/24 10.1.123.100-10.1.123.254 fd42:1:1234:1234::1/64  10.1.123.1,fd42:1:1234:1234::1
-    validate_system_microceph "${m}" 1 "${ceph_public_subnet_prefix}.0/24" "${ceph_cluster_subnet_prefix}.0/24" disk2
+    validate_system_microceph "${m}" 1 "${ceph_cluster_subnet_prefix}.0/24" disk2
     validate_system_microovn "${m}"
   done
 
diff --git a/test/suites/basic.sh b/test/suites/basic.sh
index 4b0a5eb..5d636f0 100644
--- a/test/suites/basic.sh
+++ b/test/suites/basic.sh
@@ -12,7 +12,6 @@ test_interactive() {
   export SETUP_ZFS="no"
   export SETUP_CEPH="no"
   export SETUP_OVN="no"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}" # Default to MicroCloud internal network
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
 
@@ -39,7 +38,6 @@ test_interactive() {
   export SETUP_ZFS="yes"
   export ZFS_FILTER="lxd_disk1"
   export ZFS_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}" # Default to MicroCloud internal network
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   unset SETUP_CEPH SETUP_OVN
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
@@ -64,7 +62,6 @@ test_interactive() {
   export SETUP_CEPHFS="yes"
   export CEPH_FILTER="lxd_disk2"
   export CEPH_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}" # Default to MicroCloud internal network
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
 
@@ -94,7 +91,6 @@ test_interactive() {
   export IPV4_END="10.1.123.254"
   export IPV6_SUBNET="fd42:1:1234:1234::1/64"
   export DNS_ADDRESSES="10.1.123.1,8.8.8.8"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}" # Default to MicroCloud internal network
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
 
@@ -115,7 +111,6 @@ test_interactive() {
   export SETUP_CEPHFS="yes"
   export CEPH_FILTER="lxd_disk2"
   export CEPH_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}" # Default to MicroCloud internal network
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
 
@@ -145,8 +140,7 @@ test_interactive() {
   export SETUP_CEPHFS="yes"
   export CEPH_FILTER="lxd_disk2"
   export CEPH_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${ceph_dedicated_subnet_prefix}.0/24"
-  export CEPH_CLUSTER_NETWORK="${ceph_dedicated_subnet_prefix}.0/24" # No need to set CEPH_CLUSTER_NETWORK as it defaults to the same as CEPH_PUBLIC_NETWORK if this one is explicitly set.
+  export CEPH_CLUSTER_NETWORK="${ceph_dedicated_subnet_prefix}.0/24"
   export SETUP_OVN="yes"
   export OVN_FILTER="enp6s0"
   export IPV4_SUBNET="10.1.123.1/24"
@@ -157,22 +151,18 @@ test_interactive() {
   lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
   for m in micro01 micro02 micro03 ; do
     validate_system_lxd "${m}" 3 disk1 3 1 "${OVN_FILTER}" "${IPV4_SUBNET}" "${IPV4_START}"-"${IPV4_END}" "${IPV6_SUBNET}"
-    validate_system_microceph "${m}" 1 "${ceph_dedicated_subnet_prefix}.0/24" "${ceph_dedicated_subnet_prefix}.0/24" disk2
+    validate_system_microceph "${m}" 1 "${ceph_dedicated_subnet_prefix}.0/24" disk2
     validate_system_microovn "${m}"
   done
 
-  # Reset the systems and install microovn and microceph with a fully disaggregated ceph network setup.
-  reset_systems 3 3 3
+  # Reset the systems and install microovn and microceph with a partially disaggregated ceph network setup.
+  reset_systems 3 3 2
 
-  ceph_public_subnet_prefix="10.0.1"
-  ceph_public_subnet_iface="enp7s0"
-  ceph_cluster_subnet_prefix="10.0.2"
-  ceph_cluster_subnet_iface="enp8s0"
+  ceph_cluster_subnet_prefix="10.0.1"
+  ceph_cluster_subnet_iface="enp7s0"
 
   for n in $(seq 2 4); do
-    public_ip="${ceph_public_subnet_prefix}.${n}/24"
     cluster_ip="${ceph_cluster_subnet_prefix}.${n}/24"
-    lxc exec "micro0$((n-1))" -- ip addr add "${public_ip}" dev "${ceph_public_subnet_iface}"
     lxc exec "micro0$((n-1))" -- ip addr add "${cluster_ip}" dev "${ceph_cluster_subnet_iface}"
   done
 
@@ -184,7 +174,6 @@ test_interactive() {
   export SETUP_CEPHFS="yes"
   export CEPH_FILTER="lxd_disk2"
   export CEPH_WIPE="yes"
-  export CEPH_PUBLIC_NETWORK="${ceph_public_subnet_prefix}.0/24"
   export CEPH_CLUSTER_NETWORK="${ceph_cluster_subnet_prefix}.0/24"
   export SETUP_OVN="yes"
   export OVN_FILTER="enp6s0"
@@ -196,7 +185,7 @@ test_interactive() {
   lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
   for m in micro01 micro02 micro03 ; do
     validate_system_lxd "${m}" 3 disk1 3 1 "${OVN_FILTER}" "${IPV4_SUBNET}" "${IPV4_START}"-"${IPV4_END}" "${IPV6_SUBNET}"
-    validate_system_microceph "${m}" 1 "${CEPH_PUBLIC_NETWORK}" "${CEPH_CLUSTER_NETWORK}" disk2
+    validate_system_microceph "${m}" 1 "${CEPH_CLUSTER_NETWORK}" disk2
     validate_system_microovn "${m}"
   done
 }
@@ -537,7 +526,6 @@ ovn:
   ipv4_range: 10.1.123.100-10.1.123.254
   ipv6_gateway: fd42:1:1234:1234::1/64
 ceph:
-  public_network: ${ceph_dedicated_subnet_prefix}.0/24
   internal_network: ${ceph_dedicated_subnet_prefix}.0/24
 storage:
   cephfs: true
@@ -595,112 +583,6 @@ EOF
     return 1
     "
   done
-
-  # Create a MicroCloud with ceph, fully disaggregated ceph networking and ovn setup.
-  reset_systems 3 3 3
-  addr=$(lxc ls micro01 -f json -c4 | jq -r '.[0].state.network.enp5s0.addresses[] | select(.family == "inet") | .address')
-
-  ceph_public_subnet_prefix="10.0.1"
-  ceph_public_subnet_iface="enp7s0"
-  ceph_cluster_subnet_prefix="10.0.2"
-  ceph_cluster_subnet_iface="enp8s0"
-
-  for n in $(seq 2 4); do
-    public_ip="${ceph_public_subnet_prefix}.${n}/24"
-    cluster_ip="${ceph_cluster_subnet_prefix}.${n}/24"
-    lxc exec "micro0$((n-1))" -- ip addr add "${public_ip}" dev "${ceph_public_subnet_iface}"
-    lxc exec "micro0$((n-1))" -- ip addr add "${cluster_ip}" dev "${ceph_cluster_subnet_iface}"
-  done
-
-  lxc exec micro01 --env TEST_CONSOLE=0 -- microcloud init --preseed <<EOF
-lookup_subnet: ${addr}/24
-lookup_interface: enp5s0
-systems:
-- name: micro01
-  storage:
-    ceph:
-      - path: /dev/sdc
-        wipe: true
-      - path: /dev/sdd
-        wipe: true
-- name: micro02
-  storage:
-    ceph:
-      - path: /dev/sdc
-        wipe: true
-      - path: /dev/sdd
-        wipe: true
-- name: micro03
-  storage:
-    ceph:
-      - path: /dev/sdc
-        wipe: true
-      - path: /dev/sdd
-        wipe: true
-ovn:
-  ipv4_gateway: 10.1.123.1/24
-  ipv4_range: 10.1.123.100-10.1.123.254
-  ipv6_gateway: fd42:1:1234:1234::1/64
-ceph:
-  public_network: ${ceph_public_subnet_prefix}.0/24
-  internal_network: ${ceph_cluster_subnet_prefix}.0/24
-storage:
-  cephfs: true
-EOF
-
-  # Add cloud-init entry for checking ready state on launched instances.
-  lxc exec micro01 -- lxc profile edit default << EOF
-config:
-  cloud-init.user-data: |
-    #cloud-config
-    packages:
-    - iputils-ping
-    write_files:
-      - content: |
-          #!/bin/sh
-          exec curl --unix-socket /dev/lxd/sock lxd/1.0 -X PATCH -d '{"state": "Ready"}'
-        path: /var/lib/cloud/scripts/per-boot/ready.sh
-        permissions: "0755"
-devices:
-  fs:
-    ceph.cluster_name: ceph
-    ceph.user_name: admin
-    path: /cephfs
-    source: cephfs:lxd_cephfs/
-    type: disk
-EOF
-
-  # Launch a container and VM with CEPH storage & OVN network.
-  if [ "${SKIP_VM_LAUNCH}" = "1" ]; then
-    echo "::warning::SKIPPING VM LAUNCH TEST"
-  else
-    lxc exec micro01 -- lxc launch ubuntu-minimal:22.04 v1 -c limits.memory=512MiB -d root,size=3GiB --vm -s remote -n default
-  fi
-  lxc exec micro01 -- lxc launch ubuntu-minimal:22.04 c1 -c limits.memory=512MiB -d root,size=1GiB -s remote -n default
-
-  # Ensure we can reach the launched instances.
-  for m in c1 v1 ; do
-    if [ "${m}" = "v1" ] && [ "${SKIP_VM_LAUNCH}" = "1" ]; then
-      continue
-    fi
-
-    echo -n "Waiting up to 5 mins for ${m} to start "
-    lxc exec micro01 -- sh -ceu "
-    for round in \$(seq 100); do
-      if lxc info ${m} | grep -qxF 'Status: READY'; then
-         lxc exec ${m} -- stat /cephfs
-         echo \" ${m} booted successfully\"
-
-         return 0
-      fi
-      echo -n .
-      sleep 3
-    done
-    echo FAIL
-    return 1
-    "
-  done
-
 }
 
 _test_case() {
@@ -752,7 +634,6 @@ _test_case() {
 
     LOOKUP_IFACE="enp5s0" # filter string for the lookup interface table.
     LIMIT_SUBNET="yes" # (yes/no) input for limiting lookup of systems to the above subnet.
-    CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}"
     CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
 
     EXPECT_PEERS="$((num_systems - 1))"
@@ -995,7 +876,6 @@ test_disk_mismatch() {
   export CEPH_WARNING="yes"
   export CEPH_WIPE="yes"
   export SETUP_OVN="no"
-  export CEPH_PUBLIC_NETWORK="${microcloud_internal_net_addr}"
   export CEPH_CLUSTER_NETWORK="${microcloud_internal_net_addr}"
   microcloud_interactive | lxc exec micro01 -- sh -c "microcloud init > out"
   lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q
gabrielmougard self-assigned this on Jun 3, 2024
@UtkarshBhatthere commented:

There is a similar issue on our repository that can be used as a reference here.

mseralessandri added the Feature (New feature, not a bug) label on Sep 4, 2024
@roosterfish (Contributor) commented:

@gabrielmougard I think this one can be closed as #417 got merged?

@gabrielmougard (Contributor, Author) commented:

@roosterfish yes :)
