From 7c8420817e9956ba6dae473e5725afc930704669 Mon Sep 17 00:00:00 2001 From: Roman Dodin Date: Sun, 12 Nov 2023 22:55:04 +0100 Subject: [PATCH] Added non-vr prefixed kind names (#1710) * remove nxos * added non vr kind names * fix link * fix link * use clean kind names in examples --- Makefile | 2 +- clab/register.go | 4 +- docs/manual/kinds/vr-aoscx.md | 9 +-- docs/manual/kinds/vr-csr.md | 20 ++--- docs/manual/kinds/vr-ftosv.md | 21 ++--- docs/manual/kinds/vr-n9kv.md | 20 ++--- docs/manual/kinds/vr-nxos.md | 62 -------------- docs/manual/kinds/vr-pan.md | 18 ++--- docs/manual/kinds/vr-ros.md | 22 ++--- docs/manual/kinds/vr-sros.md | 52 ++++++------ docs/manual/kinds/vr-veos.md | 22 ++--- docs/manual/kinds/vr-vjunosswitch.md | 18 ++--- docs/manual/kinds/vr-vmx.md | 20 ++--- docs/manual/kinds/vr-vqfx.md | 16 ++-- docs/manual/kinds/vr-vsrx.md | 12 +-- docs/manual/kinds/vr-xrv.md | 29 ++++--- docs/manual/kinds/vr-xrv9k.md | 26 +++--- docs/manual/vrnetlab.md | 71 +++++++++------- lab-examples/cert01/cert01.clab.yml | 2 +- lab-examples/cvx01/topo.clab.yml | 6 +- lab-examples/cvx02/topo.clab.yml | 4 +- lab-examples/srl01/srl01.clab.yml | 4 +- lab-examples/srl02/srl02.clab.yml | 4 +- lab-examples/srl03/srl03.clab.yml | 10 +-- lab-examples/srlceos01/srlceos01.clab.yml | 4 +- lab-examples/srlcrpd01/srlcrpd01.clab.yml | 4 +- lab-examples/srlfrr01/srlfrr01.clab.yml | 2 +- lab-examples/srlvjunos01/srlvjunos01.clab.yml | 4 +- .../templated01/templated01.clab.gotmpl | 4 +- .../templated02/templated02.clab.gotmpl | 4 +- lab-examples/vr01/vr01.clab.yml | 8 +- lab-examples/vr02/vr02.clab.yml | 4 +- lab-examples/vr03/vr03.clab.yml | 4 +- lab-examples/vr04/vr04.clab.yml | 6 +- lab-examples/vr05/sros4.clab.yml | 2 +- lab-examples/vr05/vr01.clab.yml | 4 +- lab-examples/vsrx01/vsrx01.yml | 4 +- lab-examples/vxlan01/vxlan-sros.clab.yml | 2 +- lab-examples/vxlan01/vxlan-vmx.clab.yml | 2 +- nodes/vr_aoscx/vr-aoscx.go | 2 +- nodes/vr_csr/vr-csr.go | 2 +- nodes/vr_ftosv/vr-ftosv.go | 2 +- nodes/vr_n9kv/vr-n9kv.go | 2 +- nodes/vr_nxos/vr-nxos.go | 81 ------------------- nodes/vr_pan/vr-pan.go | 2 +- nodes/vr_ros/vr-ros.go | 2 +- nodes/vr_sros/vr-sros.go | 2 +- nodes/vr_veos/vr-veos.go | 2 +- nodes/vr_vjunosswitch/vr-vjunosswitch.go | 2 +- nodes/vr_vmx/vr-vmx.go | 2 +- nodes/vr_vqfx/vr-vqfx.go | 2 +- nodes/vr_vsrx/vr-vsrx.go | 2 +- nodes/vr_xrv/vr-xrv.go | 2 +- nodes/vr_xrv9k/vr-xrv9k.go | 2 +- schemas/clab.schema.json | 16 +++- 55 files changed, 278 insertions(+), 379 deletions(-) delete mode 100644 docs/manual/kinds/vr-nxos.md delete mode 100644 nodes/vr_nxos/vr-nxos.go diff --git a/Makefile b/Makefile index a15212f33..d734fa32e 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ serve-docs: .PHONY: htmltest htmltest: - docker run --rm -v $(CURDIR):/docs squidfunk/mkdocs-material:$(MKDOCS_VER) build --clean --strict + docker run --rm -v $(CURDIR):/docs ghcr.io/srl-labs/mkdocs-material-insiders:$(MKDOCS_INS_VER) build --clean --strict docker run --rm -v $(CURDIR):/test wjdp/htmltest --conf ./site/htmltest-w-github.yml rm -rf ./site diff --git a/clab/register.go b/clab/register.go index bb91ee273..412a7ae31 100644 --- a/clab/register.go +++ b/clab/register.go @@ -25,15 +25,14 @@ import ( vr_csr "github.com/srl-labs/containerlab/nodes/vr_csr" vr_ftosv "github.com/srl-labs/containerlab/nodes/vr_ftosv" vr_n9kv "github.com/srl-labs/containerlab/nodes/vr_n9kv" - vr_nxos "github.com/srl-labs/containerlab/nodes/vr_nxos" vr_pan "github.com/srl-labs/containerlab/nodes/vr_pan" vr_ros 
"github.com/srl-labs/containerlab/nodes/vr_ros" vr_sros "github.com/srl-labs/containerlab/nodes/vr_sros" vr_veos "github.com/srl-labs/containerlab/nodes/vr_veos" + vr_vjunosswitch "github.com/srl-labs/containerlab/nodes/vr_vjunosswitch" vr_vmx "github.com/srl-labs/containerlab/nodes/vr_vmx" vr_vqfx "github.com/srl-labs/containerlab/nodes/vr_vqfx" vr_vsrx "github.com/srl-labs/containerlab/nodes/vr_vsrx" - vr_vjunosswitch "github.com/srl-labs/containerlab/nodes/vr_vjunosswitch" vr_xrv "github.com/srl-labs/containerlab/nodes/vr_xrv" vr_xrv9k "github.com/srl-labs/containerlab/nodes/vr_xrv9k" xrd "github.com/srl-labs/containerlab/nodes/xrd" @@ -58,7 +57,6 @@ func (c *CLab) RegisterNodes() { vr_csr.Register(c.Reg) vr_ftosv.Register(c.Reg) vr_n9kv.Register(c.Reg) - vr_nxos.Register(c.Reg) vr_pan.Register(c.Reg) vr_ros.Register(c.Reg) vr_sros.Register(c.Reg) diff --git a/docs/manual/kinds/vr-aoscx.md b/docs/manual/kinds/vr-aoscx.md index 217d9dda4..c95ac53ef 100644 --- a/docs/manual/kinds/vr-aoscx.md +++ b/docs/manual/kinds/vr-aoscx.md @@ -4,7 +4,7 @@ search: --- # Aruba ArubaOS-CX -ArubaOS-CX virtualized switch is identified with `vr-aoscx` or `vr-aruba_aoscx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +ArubaOS-CX virtualized switch is identified with `aruba_aoscx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. ## Managing vr-aoscx nodes @@ -33,7 +33,7 @@ Aruba AOS-CX node launched with containerlab can be managed via the following in * `eth0` - management interface connected to the containerlab management network * `eth1+` - second and subsequent data interface -When containerlab launches vr-aoscx node, it will assign IPv4 address to the `eth0` interface. These addresses can be used to reach management plane of the router. +When containerlab launches ArubaOS-CX node, it will assign IPv4 address to the `eth0` interface. These addresses can be used to reach management plane of the router. Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols. @@ -41,7 +41,7 @@ Data interfaces `eth1+` needs to be configured with IP addressing manually using ### Node configuration -vr-aoscx nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user with the provided password. +ArubaOS-CX nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user with the provided password. #### Startup configuration @@ -51,11 +51,10 @@ It is possible to make ArubaOS-CX nodes boot up with a user-defined startup-conf topology: nodes: node: - kind: vr-aoscx + kind: aruba_aoscx startup-config: myconfig.txt ``` With this knob containerlab is instructed to take a file `myconfig.txt` from the directory that hosts the topology file, and copy it to the lab directory for that specific node under the `/config/startup-config.cfg` name. Then the directory that hosts the startup-config dir is mounted to the container. This will result in this config being applied at startup by the node. Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with. 
-
diff --git a/docs/manual/kinds/vr-csr.md b/docs/manual/kinds/vr-csr.md index 59b35583d..7593bc4d3 100644 --- a/docs/manual/kinds/vr-csr.md +++ b/docs/manual/kinds/vr-csr.md @@ -4,11 +4,11 @@ search: --- # Cisco CSR1000v
-Cisco CSR1000v virtualized router is identified with `vr-csr` or `vr-cisco_csr1000v` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+Cisco CSR1000v virtualized router is identified with `cisco_csr1000v` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
-vr-csr nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled.
+Cisco CSR1000v nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled.
-## Managing vr-csr nodes
+## Managing Cisco CSR1000v nodes
!!!note Containers with CSR1000v inside will take ~6min to fully boot.
@@ -17,7 +17,7 @@ vr-csr nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, Cisco CSR1000v node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-csr container: + to connect to a `bash` shell of a running Cisco CSR1000v container: ```bash docker exec -it bash ```
@@ -36,30 +36,32 @@ Cisco CSR1000v node launched with containerlab can be managed via the following Default user credentials: `admin:admin`
## Interfaces mapping
-vr-csr container can have up to 144 interfaces and uses the following mapping rules:
+
+Cisco CSR1000v container can have up to 144 interfaces and uses the following mapping rules:
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to first data port of CSR1000v line card
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-csr node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches Cisco CSR1000v node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols.
-
## Features and options
+
### Node configuration
-vr-csr nodes come up with a basic configuration where only `admin` user and management interfaces such as NETCONF provisioned.
+Cisco CSR1000v nodes come up with a basic configuration where only the `admin` user and management interfaces such as NETCONF are provisioned.
#### Startup configuration
+
It is possible to make CSR1000V nodes boot up with a user-defined startup-config instead of a built-in one.
With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config:
```yaml topology: nodes: node: - kind: vr-csr + kind: cisco_csr1000v startup-config: myconfig.txt ```
diff --git a/docs/manual/kinds/vr-ftosv.md b/docs/manual/kinds/vr-ftosv.md index 0fa2b5e4f..421ca663a 100644 --- a/docs/manual/kinds/vr-ftosv.md +++ b/docs/manual/kinds/vr-ftosv.md @@ -4,11 +4,11 @@ search: --- # Dell FTOSv (OS10) / ftosv
-Dell FTOSv (OS10) virtualized router/switch is identified with `vr-ftosv` or `vr-dell_ftosv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+Dell FTOSv (OS10) virtualized router/switch is identified with `dell_ftosv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
-vr-ftosv nodes launched with containerlab comes up pre-provisioned with SSH and SNMP services enabled.
+Dell FTOSv nodes launched with containerlab come up pre-provisioned with SSH and SNMP services enabled.
-## Managing vr-ftosv nodes
+## Managing Dell FTOSv nodes
!!!note Containers with FTOS10v inside will take ~2-4min to fully boot.
@@ -17,7 +17,7 @@ vr-ftosv nodes launched with containerlab comes up pre-provisioned with SSH and Dell FTOS10v node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-ftosv container: + to connect to a `bash` shell of a running Dell FTOSv container: ```bash docker exec -it bash ```
@@ -31,29 +31,32 @@ Dell FTOS10v node launched with containerlab can be managed via the following in Default user credentials: `admin:admin`
## Interfaces mapping
-vr-ftosv container can have different number of available interfaces which depends on platform used under FTOS10 virtualization .qcow2 disk and container image built using [vrnetlab](../vrnetlab.md) project. Interfaces uses the following mapping rules (in topology file):
+
+Dell FTOSv container can have a different number of available interfaces, which depends on the platform .qcow2 disk used under FTOS10 virtualization and the container image built using the [vrnetlab](../vrnetlab.md) project. Interfaces use the following mapping rules (in the topology file):
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to first data port of FTOS10v line card
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-ftosv node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches Dell FTOSv node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols.
-
## Features and options
+
### Node configuration
-vr-ftosv nodes come up with a basic configuration where only `admin` user and management interfaces such as SSH provisioned.
+
+Dell FTOSv nodes come up with a basic configuration where only the `admin` user and management interfaces such as SSH are provisioned.
#### Startup configuration
+
It is possible to make FTOSv nodes boot up with a user-defined startup-config instead of a built-in one.
With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config:
```yaml topology: nodes: node: - kind: vr-ftosv + kind: dell_ftosv startup-config: myconfig.txt ```
diff --git a/docs/manual/kinds/vr-n9kv.md b/docs/manual/kinds/vr-n9kv.md index 523dc083d..46f277a51 100644 --- a/docs/manual/kinds/vr-n9kv.md +++ b/docs/manual/kinds/vr-n9kv.md @@ -4,11 +4,11 @@ search: --- # Cisco Nexus 9000v
-Cisco Nexus9000v virtualized router is identified with `vr-n9kv` or `vr-cisco_n9kv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+Cisco Nexus9000v virtualized router is identified with `cisco_n9kv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
-vr-n9kv nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF, NXAPI and gRPC services enabled.
+Cisco Nexus 9000v nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF, NXAPI and gRPC services enabled.
-## Managing vr-n9kv nodes
+## Managing Cisco Nexus 9000v nodes
!!!note Containers with Nexus 9000v inside will take ~8-10min to fully boot.
@@ -17,7 +17,7 @@ vr-n9kv nodes launched with containerlab comes up pre-provisioned with SSH, SNMP Cisco Nexus 9000v node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-n9kv container: + to connect to a `bash` shell of a running Cisco Nexus 9000v container: ```bash docker exec -it bash ```
@@ -38,30 +38,32 @@ Cisco Nexus 9000v node launched with containerlab can be managed via the followi Default user credentials: `admin:admin`
## Interfaces mapping
-vr-n9kv container can have up to 128 interfaces and uses the following mapping rules:
+
+Cisco Nexus 9000v container can have up to 128 interfaces and uses the following mapping rules:
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to first data port of Nexus 9000v line card
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-n9kv node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches Cisco Nexus 9000v node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols.
-
## Features and options
+
### Node configuration
-vr-n9kv nodes come up with a basic configuration where only `admin` user and management interfaces such as NETCONF, NXAPI and GRPC provisioned.
+Cisco Nexus 9000v nodes come up with a basic configuration where only the `admin` user and management interfaces such as NETCONF, NXAPI and gRPC are provisioned.
#### Startup configuration
+
It is possible to make n9kv nodes boot up with a user-defined startup-config instead of a built-in one.
With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config: ```yaml topology: nodes: node: - kind: vr-n9kv + kind: cisco_n9kv startup-config: myconfig.txt ``` diff --git a/docs/manual/kinds/vr-nxos.md b/docs/manual/kinds/vr-nxos.md deleted file mode 100644 index 08e75e3f7..000000000 --- a/docs/manual/kinds/vr-nxos.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -search: - boost: 4 ---- -# Cisco NXOS - -[Cisco NXOS](https://www.cisco.com/c/en/us/products/ios-nx-os-software/nx-os/index.html) virtual appliance is identified with `vr-nxos` or `vr-cisco_nxos` kind in the [topology file](../topo-def-file.md). It is built using [hellt/vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. - -!!!note - This is a Titanium based system, which is an older version of NX-OS. - -vr-nxos nodes launched with containerlab come up pre-provisioned with SSH service enabled. - -## Managing vr-nxos nodes -Cisco NXOS node launched with containerlab can be managed via the following interfaces: - -=== "bash" - to connect to a `bash` shell of a running vr-nxos container: - ```bash - docker exec -it bash - ``` -=== "CLI via SSH" - to connect to the NX-OS CLI - ```bash - ssh clab@ - ``` - - -!!!info - Default user credentials: `admin:admin` - -## Interfaces mapping -vr-nxos container can have up to 90 interfaces and uses the following mapping rules: - -* `eth0` - management interface connected to the containerlab management network -* `eth1` - first data interface, mapped to first data port of NX-OS line card -* `eth2+` - second and subsequent data interface - -When containerlab launches vr-nxos node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. - -Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols. - - -## Features and options -### Node configuration -vr-nxos nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `clab` user. - - -#### Startup configuration -It is possible to make NXOS nodes boot up with a user-defined startup-config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config: - -```yaml -topology: - nodes: - node: - kind: vr-nxos - startup-config: myconfig.txt -``` - -With this knob containerlab is instructed to take a file `myconfig.txt` from the directory that hosts the topology file, and copy it to the lab directory for that specific node under the `/config/startup-config.cfg` name. Then the directory that hosts the startup-config dir is mounted to the container. This will result in this config being applied at startup by the node. - -Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with. diff --git a/docs/manual/kinds/vr-pan.md b/docs/manual/kinds/vr-pan.md index d761e98bc..392b2c573 100644 --- a/docs/manual/kinds/vr-pan.md +++ b/docs/manual/kinds/vr-pan.md @@ -4,11 +4,11 @@ search: --- # Palo Alto PA-VM -Palo Alto PA-VM virtualized firewall is identified with `vr-pan` or `vr-paloalto_panos` kind in the [topology file](../topo-def-file.md). 
It is built using [boxen](https://github.com/carlmontanari/boxen/) project and essentially is a Qemu VM packaged in a docker container format.
+Palo Alto PA-VM virtualized firewall is identified with `paloalto_panos` kind in the [topology file](../topo-def-file.md). It is built using [boxen](https://github.com/carlmontanari/boxen/) project and essentially is a Qemu VM packaged in a docker container format.
-vr-pan nodes launched with containerlab come up pre-provisioned with SSH, and HTTPS services enabled.
+Palo Alto PA-VM nodes launched with containerlab come up pre-provisioned with SSH and HTTPS services enabled.
-## Managing vr-pan nodes
+## Managing Palo Alto PA-VM nodes
!!!note Containers with Palo Alto PA-VM inside will take ~8min to fully boot.
@@ -17,7 +17,7 @@ vr-pan nodes launched with containerlab come up pre-provisioned with SSH, and HT Palo Alto PA-VM node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-pan container: + to connect to a `bash` shell of a running Palo Alto PA-VM container: ```bash docker exec -it bash ```
@@ -34,13 +34,13 @@ Palo Alto PA-VM node launched with containerlab can be managed via the following ## Interfaces mapping
-vr-pan container supports up to 24 interfaces (plus mgmt) and uses the following mapping rules:
+Palo Alto PA-VM container supports up to 24 interfaces (plus mgmt) and uses the following mapping rules:
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to first data port of PAN VM
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-pan node, it will assign IPv4/6 address to the `mgmt` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches Palo Alto PA-VM node, it will assign IPv4/6 address to the `mgmt` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` need to be configured with IP addressing manually using CLI/management protocols.
@@ -51,17 +51,17 @@ Data interfaces `eth1+` need to be configured with IP addressing manually using ### Node configuration
-vr-pan nodes come up with a basic configuration where only `admin` user and management interface is provisioned.
+Palo Alto PA-VM nodes come up with a basic configuration where only the `admin` user and the management interface are provisioned.
### User defined config
-It is possible to make `vr-pan` nodes to boot up with a user-defined config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property a user sets the path to the config file that will be mounted to a container and used as a startup config:
+It is possible to make Palo Alto PA-VM nodes boot up with a user-defined config instead of a built-in one.
With a [`startup-config`](../nodes.md#startup-config) property a user sets the path to the config file that will be mounted to a container and used as a startup config:
```yaml name: lab topology: nodes: ceos: - kind: vr-paloalto_panos + kind: paloalto_panos startup-config: myconfig.conf ```
diff --git a/docs/manual/kinds/vr-ros.md b/docs/manual/kinds/vr-ros.md index c291228c7..9974a91f1 100644 --- a/docs/manual/kinds/vr-ros.md +++ b/docs/manual/kinds/vr-ros.md @@ -4,19 +4,19 @@ search: --- # MikroTik RouterOS Cloud-hosted router
-[MikroTik RouterOS](https://mikrotik.com/download) cloud hosted router is identified with `vr-ros` or `vr-mikrotik_ros` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+[MikroTik RouterOS](https://mikrotik.com/download) cloud hosted router is identified with `mikrotik_ros` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
-## Managing vr-ros nodes
+## Managing MikroTik RouterOS nodes
MikroTik RouterOS node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-ros container: + to connect to a `bash` shell of a running MikroTik RouterOS container: ```bash docker exec -it bash ```
=== "CLI" - to connect to the vr-ros CLI + to connect to the MikroTik RouterOS CLI ```bash ssh admin@ ```
@@ -32,20 +32,23 @@ MikroTik RouterOS node launched with containerlab can be managed via the followi Default user credentials: `admin:admin`
## Interfaces mapping
-vr-ros container can have up to 30 interfaces and uses the following mapping rules:
+
+MikroTik RouterOS container can have up to 30 interfaces and uses the following mapping rules:
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to the `ether2` interface of the RouterOS
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-ros node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches MikroTik RouterOS node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols.
### Node configuration
-vr-ros nodes come up with a basic "blank" configuration where only the management interface and user is provisioned.
+
+MikroTik RouterOS nodes come up with a basic "blank" configuration where only the management interface and user are provisioned.
#### User defined config
+
It is possible to make ROS nodes to boot up with a user-defined startup config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind a user sets the path to the config file that will be mounted to a container and used as a startup config:
```yaml name: ros_lab topology: nodes: ros: - kind: vr-ros + kind: mikrotik_ros startup-config: myconfig.txt ```
With such topology file containerlab is instructed to take a file `myconfig.txt` from the current working directory, copy it to the lab directory for that specific node under the `/ftpboot/config.auto.rsc` name and mount that dir to the container.
This will result in this config to act as a startup config for the node via FTP. Mikrotik will automatically import any file with the .auto.rsc suffix. ### File mounts -When a user starts a lab, containerlab creates a node directory for storing [configuration artifacts](../conf-artifacts.md). For `vr-ros` kind containerlab creates `ftpboot` directory where the config file will be copied as config.auto.rsc. \ No newline at end of file + +When a user starts a lab, containerlab creates a node directory for storing [configuration artifacts](../conf-artifacts.md). For MikroTik RouterOS kind containerlab creates `ftpboot` directory where the config file will be copied as config.auto.rsc. diff --git a/docs/manual/kinds/vr-sros.md b/docs/manual/kinds/vr-sros.md index 6c845fcd9..0a5d51cf1 100644 --- a/docs/manual/kinds/vr-sros.md +++ b/docs/manual/kinds/vr-sros.md @@ -4,11 +4,11 @@ search: --- # Nokia SR OS -[Nokia SR OS](https://www.nokia.com/networks/products/service-router-operating-system/) virtualized router is identified with `vr-sros` or `vr-nokia_sros` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Nokia SR OS](https://www.nokia.com/networks/products/service-router-operating-system/) virtualized router is identified with `nokia_sros` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -vr-sros nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. +Nokia SR OS nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. -## Managing vr-sros nodes +## Managing Nokia SR OS nodes !!!note Containers with SR OS inside will take ~3min to fully boot. @@ -17,7 +17,7 @@ vr-sros nodes launched with containerlab come up pre-provisioned with SSH, SNMP, Nokia SR OS node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-sros container: + to connect to a `bash` shell of a running Nokia SR OS container: ```bash docker exec -it bash ``` @@ -51,7 +51,7 @@ Nokia SR OS node launched with containerlab can be managed via the following int ## Interfaces mapping -vr-sros container uses the following mapping for its interfaces: +Nokia SR OS container uses the following mapping for its interfaces: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to the first data port of SR OS line card @@ -65,7 +65,7 @@ Interfaces can be defined in a non-sequential way, for example: - endpoints: ["sr1:eth3", "sr2:eth5"] ``` -When containerlab launches vr-sros node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. +When containerlab launches Nokia SR OS node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. Data interfaces `eth1+` need to be configured with IP addressing manually using CLI/management protocols. 
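As a concrete sketch of the mapping above, the following two-node topology wires the first data ports of two SR OS routers together; the image tag and license file name are placeholders for your own locally built artifacts:

```yaml
# hypothetical lab file: sros-pair.clab.yml
name: sros-pair
topology:
  nodes:
    sr1:
      kind: nokia_sros                 # new kind name introduced by this PR
      image: vrnetlab/vr-sros:23.7.R1  # placeholder tag of a locally built image
      license: license-sros.txt        # placeholder license path
    sr2:
      kind: nokia_sros
      image: vrnetlab/vr-sros:23.7.R1
      license: license-sros.txt
  links:
    # eth1 on each node maps to the first data port of the SR OS line card
    - endpoints: ["sr1:eth1", "sr2:eth1"]
```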
@@ -75,16 +75,16 @@ Data interfaces `eth1+` need to be configured using CLI/management protocols
Virtual SR OS simulator can be run in multiple HW variants as explained in [the vSIM installation guide](https://documentation.nokia.com/cgi-bin/dbaccessfilename.cgi/3HE15836AAADTQZZA01_V1_vSIM%20Installation%20and%20Setup%20Guide%2020.10.R1.pdf).
-`vr-sros` container images come with [pre-packaged SR OS variants](https://github.com/hellt/vrnetlab/tree/master/sros#variants) as defined in the upstream repo as well as support [custom variant definition](https://github.com/hellt/vrnetlab/tree/master/sros#custom-variant). The pre-packaged variants are identified by the variant name and come up with cards and mda already configured. On the other hand, custom variants give users total flexibility in emulated hardware configuration, but cards and MDAs must be configured manually.
+Nokia SR OS container images come with [pre-packaged SR OS variants](https://github.com/hellt/vrnetlab/tree/master/sros#variants) as defined in the upstream repo as well as support [custom variant definition](https://github.com/hellt/vrnetlab/tree/master/sros#custom-variant). The pre-packaged variants are identified by the variant name and come up with cards and MDAs already configured. On the other hand, custom variants give users total flexibility in emulated hardware configuration, but cards and MDAs must be configured manually.
-To make vr-sros to boot in one of the packaged variants, set the type to one of the predefined variant values:
+To make Nokia SR OS boot in one of the packaged variants, set the type to one of the predefined variant values:
```yaml topology: nodes: sros: - kind: vr-sros - image: vrnetlab/vr-sros:20.10.R1 + kind: nokia_sros + image: vrnetlab/nokia_sros:20.10.R1 type: sr-1s # if omitted, the default sr-1 variant will be used license: license-sros20.txt ```
@@ -129,15 +129,15 @@ type: >- topology: nodes: R1: - kind: vr-sros - image: vr-sros:22.7.R2 + kind: nokia_sros + image: nokia_sros:22.7.R2 type: >- cp: cpu=2 min_ram=4 chassis=sr-7 slot=A card=cpm5 ___ lc: cpu=4 min_ram=4 max_nics=6 chassis=sr-7 slot=1 card=iom4-e mda/1=me6-10gb-sfp+ ___ lc: cpu=4 min_ram=4 max_nics=6 chassis=sr-7 slot=2 card=iom4-e mda/1=me6-10gb-sfp+ R2: - kind: vr-sros - image: sros:22.7.R2 + kind: nokia_sros + image: nokia_sros:22.7.R2 type: >- cp: cpu=2 min_ram=4 chassis=sr-7 slot=A card=cpm5 ___ lc: cpu=4 min_ram=4 max_nics=6 chassis=sr-7 slot=1 card=iom4-e mda/1=me6-10gb-sfp+ ___
@@ -162,7 +162,7 @@ type: "cpu=2 ram=4 slot=A chassis=ixr-r6 card=cpiom-ixr-r6 mda/1=m6-10g-sfp++4-2 ### Node configuration
-vr-sros nodes come up with a basic "blank" configuration where only the card/mda are provisioned, as well as the management interfaces such as Netconf, SNMP, gNMI.
+Nokia SR OS nodes come up with a basic "blank" configuration where only the card/mda are provisioned, as well as the management interfaces such as Netconf, SNMP, gNMI.
#### User-defined config
@@ -181,7 +181,7 @@ name: sros_lab topology: nodes: sros: - kind: vr-sros + kind: nokia_sros startup-config: myconfig.txt ```
@@ -199,7 +199,7 @@ name: sros_lab topology: nodes: sros: - kind: vr-sros + kind: nokia_sros startup-config: myconfig.partial.txt ```
@@ -236,7 +236,7 @@ name: sros_lab topology: nodes: sros: - kind: vr-sros + kind: nokia_sros startup-config: https://gist.com//staticroute.partial.cfg ```
@@ -249,7 +249,7 @@ name: sros_lab topology: nodes: sros: - kind: vr-sros + kind: nokia_sros startup-config: | #(1)!
/configure system location "I am an embedded config" ```
@@ -260,7 +260,7 @@ Embedded partial configs will persist on containerlab's host and use the same di #### Configuration save
-Containerlab's [`save`](../../cmd/save.md) command will perform a configuration save for `vr-sros` nodes via Netconf. The configuration will be saved under `config.txt` file and can be found at the node's directory inside the lab parent directory:
+Containerlab's [`save`](../../cmd/save.md) command will perform a configuration save for Nokia SR OS nodes via Netconf. The configuration will be saved under `config.txt` file and can be found at the node's directory inside the lab parent directory:
```bash # assuming the lab name is "cert01" cat clab-cert01/sr/tftpboot/config.txt ```
@@ -270,7 +270,7 @@ cat clab-cert01/sr/tftpboot/config.txt #### Boot Options File
-By default `vr_nokia_sros` nodes boot up with a pre-defined "Boot Options File" (BOF). This file includes boot settings including:
+By default `nokia_sros` nodes boot up with a pre-defined "Boot Options File" (BOF). This file includes boot settings such as:
* license file location
* config file location
@@ -307,13 +307,13 @@ commit exit all ```
-This script is then placed somewhere on the disk, for example in the containerlab's topology root directory, and mounted to `vr-nokia_sros` node tftpboot directory using [binds](../nodes.md#binds) property:
+This script is then placed somewhere on the disk, for example in the containerlab's topology root directory, and mounted to the `nokia_sros` node tftpboot directory using the [binds](../nodes.md#binds) property:
```yaml nodes: sros1: mgmt-ipv4: [mgmt-ip] - kind: vr-sros + kind: nokia_sros image: [container-image-repo] type: sr-1s license: license-sros.txt
@@ -337,7 +337,7 @@ By combining file bindings and the automatic script execution of SROS it is poss ### License
-Path to a valid license must be provided for all vr-sros nodes with a [`license`](../nodes.md#license) directive.
+Path to a valid license must be provided for all Nokia SR OS nodes with a [`license`](../nodes.md#license) directive.
If your SR OS license file is issued for a specific UUID, you can define it with custom type definition:
@@ -348,10 +348,10 @@ type: "cp: uuid=00001234-5678-9abc-def1-000012345678 cpu=4 ram=6 slot=A chassis= ### File mounts
-When a user starts a lab, containerlab creates a node directory for storing [configuration artifacts](../conf-artifacts.md). For `vr-sros` kind containerlab creates `tftpboot` directory where the license file will be copied.
+When a user starts a lab, containerlab creates a node directory for storing [configuration artifacts](../conf-artifacts.md). For the Nokia SR OS kind containerlab creates a `tftpboot` directory where the license file will be copied.
## Lab examples
-The following labs feature vr-sros node:
+The following labs feature a Nokia SR OS node:
* [SR Linux and vr-sros](../../lab-examples/vr-sros.md)
diff --git a/docs/manual/kinds/vr-veos.md b/docs/manual/kinds/vr-veos.md index 68ebcbb8b..ecc0ed6a3 100644 --- a/docs/manual/kinds/vr-veos.md +++ b/docs/manual/kinds/vr-veos.md @@ -4,11 +4,11 @@ search: --- # Arista vEOS
-[Arista vEOS](https://www.arista.com/en/cg-veos-router/veos-router-overview) virtualized router is identified with `vr-veos` or `vr-arista_veos` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+[Arista vEOS](https://www.arista.com/en/cg-veos-router/veos-router-overview) virtualized router is identified with `arista_veos` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
-vr-veos nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled.
+Arista vEOS nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled.
-## Managing vr-veos nodes
+## Managing Arista vEOS nodes
!!!note Containers with vEOS inside will take ~4min to fully boot.
@@ -17,7 +17,7 @@ vr-veos nodes launched with containerlab comes up pre-provisioned with SSH, SNMP Arista vEOS node launched with containerlab can be managed via the following interfaces:
=== "bash" - to connect to a `bash` shell of a running vr-veos container: + to connect to a `bash` shell of a running Arista vEOS container: ```bash docker exec -it bash ```
@@ -44,33 +44,35 @@ Arista vEOS node launched with containerlab can be managed via the following int Default user credentials: `admin:admin`
## Interfaces mapping
-vr-veos container can have up to 144 interfaces and uses the following mapping rules:
+
+Arista vEOS container can have up to 144 interfaces and uses the following mapping rules:
* `eth0` - management interface connected to the containerlab management network
* `eth1` - first data interface, mapped to first data port of vEOS line card
* `eth2+` - second and subsequent data interface
-When containerlab launches vr-veos node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
+When containerlab launches Arista vEOS node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router.
Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols.
-
## Features and options
+
### Node configuration
-vr-veos nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user and management interfaces such as NETCONF, SNMP, gNMI.
+Arista vEOS nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user and management interfaces such as NETCONF, SNMP, gNMI.
#### Startup configuration
+
It is possible to make vEOS nodes boot up with a user-defined startup-config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config:
```yaml topology: nodes: node: - kind: vr-veos + kind: arista_veos startup-config: myconfig.txt ```
With this knob containerlab is instructed to take a file `myconfig.txt` from the directory that hosts the topology file, and copy it to the lab directory for that specific node under the `/config/startup-config.cfg` name. Then the directory that hosts the startup-config dir is mounted to the container. This will result in this config being applied at startup by the node.
-Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with.
\ No newline at end of file +Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with. diff --git a/docs/manual/kinds/vr-vjunosswitch.md b/docs/manual/kinds/vr-vjunosswitch.md index 1e0053457..27b7854c6 100644 --- a/docs/manual/kinds/vr-vjunosswitch.md +++ b/docs/manual/kinds/vr-vjunosswitch.md @@ -4,15 +4,15 @@ search: --- # Juniper vJunos-switch -[Juniper vJunos-switch](https://support.juniper.net/support/downloads/?p=vjunos) is a virtualized EX9214 switch identified with `vr-vjunosswitch` or `vr-juniper_vjunosswitch` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vJunos-switch](https://support.juniper.net/support/downloads/?p=vjunos) is a virtualized EX9214 switch identified with `juniper_vjunosswitch` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -vr-vjunosswitch nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. +Juniper vJunos-switch nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. ## How to obtain the image The qcow2 image can be downloaded from [Juniper website](https://support.juniper.net/support/downloads/?p=vjunos) and built with [vrnetlab](../vrnetlab.md). -## Managing vr-vjunosswitch nodes +## Managing Juniper vJunos-switch nodes !!!note Containers with vJunos-switch inside will take ~15min to fully boot. @@ -21,7 +21,7 @@ The qcow2 image can be downloaded from [Juniper website](https://support.juniper Juniper vJunos-switch node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-vjunosswitch container: + to connect to a `bash` shell of a running Juniper vJunos-switch container: ```bash docker exec -it bash ``` @@ -41,13 +41,13 @@ Juniper vJunos-switch node launched with containerlab can be managed via the fol ## Interfaces mapping -vr-vjunosswitch container can have up to 11 interfaces and uses the following mapping rules: +Juniper vJunos-switch container can have up to 11 interfaces and uses the following mapping rules: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to a first data port of vJunos-Switch VM * `eth2+` - second and subsequent data interface -When containerlab launches vr-vjunosswitch node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach the management plane of the router. +When containerlab launches Juniper vJunos-switch node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach the management plane of the router. Data interfaces `eth1+` need to be configured with IP addressing manually using CLI/management protocols or via a startup-config text file. @@ -55,7 +55,7 @@ Data interfaces `eth1+` need to be configured with IP addressing manually using ### Node configuration -vr-vjunosswitch nodes come up with a basic configuration supplied by a mountable configuration disk to the main VM image. Users, management interfaces, and protocols such as SSH and NETCONF are configured. 
+Juniper vJunos-switch nodes come up with a basic configuration supplied by a mountable configuration disk to the main VM image. Users, management interfaces, and protocols such as SSH and NETCONF are configured. #### Startup configuration @@ -65,7 +65,7 @@ It is possible to make vJunos-switch nodes boot up with a user-defined startup-c topology: nodes: node: - kind: vr-vjunosswitch + kind: juniper_vjunosswitch startup-config: myconfig.txt ``` @@ -75,7 +75,7 @@ Configuration is applied after the node is started, thus it can contain partial ## Lab examples -The following labs feature the vr-vjunosswitch node: +The following labs feature the Juniper vJunos-switch node: * [SR Linux and Juniper vJunos-switch](../../lab-examples/srl-vjunos-switch.md) diff --git a/docs/manual/kinds/vr-vmx.md b/docs/manual/kinds/vr-vmx.md index f0a7f2658..232f2c360 100644 --- a/docs/manual/kinds/vr-vmx.md +++ b/docs/manual/kinds/vr-vmx.md @@ -4,11 +4,11 @@ search: --- # Juniper vMX -[Juniper vMX](https://www.juniper.net/us/en/products/routers/mx-series/vmx-virtual-router-software.html) virtualized router is identified with `vr-vmx` or `vr-juniper_vmx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vMX](https://www.juniper.net/us/en/products/routers/mx-series/vmx-virtual-router-software.html) virtualized router is identified with `juniper_vmx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -vr-vmx nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. +Juniper vMX nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. -## Managing vr-vmx nodes +## Managing Juniper vMX nodes !!!note Containers with vMX inside will take ~7min to fully boot. @@ -17,7 +17,7 @@ vr-vmx nodes launched with containerlab come up pre-provisioned with SSH, SNMP, Juniper vMX node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-vmx container: + to connect to a `bash` shell of a running Juniper vMX container: ```bash docker exec -it bash ``` @@ -44,13 +44,13 @@ Juniper vMX node launched with containerlab can be managed via the following int ## Interfaces mapping -vr-vmx container can have up to 90 interfaces and uses the following mapping rules: +Juniper vMX container can have up to 90 interfaces and uses the following mapping rules: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to a first data port of vMX line card * `eth2+` - second and subsequent data interface -When containerlab launches vr-vmx node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach the management plane of the router. +When containerlab launches Juniper vMX node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach the management plane of the router. Data interfaces `eth1+` need to be configured with IP addressing manually using CLI/management protocols. 
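When several nodes share the same kind, the renamed kind can also be set once under `topology.kinds` instead of per node; a minimal sketch, with the image tag being an assumed placeholder for a locally built vrnetlab image:

```yaml
# hypothetical lab file: vmx-pair.clab.yml
name: vmx-pair
topology:
  kinds:
    juniper_vmx:                     # kind-level defaults for all vMX nodes
      image: vrnetlab/vr-vmx:21.2R1  # placeholder tag of a locally built image
  nodes:
    vmx1:
      kind: juniper_vmx
    vmx2:
      kind: juniper_vmx
  links:
    # eth1 maps to the first vMX data port on each node
    - endpoints: ["vmx1:eth1", "vmx2:eth1"]
```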
@@ -58,7 +58,7 @@ Data interfaces `eth1+` need to be configured with IP addressing manually using ### Node configuration -vr-vmx nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` users and management interfaces such as NETCONF, SNMP, gNMI. +Juniper vMX nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` users and management interfaces such as NETCONF, SNMP, gNMI. Starting with [hellt/vrnetlab](https://github.com/hellt/vrnetlab) v0.8.2 VMX will make use of the management VRF[^1]. @@ -70,7 +70,7 @@ It is possible to make vMX nodes boot up with a user-defined startup-config inst topology: nodes: node: - kind: vr-vmx + kind: juniper_vmx startup-config: myconfig.txt ``` @@ -80,13 +80,13 @@ Configuration is applied after the node is started, thus it can contain partial ## Lab examples -The following labs feature vr-vmx node: +The following labs feature Juniper vMX node: * [SR Linux and Juniper vMX](../../lab-examples/vr-vmx.md) ## Known issues and limitations -* when listing docker containers, vr-vmx containers will always report unhealthy status. Do not rely on this status. +* when listing docker containers, Juniper vMX containers will always report unhealthy status. Do not rely on this status. * vMX requires Linux kernel 4.17+ * To check the boot log, use `docker logs -f `. diff --git a/docs/manual/kinds/vr-vqfx.md b/docs/manual/kinds/vr-vqfx.md index d3797ab24..2351662de 100644 --- a/docs/manual/kinds/vr-vqfx.md +++ b/docs/manual/kinds/vr-vqfx.md @@ -4,12 +4,12 @@ search: --- # Juniper vQFX -[Juniper vQFX](https://www.juniper.net/us/en/dm/free-vqfx10000-software.html) virtualized router is identified with `vr-vqfx` or `vr-juniper_vqfx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vQFX](https://www.juniper.net/us/en/dm/free-vqfx10000-software.html) virtualized router is identified with `juniper_vqfx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. !!!note vQFX images built with [hellt/vrnetlab](https://github.com/hellt/vrnetlab/tree/master/vqfx) have experimental support for vQFX version v18 and newer. -## Managing vr-vqfx nodes +## Managing Juniper vQFX nodes !!!note Containers with vQFX inside will take ~7min to fully boot. @@ -18,7 +18,7 @@ search: Juniper vQFX node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-vqfx container: + to connect to a `bash` shell of a running Juniper vQFX container: ```bash docker exec -it bash ``` @@ -28,7 +28,7 @@ Juniper vQFX node launched with containerlab can be managed via the following in ssh admin@ ``` === "NETCONF" - Coming soon + Looking for contributions. !!!info Default user credentials: `admin:admin@123` @@ -39,7 +39,7 @@ Juniper vQFX node launched with containerlab can be managed via the following in * `eth1` - first data interface, mapped to first data port of vQFX line card * `eth2+` - second and subsequent data interface -When containerlab launches vr-vqfx node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. 
+When containerlab launches Juniper vQFX node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols. @@ -47,7 +47,7 @@ Data interfaces `eth1+` needs to be configured with IP addressing manually using ### Node configuration -vr-vqfx nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user with the provided password. +Juniper vQFX nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `admin` user with the provided password. #### Startup configuration @@ -57,7 +57,7 @@ It is possible to make vQFX nodes boot up with a user-defined startup-config ins topology: nodes: node: - kind: vr-vqfx + kind: juniper_vqfx startup-config: myconfig.txt ``` @@ -67,4 +67,4 @@ Configuration is applied after the node is started, thus it can contain partial ## Lab examples -Coming soon. +Looking for contributions. diff --git a/docs/manual/kinds/vr-vsrx.md b/docs/manual/kinds/vr-vsrx.md index 35e442a37..1ffd38e40 100644 --- a/docs/manual/kinds/vr-vsrx.md +++ b/docs/manual/kinds/vr-vsrx.md @@ -4,9 +4,9 @@ search: --- # Juniper vSRX -[Juniper vSRX](https://www.juniper.net/us/en/dm/download-next-gen-vsrx-firewall-trial.html) virtualized firewall is identified with `vr-vsrx` or `vr-juniper_vsrx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vSRX](https://www.juniper.net/us/en/dm/download-next-gen-vsrx-firewall-trial.html) virtualized firewall is identified with `juniper_vsrx` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -## Managing vr-vsrx nodes +## Managing Juniper vSRX nodes !!!note Containers with vSRX inside will take ~7min to fully boot. @@ -15,7 +15,7 @@ search: Juniper vSRX node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-vsrx container: + to connect to a `bash` shell of a running Juniper vSRX container: ```bash docker exec -it bash ``` @@ -35,7 +35,7 @@ Juniper vSRX node launched with containerlab can be managed via the following in * `eth0` - management interface (fxp0) connected to the containerlab management network * `eth1+` - second and subsequent data interface -When containerlab launches vr-vsrx node, it will assign IPv4/6 address to the `eth0` interface. These addresses are used to reach the management plane of the router. +When containerlab launches Juniper vSRX node, it will assign IPv4/6 address to the `eth0` interface. These addresses are used to reach the management plane of the router. Data interfaces `eth1+` need to be configured with IP addressing manually using CLI/management protocols. @@ -43,7 +43,7 @@ Data interfaces `eth1+` need to be configured with IP addressing manually using ### Node configuration -`vr-vsrx` nodes come up with a basic configuration where only the control plane and line cards are provisioned and the `admin` user with the provided password. +Juniper vSRX nodes come up with a basic configuration where only the control plane and line cards are provisioned and the `admin` user with the provided password. 
#### Startup configuration @@ -53,7 +53,7 @@ It is possible to make vSRX nodes boot up with a user-defined startup-config ins topology: nodes: node: - kind: vr-vsrx + kind: juniper_vsrx startup-config: myconfig.txt ``` diff --git a/docs/manual/kinds/vr-xrv.md b/docs/manual/kinds/vr-xrv.md index cc0ee1c3a..4121448e3 100644 --- a/docs/manual/kinds/vr-xrv.md +++ b/docs/manual/kinds/vr-xrv.md @@ -4,14 +4,14 @@ search: --- # Cisco XRv -Cisco XRv virtualized router is identified with `vr-xrv` or `vr-cisco_xrv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +Cisco XRv virtualized router is identified with `cisco_xrv` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -vr-xrv nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI (if available) services enabled. +Cisco XRv nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI (if available) services enabled. !!!warning - XRv image is discontinued by Cisco and supreceded by XRv 9000 image. It was added to containerlab because the image is lightweight, compared to XRv9k. If recent features are needed, use [vr-xrv9k](vr-xrv9k.md) kind. + XRv image is discontinued by Cisco and superseded by XRv 9000 image. It was added to containerlab because the image is lightweight, compared to XRv9k. If recent features are needed, use [Cisco XRv9k](vr-xrv9k.md) kind. -## Managing vr-xrv nodes +## Managing Cisco XRv nodes !!!note Containers with XRv inside will take ~5min to fully boot. @@ -20,7 +20,7 @@ vr-xrv nodes launched with containerlab come up pre-provisioned with SSH, SNMP, Cisco XRv node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-xrv container: + to connect to a `bash` shell of a running Cisco XRv container: ```bash docker exec -it bash ``` @@ -46,29 +46,32 @@ Cisco XRv node launched with containerlab can be managed via the following inter Default user credentials: `clab:clab@123` ## Interfaces mapping -vr-xrv container can have up to 90 interfaces and uses the following mapping rules: + +Cisco XRv container can have up to 90 interfaces and uses the following mapping rules: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to first data port of XRv line card * `eth2+` - second and subsequent data interface -When containerlab launches vr-xrv node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. +When containerlab launches Cisco XRv node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols. - ## Features and options + ### Node configuration -vr-xrv nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `clab` user and management interfaces such as NETCONF, SNMP, gNMI. + +Cisco XRv nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `clab` user and management interfaces such as NETCONF, SNMP, gNMI. 
#### Startup configuration + It is possible to make XRv nodes boot up with a user-defined startup-config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config: ```yaml topology: nodes: node: - kind: vr-xrv + kind: cisco_xrv startup-config: myconfig.txt ``` @@ -77,9 +80,11 @@ With this knob containerlab is instructed to take a file `myconfig.txt` from the Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with. ## Lab examples -The following labs feature vr-xrv node: -- [SR Linux and Cisco XRv](../../lab-examples/vr-xrv.md) +The following labs feature Cisco XRv node: + +* [SR Linux and Cisco XRv](../../lab-examples/vr-xrv.md) ## Known issues and limitations + * LACP and BPDU packets are not propagated to/from vrnetlab based routers launched with containerlab. diff --git a/docs/manual/kinds/vr-xrv9k.md b/docs/manual/kinds/vr-xrv9k.md index f2a7f230d..c0273a352 100644 --- a/docs/manual/kinds/vr-xrv9k.md +++ b/docs/manual/kinds/vr-xrv9k.md @@ -4,19 +4,20 @@ search: --- # Cisco XRv9k -[Cisco XRv9k](https://www.cisco.com/c/en/us/products/collateral/routers/ios-xrv-9000-router/datasheet-c78-734034.html) virtualized router is identified with `vr-xrv9k` or `vr-cisco_xrv9k` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Cisco XRv9k](https://www.cisco.com/c/en/us/products/collateral/routers/ios-xrv-9000-router/datasheet-c78-734034.html) virtualized router is identified with `cisco_xrv9k` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -vr-xrv9k nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI (if available) services enabled. +Cisco XRv9k nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI (if available) services enabled. !!!warning XRv9k node is a resource hungry image. As of XRv9k 7.2.1 version the minimum resources should be set to 2vcpu/14GB. These are the default setting set with containerlab for this kind. Image will take 25 minutes to fully boot, be patient. You can monitor the loading status with `docker logs -f `. -## Managing vr-xrv9k nodes +## Managing Cisco XRv9k nodes + Cisco XRv9k node launched with containerlab can be managed via the following interfaces: === "bash" - to connect to a `bash` shell of a running vr-xrv9k container: + to connect to a `bash` shell of a running Cisco XRv9k container: ```bash docker exec -it bash ``` @@ -42,29 +43,32 @@ Cisco XRv9k node launched with containerlab can be managed via the following int Default user credentials: `clab:clab@123` ## Interfaces mapping -vr-xrv9k container can have up to 90 interfaces and uses the following mapping rules: + +Cisco XRv9k container can have up to 90 interfaces and uses the following mapping rules: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to first data port of XRv9k line card * `eth2+` - second and subsequent data interface -When containerlab launches vr-xrv9k node, it will assign IPv4/6 address to the `eth0` interface. 
These addresses can be used to reach management plane of the router. +When containerlab launches Cisco XRv9k node, it will assign IPv4/6 address to the `eth0` interface. These addresses can be used to reach management plane of the router. Data interfaces `eth1+` needs to be configured with IP addressing manually using CLI/management protocols. - ## Features and options + ### Node configuration -vr-xrv9k nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `clab` user and management interfaces such as NETCONF, SNMP, gNMI. + +Cisco XRv9k nodes come up with a basic configuration where only the control plane and line cards are provisioned, as well as the `clab` user and management interfaces such as NETCONF, SNMP, gNMI. #### Startup configuration + It is possible to make XRv9k nodes boot up with a user-defined startup-config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config: ```yaml topology: nodes: node: - kind: vr-xrv9k + kind: cisco_xrv9k startup-config: myconfig.txt ``` @@ -73,7 +77,7 @@ With this knob containerlab is instructed to take a file `myconfig.txt` from the Configuration is applied after the node is started, thus it can contain partial configuration snippets that you desire to add on top of the default config that a node boots up with. ## Lab examples -The following labs feature vr-xrv9k node: -- [SR Linux and Cisco XRv9k](../../lab-examples/vr-xrv9k.md) +The following labs feature Cisco XRv9k node: +* [SR Linux and Cisco XRv9k](../../lab-examples/vr-xrv9k.md) diff --git a/docs/manual/vrnetlab.md b/docs/manual/vrnetlab.md index 19b8db492..a9d9c0655 100644 --- a/docs/manual/vrnetlab.md +++ b/docs/manual/vrnetlab.md @@ -33,21 +33,28 @@ The following table provides a link between the version combinations: | ---------------- | ------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------- | | `0.10.4` | [`0.1.0-cl`](https://github.com/hellt/vrnetlab/tree/v0.1.0-cl) | Initial release. Images: sros, vmx, xrv, xrv9k | | `0.11.0` | [`0.2.0`](https://github.com/hellt/vrnetlab/tree/v0.2.0) | added [vr-veos](kinds/vr-veos.md), support for [boot-delay](#boot-delay), SR OS will have a static route to docker network, improved XRv startup chances | -| -- | [`0.2.1`](https://github.com/hellt/vrnetlab/tree/v0.2.1) | added timeout for SR OS images to allow eth interfaces to appear in the container namespace. Other images are not touched. 
|
-| -- | [`0.2.2`](https://github.com/hellt/vrnetlab/tree/v0.2.2) | fixed serial (telnet) access to SR OS nodes |
-| -- | [`0.2.3`](https://github.com/hellt/vrnetlab/tree/v0.2.3) | set default cpu/ram for SR OS images |
-| `0.13.0` | [`0.3.0`](https://github.com/hellt/vrnetlab/tree/v0.3.0) | added support for Cisco CSR1000v via [`vr-csr`](kinds/vr-csr.md) and MikroTik routeros via [`vr-ros`](kinds/vr-ros.md) kind |
-| -- | [`0.3.1`](https://github.com/hellt/vrnetlab/tree/v0.3.1) | enhanced SR OS boot sequence |
-| -- | [`0.4.0`](https://github.com/hellt/vrnetlab/tree/v0.4.0) | fixed SR OS CPU allocation and added Palo Alto PAN support [`vr-pan`](kinds/vr-pan.md) |
-| `0.16.0` | [`0.5.0`](https://github.com/hellt/vrnetlab/tree/v0.5.0) | added support for Cisco Nexus 9000v via [`vr-n9kv`](kinds/vr-n9kv.md) kind, added support for non-continuous interfaces provisioning |
-| `0.19.0` | [`0.6.0`](https://github.com/hellt/vrnetlab/tree/v0.6.0) | added experimental support for Juniper vQFX via [`vr-vqfx`](kinds/vr-vqfx.md) kind, added support Dell FTOS via [`vr-ftosv`](kinds/vr-ftosv.md) |
+| | [`0.2.1`](https://github.com/hellt/vrnetlab/tree/v0.2.1) | added timeout for SR OS images to allow eth interfaces to appear in the container namespace. Other images are not touched. |
+| | [`0.2.2`](https://github.com/hellt/vrnetlab/tree/v0.2.2) | fixed serial (telnet) access to SR OS nodes |
+| | [`0.2.3`](https://github.com/hellt/vrnetlab/tree/v0.2.3) | set default cpu/ram for SR OS images |
+| `0.13.0` | [`0.3.0`](https://github.com/hellt/vrnetlab/tree/v0.3.0) | added support for Cisco CSR1000v via [`cisco_csr1000v`](kinds/vr-csr.md) and MikroTik RouterOS via [`mikrotik_ros`](kinds/vr-ros.md) kind |
+| | [`0.3.1`](https://github.com/hellt/vrnetlab/tree/v0.3.1) | enhanced SR OS boot sequence |
+| | [`0.4.0`](https://github.com/hellt/vrnetlab/tree/v0.4.0) | fixed SR OS CPU allocation and added Palo Alto PAN support via [`paloalto_panos`](kinds/vr-pan.md) |
+| `0.16.0` | [`0.5.0`](https://github.com/hellt/vrnetlab/tree/v0.5.0) | added support for Cisco Nexus 9000v via [`cisco_n9kv`](kinds/vr-n9kv.md) kind, added support for non-continuous interfaces provisioning |
+| `0.19.0` | [`0.6.0`](https://github.com/hellt/vrnetlab/tree/v0.6.0) | added experimental support for Juniper vQFX via [`juniper_vqfx`](kinds/vr-vqfx.md) kind, added support for Dell FTOS via [`dell_ftosv`](kinds/vr-ftosv.md) |
 | | [`0.6.2`](https://github.com/hellt/vrnetlab/tree/v0.6.2) | support for IPv6 management for SR OS; support for RouterOS v7+ |
 | | [`0.7.0`](https://github.com/hellt/vrnetlab/tree/v0.7.0) | startup-config support for vqfx and vmx |
 | `0.32.2` | [`0.8.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.8.0) | startup-config support for the rest of the kinds, support for multi line card SR OS |
 | `0.34.0` | [`0.8.2`](https://github.com/hellt/vrnetlab/releases/tag/v0.8.2) | startup-config support for PANOS, ISA support for Nokia VSR-I and MGMT VRF for VMX |
 | | [`0.9.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.9.0) | Support for IPInfusion OcNOS with vrnetlab |
-| `0.41.0` | [`0.11.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.11.0) | Added support for Juniper vSRX3.0 via [`vr-vsrx`](kinds/vr-vsrx.md) kind |
-| `0.45.0` | [`0.12.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.12.0) | Added support for Juniper vJunos-switch via [`vr-juniper_vjunosswitch`](kinds/vr-vjunosswitch.md) kind |
+| `0.41.0` | [`0.11.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.11.0) | Added support for Juniper vSRX3.0 via 
[`juniper_vsrx`](kinds/vr-vsrx.md) kind | +| `0.45.0` | [`0.12.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.12.0) | Added support for Juniper vJunos-switch via [`juniper_vjunosswitch`](kinds/vr-vjunosswitch.md) kind | + +???note "how to understand version inter-dependency between containerlab and vrnetlab?" + When new VM-based platform support is added to vrnetlab, it is usually accompanied by a new containerlab version. In this case the table row will have both containerlab and vrnetlab versions. + When vrnetlab adds new features that don't require containerlab changes, the table will have only vrnetlab version. + When containerlab adds new features that don't require vrnetlab changes, the table will not list containerlab version. + + It is worth noting, that you can use the latest containerlab version with a given vrnetlab version, even if the table doesn't list the latest containerlab version. ### Building vrnetlab images @@ -76,20 +83,24 @@ To build a vrnetlab image compatible with containerlab, users first need to ensu The images that work with containerlab will appear in the supported list as we implement the necessary integration. -| Product | Kind | Demo lab | Notes | -| ----------------- | ----------------------------- | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Nokia SR OS | [vr-sros](kinds/vr-sros.md) | [SRL & SR OS](../lab-examples/vr-sros.md) | When building SR OS vrnetlab image for use with containerlab, **do not** provide the license during the image build process. The license shall be provided in the containerlab topology definition file[^1]. | -| Juniper vMX | [vr-vmx](kinds/vr-vmx.md) | [SRL & vMX](../lab-examples/vr-vmx.md) | | -| Juniper vQFX | [vr-vqfx](kinds/vr-vqfx.md) | Coming soon | | -| Juniper vSRX | [vr-vsrx](kinds/vr-vsrx.md) | Coming soon | | -| Cisco XRv | [vr-xrv](kinds/vr-xrv.md) | [SRL & XRv](../lab-examples/vr-xrv.md) | | -| Cisco XRv9k | [vr-xrv9k](kinds/vr-xrv9k.md) | [SRL & XRv9k](../lab-examples/vr-xrv9k.md) | | -| Cisco CSR1000v | [vr-csr](kinds/vr-csr.md) | | | -| Arista vEOS | [vr-veos](kinds/vr-veos.md) | | | -| MikroTik RouterOS | [vr-ros](kinds/vr-ros.md) | | | -| Palo Alto PAN | [vr-pan](kinds/vr-pan.md) | | | -| Cisco Nexus 9000v | [vr-n9kv](kinds/vr-n9kv.md) | | | -| Dell FTOS10v | [vr-ftosv](kinds/vr-ftosv.md) | | | +| Product | Kind | Demo lab | Notes | +| ----------------- | -------------------------------- | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Nokia SR OS | [nokia_sros](kinds/vr-sros.md) | [SRL & SR OS](../lab-examples/vr-sros.md) | When building SR OS vrnetlab image for use with containerlab, **do not** provide the license during the image build process. The license shall be provided in the containerlab topology definition file[^1]. 
|
+| Juniper vMX | [juniper_vmx](kinds/vr-vmx.md) | [SRL & vMX](../lab-examples/vr-vmx.md) | |
+| Juniper vQFX | [juniper_vqfx](kinds/vr-vqfx.md) | | |
+| Juniper vSRX | [juniper_vsrx](kinds/vr-vsrx.md) | | |
+| Juniper vJunos-Switch | [juniper_vjunosswitch](kinds/vr-vjunosswitch.md) | | |
+| Cisco XRv | [cisco_xrv](kinds/vr-xrv.md) | [SRL & XRv](../lab-examples/vr-xrv.md) | |
+| Cisco XRv9k | [cisco_xrv9k](kinds/vr-xrv9k.md) | [SRL & XRv9k](../lab-examples/vr-xrv9k.md) | |
+| Cisco CSR1000v | [cisco_csr1000v](kinds/vr-csr.md) | | |
+| Cisco Nexus 9000v | [cisco_n9kv](kinds/vr-n9kv.md) | | |
+| Arista vEOS | [arista_veos](kinds/vr-veos.md) | | |
+| MikroTik RouterOS | [mikrotik_ros](kinds/vr-ros.md) | | |
+| Palo Alto PAN | [paloalto_panos](kinds/vr-pan.md) | | |
+| Dell FTOS10v | [dell_ftosv](kinds/vr-ftosv.md) | | |
+| Aruba AOS-CX | [aruba_aoscx](kinds/vr-aoscx.md) | | |
+| IPInfusion OcNOS | [ipinfusion_ocnos](kinds/ipinfusion-ocnos.md) | | |
+| Checkpoint Cloudguard | [checkpoint_cloudguard](kinds/checkpoint_cloudguard.md) | | |
 
 ### Connection modes
 
@@ -107,8 +118,8 @@ Containerlab offers several ways of connecting VM-based routers with the rest of
 topology:
   nodes:
     sr1:
-      kind: vr-sros
-      image: vrnetlab/vr-sros:20.10.R1
+      kind: nokia_sros
+      image: vrnetlab/nokia_sros:20.10.R1
       env:
         CONNECTION_MODE: bridge # use `ovs` for openvswitch datapath
 ```
@@ -126,12 +137,12 @@ name: bootdelay
 topology:
   nodes:
     sr1:
-      kind: vr-sros
-      image: vr-sros:21.2.R1
+      kind: nokia_sros
+      image: nokia_sros:21.2.R1
       license: license-sros21.txt
     sr2:
-      kind: vr-sros
-      image: vr-sros:21.2.R1
+      kind: nokia_sros
+      image: nokia_sros:21.2.R1
       license: license-sros21.txt
       env:
         # boot delay in seconds
diff --git a/lab-examples/cert01/cert01.clab.yml b/lab-examples/cert01/cert01.clab.yml
index 38950dc4e..4e11bed8c 100644
--- a/lab-examples/cert01/cert01.clab.yml
+++ b/lab-examples/cert01/cert01.clab.yml
@@ -2,6 +2,6 @@ name: cert01
 topology:
   nodes:
     sr:
-      kind: vr-sros
+      kind: nokia_sros
       image: vrnetlab/vr-sros:21.2.R1
       license: license-sros21.txt
diff --git a/lab-examples/cvx01/topo.clab.yml b/lab-examples/cvx01/topo.clab.yml
index c34c022b2..612b3aa07 100644
--- a/lab-examples/cvx01/topo.clab.yml
+++ b/lab-examples/cvx01/topo.clab.yml
@@ -3,7 +3,7 @@ name: cvx01
 topology:
   nodes:
     sw1:
-      kind: cvx
+      kind: cumulus_cvx
       image: networkop/cx:4.3.0
       binds:
         - sw1/interfaces:/etc/network/interfaces.d/host-mounts
@@ -11,7 +11,7 @@ topology:
       kind: linux
       image: frrouting/frr:v7.5.1
       binds:
-        - sw2/frr.conf:/etc/frr/frr.conf 
+        - sw2/frr.conf:/etc/frr/frr.conf
 
   links:
-    - endpoints: ["sw1:swp12", "sw2:eth21"]
\ No newline at end of file
+    - endpoints: ["sw1:swp12", "sw2:eth21"]
diff --git a/lab-examples/cvx02/topo.clab.yml b/lab-examples/cvx02/topo.clab.yml
index 85c25e0a7..5bc9f0e6d 100644
--- a/lab-examples/cvx02/topo.clab.yml
+++ b/lab-examples/cvx02/topo.clab.yml
@@ -3,7 +3,7 @@ name: cvx02
 topology:
   nodes:
     sw1:
-      kind: cvx
+      kind: cumulus_cvx
       image: networkop/cx:4.3.0
       runtime: docker
       binds:
@@ -14,7 +14,7 @@ topology:
       image: networkop/host:ifreload
       binds:
         - h1/interfaces:/etc/network/interfaces
-      cmd: 2 # wait for 2 interfaces to be connected: eth0 + eth1
+      cmd: "2" # wait for 2 interfaces to be connected: eth0 + eth1
 
   links:
     - endpoints: ["sw1:swp12", "h1:eth1"]
diff --git a/lab-examples/srl01/srl01.clab.yml b/lab-examples/srl01/srl01.clab.yml
index 0af99249d..2696bc39f 100644
--- a/lab-examples/srl01/srl01.clab.yml
+++ b/lab-examples/srl01/srl01.clab.yml
@@ -3,9 +3,9 @@ name: srl01
 
 topology:
   kinds:
-    srl:
+    nokia_srlinux:
       type: ixrd3
image: ghcr.io/nokia/srlinux nodes: srl: - kind: srl + kind: nokia_srlinux diff --git a/lab-examples/srl02/srl02.clab.yml b/lab-examples/srl02/srl02.clab.yml index 58836ce60..faec12b6e 100644 --- a/lab-examples/srl02/srl02.clab.yml +++ b/lab-examples/srl02/srl02.clab.yml @@ -4,11 +4,11 @@ name: srl02 topology: nodes: srl1: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux startup-config: srl1.cfg srl2: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux startup-config: srl2.cfg diff --git a/lab-examples/srl03/srl03.clab.yml b/lab-examples/srl03/srl03.clab.yml index 0c3e1fd7a..510a68bf4 100644 --- a/lab-examples/srl03/srl03.clab.yml +++ b/lab-examples/srl03/srl03.clab.yml @@ -2,20 +2,20 @@ name: srl03 topology: kinds: - srl: + nokia_srlinux: type: ixrd3 image: ghcr.io/nokia/srlinux linux: image: ghcr.io/hellt/network-multitool nodes: wan1: - kind: srl + kind: nokia_srlinux wan2: - kind: srl + kind: nokia_srlinux wan3: - kind: srl + kind: nokia_srlinux wan4: - kind: srl + kind: nokia_srlinux client1: kind: "linux" client2: diff --git a/lab-examples/srlceos01/srlceos01.clab.yml b/lab-examples/srlceos01/srlceos01.clab.yml index 15a28d2f2..1a8c9ee29 100644 --- a/lab-examples/srlceos01/srlceos01.clab.yml +++ b/lab-examples/srlceos01/srlceos01.clab.yml @@ -4,10 +4,10 @@ name: srlceos01 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux ceos: - kind: ceos + kind: arista_ceos image: ceos:4.25.0F links: diff --git a/lab-examples/srlcrpd01/srlcrpd01.clab.yml b/lab-examples/srlcrpd01/srlcrpd01.clab.yml index 956b85376..9a9b59372 100644 --- a/lab-examples/srlcrpd01/srlcrpd01.clab.yml +++ b/lab-examples/srlcrpd01/srlcrpd01.clab.yml @@ -3,10 +3,10 @@ name: srlcrpd01 topology: nodes: crpd: - kind: crpd + kind: juniper_crpd image: crpd:20.2R1.10 srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux links: diff --git a/lab-examples/srlfrr01/srlfrr01.clab.yml b/lab-examples/srlfrr01/srlfrr01.clab.yml index 4405d072c..c1c3f671e 100644 --- a/lab-examples/srlfrr01/srlfrr01.clab.yml +++ b/lab-examples/srlfrr01/srlfrr01.clab.yml @@ -3,7 +3,7 @@ name: srlfrr01 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux frr: kind: linux diff --git a/lab-examples/srlvjunos01/srlvjunos01.clab.yml b/lab-examples/srlvjunos01/srlvjunos01.clab.yml index 1e4f9c12b..fd17c52a2 100644 --- a/lab-examples/srlvjunos01/srlvjunos01.clab.yml +++ b/lab-examples/srlvjunos01/srlvjunos01.clab.yml @@ -3,12 +3,12 @@ name: srlvjunos01 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux:23.7.1 startup-config: srl.cli vswitch: - kind: vr-juniper_vjunosswitch + kind: juniper_vjunosswitch image: vrnetlab/vr-vjunosswitch:23.2R1.14 startup-config: vjunos.cfg diff --git a/lab-examples/templated01/templated01.clab.gotmpl b/lab-examples/templated01/templated01.clab.gotmpl index ef8971825..93c329555 100644 --- a/lab-examples/templated01/templated01.clab.gotmpl +++ b/lab-examples/templated01/templated01.clab.gotmpl @@ -2,9 +2,9 @@ name: templated01 topology: defaults: - kind: srl + kind: nokia_srlinux kinds: - srl: + nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: diff --git a/lab-examples/templated02/templated02.clab.gotmpl b/lab-examples/templated02/templated02.clab.gotmpl index 9cf8d4909..28ea1228a 100644 --- a/lab-examples/templated02/templated02.clab.gotmpl +++ b/lab-examples/templated02/templated02.clab.gotmpl @@ -2,9 +2,9 @@ name: templated02 topology: defaults: - kind: srl + kind: nokia_srlinux kinds: - srl: + 
nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: diff --git a/lab-examples/vr01/vr01.clab.yml b/lab-examples/vr01/vr01.clab.yml index 81c188c7f..c8d450762 100644 --- a/lab-examples/vr01/vr01.clab.yml +++ b/lab-examples/vr01/vr01.clab.yml @@ -3,13 +3,13 @@ name: vr01 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux sros: - kind: vr-sros - image: vr-sros:20.10.R1 + kind: nokia_sros + image: nokia_sros:20.10.R1 type: sr-1 - license: /opt/nokia/sros/license-sros20.txt # Not included in the lab, ask your Nokia representative + license: /opt/nokia/sros/license-sros20.txt # Not included in the lab, ask your Nokia representative links: - endpoints: ["srl:e1-1", "sros:eth1"] diff --git a/lab-examples/vr02/vr02.clab.yml b/lab-examples/vr02/vr02.clab.yml index 63bae6547..d1e83d036 100644 --- a/lab-examples/vr02/vr02.clab.yml +++ b/lab-examples/vr02/vr02.clab.yml @@ -3,10 +3,10 @@ name: vr02 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux vmx: - kind: vr-vmx + kind: juniper_vmx image: vrnetlab/vr-vmx:20.2R1.10 links: diff --git a/lab-examples/vr03/vr03.clab.yml b/lab-examples/vr03/vr03.clab.yml index 9028567e3..163dc49cf 100644 --- a/lab-examples/vr03/vr03.clab.yml +++ b/lab-examples/vr03/vr03.clab.yml @@ -3,10 +3,10 @@ name: vr03 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux xrv: - kind: vr-xrv + kind: cisco_xrv image: vrnetlab/vr-xrv:6.1.2 links: diff --git a/lab-examples/vr04/vr04.clab.yml b/lab-examples/vr04/vr04.clab.yml index c361cc710..f4aef844c 100644 --- a/lab-examples/vr04/vr04.clab.yml +++ b/lab-examples/vr04/vr04.clab.yml @@ -3,11 +3,11 @@ name: vr04 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux xrv9k: - kind: vr-xrv9k - image: vr-xrv:7.2.1 # do not forget to re-tag the image if needed + kind: cisco_xrv9k + image: vr-xrv9k:7.2.1 # do not forget to re-tag the image if needed links: - endpoints: ["srl:e1-1", "xrv9k:eth1"] diff --git a/lab-examples/vr05/sros4.clab.yml b/lab-examples/vr05/sros4.clab.yml index 9b98f752f..c108c8430 100644 --- a/lab-examples/vr05/sros4.clab.yml +++ b/lab-examples/vr05/sros4.clab.yml @@ -2,7 +2,7 @@ name: conf1 topology: defaults: - kind: vr-sros + kind: nokia_sros image: vrnetlab/vr-sros:21.2.R1 license: ~/license/sros.txt config: diff --git a/lab-examples/vr05/vr01.clab.yml b/lab-examples/vr05/vr01.clab.yml index 6c92cfb27..74d64041d 100644 --- a/lab-examples/vr05/vr01.clab.yml +++ b/lab-examples/vr05/vr01.clab.yml @@ -3,7 +3,7 @@ name: vr01 topology: nodes: srl: - kind: srl + kind: nokia_srlinux image: ghcr.io/nokia/srlinux config: vars: @@ -11,7 +11,7 @@ topology: isis_iid: 1 sid_idx: 11 sros: - kind: vr-sros + kind: nokia_sros image: vrnetlab/vr-sros:21.2.R1 type: sr-1 license: ~/license/sros.txt diff --git a/lab-examples/vsrx01/vsrx01.yml b/lab-examples/vsrx01/vsrx01.yml index 46b29c187..857f2bbc9 100644 --- a/lab-examples/vsrx01/vsrx01.yml +++ b/lab-examples/vsrx01/vsrx01.yml @@ -2,9 +2,9 @@ name: vsrx1 topology: nodes: srx1: - kind: vr-vsrx + kind: juniper_vsrx image: vrnetlab/vr-vsrx:23.2R1.13 - startup-config: srx1.txt + startup-config: srx1.txt client1: kind: "linux" image: wbitt/network-multitool:alpine-extra diff --git a/lab-examples/vxlan01/vxlan-sros.clab.yml b/lab-examples/vxlan01/vxlan-sros.clab.yml index 8f21d2c63..75356bb4d 100644 --- a/lab-examples/vxlan01/vxlan-sros.clab.yml +++ b/lab-examples/vxlan01/vxlan-sros.clab.yml @@ -3,7 +3,7 @@ name: vxlan topology: nodes: sros: - kind: vr-sros + 
kind: nokia_sros image: vr-sros:21.2.R1 license: license-sros20.txt # vmx node is defined in a `vxlan-vmx.clab.yml` topo file diff --git a/lab-examples/vxlan01/vxlan-vmx.clab.yml b/lab-examples/vxlan01/vxlan-vmx.clab.yml index 557dbe448..ba3fb950e 100644 --- a/lab-examples/vxlan01/vxlan-vmx.clab.yml +++ b/lab-examples/vxlan01/vxlan-vmx.clab.yml @@ -3,7 +3,7 @@ name: vxlan topology: nodes: vmx: - kind: vr-sros + kind: juniper_vmx image: vrnetlab/vr-vmx:20.4R1.12 # sros node is defined in a `vxlan-sros.clab.yml` topo file # that is launched on another VM diff --git a/nodes/vr_aoscx/vr-aoscx.go b/nodes/vr_aoscx/vr-aoscx.go index 0e1197237..80131b43c 100644 --- a/nodes/vr_aoscx/vr-aoscx.go +++ b/nodes/vr_aoscx/vr-aoscx.go @@ -11,7 +11,7 @@ import ( ) var ( - kindnames = []string{"vr-aoscx", "vr-aruba_aoscx"} + kindnames = []string{"aruba_aoscx", "vr-aoscx", "vr-aruba_aoscx"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_csr/vr-csr.go b/nodes/vr_csr/vr-csr.go index ae24f86c1..f6a00db21 100644 --- a/nodes/vr_csr/vr-csr.go +++ b/nodes/vr_csr/vr-csr.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-csr", "vr-cisco_csr1000v"} + kindnames = []string{"cisco_csr1000v", "vr-csr", "vr-cisco_csr1000v"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_ftosv/vr-ftosv.go b/nodes/vr_ftosv/vr-ftosv.go index 5f7047716..21f586aeb 100644 --- a/nodes/vr_ftosv/vr-ftosv.go +++ b/nodes/vr_ftosv/vr-ftosv.go @@ -15,7 +15,7 @@ import ( ) var ( - kindnames = []string{"vr-ftosv", "vr-dell_ftosv"} + kindnames = []string{"dell_ftosv", "vr-ftosv", "vr-dell_ftosv"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_n9kv/vr-n9kv.go b/nodes/vr_n9kv/vr-n9kv.go index 541f75179..a229526d5 100644 --- a/nodes/vr_n9kv/vr-n9kv.go +++ b/nodes/vr_n9kv/vr-n9kv.go @@ -16,7 +16,7 @@ import ( ) var ( - kindnames = []string{"vr-n9kv", "vr-cisco_n9kv"} + kindnames = []string{"cisco_n9kv", "vr-n9kv", "vr-cisco_n9kv"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_nxos/vr-nxos.go b/nodes/vr_nxos/vr-nxos.go deleted file mode 100644 index 6ee27010d..000000000 --- a/nodes/vr_nxos/vr-nxos.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2021 Nokia -// Licensed under the BSD 3-Clause License. -// SPDX-License-Identifier: BSD-3-Clause - -package vr_nxos - -import ( - "context" - "fmt" - "path" - - "github.com/srl-labs/containerlab/nodes" - "github.com/srl-labs/containerlab/types" - "github.com/srl-labs/containerlab/utils" -) - -var ( - kindnames = []string{"vr-nxos", "vr-cisco_nxos"} - defaultCredentials = nodes.NewCredentials("admin", "admin") -) - -const ( - configDirName = "config" - startupCfgFName = "startup-config.cfg" -) - -// Register registers the node in the NodeRegistry. 
-func Register(r *nodes.NodeRegistry) { - r.Register(kindnames, func() nodes.Node { - return new(vrNXOS) - }, defaultCredentials) -} - -type vrNXOS struct { - nodes.DefaultNode -} - -func (n *vrNXOS) Init(cfg *types.NodeConfig, opts ...nodes.NodeOption) error { - // Init DefaultNode - n.DefaultNode = *nodes.NewDefaultNode(n) - // set virtualization requirement - n.HostRequirements.VirtRequired = true - - n.Cfg = cfg - for _, o := range opts { - o(n) - } - // env vars are used to set launch.py arguments in vrnetlab container - defEnv := map[string]string{ - "USERNAME": defaultCredentials.GetUsername(), - "PASSWORD": defaultCredentials.GetPassword(), - "CONNECTION_MODE": nodes.VrDefConnMode, - "VCPU": "2", - "RAM": "4096", - "DOCKER_NET_V4_ADDR": n.Mgmt.IPv4Subnet, - "DOCKER_NET_V6_ADDR": n.Mgmt.IPv6Subnet, - } - n.Cfg.Env = utils.MergeStringMaps(defEnv, n.Cfg.Env) - - // mount config dir to support startup-config functionality - n.Cfg.Binds = append(n.Cfg.Binds, fmt.Sprint(path.Join(n.Cfg.LabDir, configDirName), ":/config")) - - n.Cfg.Cmd = fmt.Sprintf("--username %s --password %s --hostname %s --connection-mode %s --trace", - defaultCredentials.GetUsername(), defaultCredentials.GetPassword(), n.Cfg.ShortName, n.Cfg.Env["CONNECTION_MODE"]) - - return nil -} - -func (n *vrNXOS) PreDeploy(_ context.Context, params *nodes.PreDeployParams) error { - utils.CreateDirectory(n.Cfg.LabDir, 0777) - _, err := n.LoadOrGenerateCertificate(params.Cert, params.TopologyName) - if err != nil { - return nil - } - return nodes.LoadStartupConfigFileVr(n, configDirName, startupCfgFName) -} - -// CheckInterfaceName checks if a name of the interface referenced in the topology file correct. -func (n *vrNXOS) CheckInterfaceName() error { - return nodes.GenericVMInterfaceCheck(n.Cfg.ShortName, n.Endpoints) -} diff --git a/nodes/vr_pan/vr-pan.go b/nodes/vr_pan/vr-pan.go index 7d6b87807..aab613a5c 100644 --- a/nodes/vr_pan/vr-pan.go +++ b/nodes/vr_pan/vr-pan.go @@ -15,7 +15,7 @@ import ( ) var ( - kindnames = []string{"vr-pan", "vr-paloalto_panos"} + kindnames = []string{"paloalto_panos", "vr-pan", "vr-paloalto_panos"} defaultCredentials = nodes.NewCredentials("admin", "Admin@123") ) diff --git a/nodes/vr_ros/vr-ros.go b/nodes/vr_ros/vr-ros.go index a2ceb4fa6..800c7794e 100644 --- a/nodes/vr_ros/vr-ros.go +++ b/nodes/vr_ros/vr-ros.go @@ -15,7 +15,7 @@ import ( ) var ( - kindnames = []string{"vr-ros", "vr-mikrotik_ros"} + kindnames = []string{"mikrotik_ros", "vr-ros", "vr-mikrotik_ros"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_sros/vr-sros.go b/nodes/vr_sros/vr-sros.go index e254098db..0b8473659 100644 --- a/nodes/vr_sros/vr-sros.go +++ b/nodes/vr_sros/vr-sros.go @@ -28,7 +28,7 @@ import ( ) var ( - kindnames = []string{"vr-sros", "vr-nokia_sros"} + kindnames = []string{"nokia_sros", "vr-sros", "vr-nokia_sros"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_veos/vr-veos.go b/nodes/vr_veos/vr-veos.go index 51d7c2731..86df216dc 100644 --- a/nodes/vr_veos/vr-veos.go +++ b/nodes/vr_veos/vr-veos.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-veos", "vr-arista_veos"} + kindnames = []string{"arista_veos", "vr-veos", "vr-arista_veos"} defaultCredentials = nodes.NewCredentials("admin", "admin") ) diff --git a/nodes/vr_vjunosswitch/vr-vjunosswitch.go b/nodes/vr_vjunosswitch/vr-vjunosswitch.go index 7bfb818ba..2b3934ad3 100644 --- a/nodes/vr_vjunosswitch/vr-vjunosswitch.go +++ b/nodes/vr_vjunosswitch/vr-vjunosswitch.go @@ -17,7 +17,7 
@@ import ( ) var ( - kindnames = []string{"vr-vjunosswitch", "vr-juniper_vjunosswitch"} + kindnames = []string{"juniper_vjunosswitch", "vr-vjunosswitch", "vr-juniper_vjunosswitch"} defaultCredentials = nodes.NewCredentials("admin", "admin@123") ) diff --git a/nodes/vr_vmx/vr-vmx.go b/nodes/vr_vmx/vr-vmx.go index 121fc4637..622b1a7cb 100644 --- a/nodes/vr_vmx/vr-vmx.go +++ b/nodes/vr_vmx/vr-vmx.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-vmx", "vr-juniper_vmx"} + kindnames = []string{"juniper_vmx", "vr-vmx", "vr-juniper_vmx"} defaultCredentials = nodes.NewCredentials("admin", "admin@123") ) diff --git a/nodes/vr_vqfx/vr-vqfx.go b/nodes/vr_vqfx/vr-vqfx.go index 7fba859b4..b02de9c1f 100644 --- a/nodes/vr_vqfx/vr-vqfx.go +++ b/nodes/vr_vqfx/vr-vqfx.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-vqfx", "vr-juniper_vqfx"} + kindnames = []string{"juniper_vqfx", "vr-vqfx", "vr-juniper_vqfx"} defaultCredentials = nodes.NewCredentials("admin", "admin@123") ) diff --git a/nodes/vr_vsrx/vr-vsrx.go b/nodes/vr_vsrx/vr-vsrx.go index 7a666471a..8edd507c2 100644 --- a/nodes/vr_vsrx/vr-vsrx.go +++ b/nodes/vr_vsrx/vr-vsrx.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-vsrx", "vr-juniper_vsrx"} + kindnames = []string{"juniper_vsrx", "vr-vsrx", "vr-juniper_vsrx"} defaultCredentials = nodes.NewCredentials("admin", "admin@123") ) diff --git a/nodes/vr_xrv/vr-xrv.go b/nodes/vr_xrv/vr-xrv.go index e2fe642f2..28acbce08 100644 --- a/nodes/vr_xrv/vr-xrv.go +++ b/nodes/vr_xrv/vr-xrv.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-xrv", "vr-cisco_xrv"} + kindnames = []string{"cisco_xrv", "vr-xrv", "vr-cisco_xrv"} defaultCredentials = nodes.NewCredentials("clab", "clab@123") ) diff --git a/nodes/vr_xrv9k/vr-xrv9k.go b/nodes/vr_xrv9k/vr-xrv9k.go index 0eceed924..1ca165e7d 100644 --- a/nodes/vr_xrv9k/vr-xrv9k.go +++ b/nodes/vr_xrv9k/vr-xrv9k.go @@ -17,7 +17,7 @@ import ( ) var ( - kindnames = []string{"vr-xrv9k", "vr-cisco_xrv9k"} + kindnames = []string{"cisco_xrv9k", "vr-xrv9k", "vr-cisco_xrv9k"} defaultCredentials = nodes.NewCredentials("clab", "clab@123") ) diff --git a/schemas/clab.schema.json b/schemas/clab.schema.json index 388d5886f..4ef7ceffe 100644 --- a/schemas/clab.schema.json +++ b/schemas/clab.schema.json @@ -48,35 +48,47 @@ "juniper_crpd", "sonic-vs", "vr-sros", + "nokia_sros", "vr-nokia_sros", "vr-vmx", "vr-juniper_vmx", + "juniper_vmx", "vr-vqfx", "vr-juniper_vqfx", + "juniper_vqfx", "vr-vsrx", "vr-juniper_vsrx", + "juniper_vsrx", "vr-vjunosswitch", "vr-juniper_vjunosswitch", + "juniper_vjunosswitch", "vr-xrv", "vr-cisco_xrv", + "cisco_xrv", "vr-xrv9k", "vr-cisco_xrv9k", - "vr-nxos", - "vr-cisco_nxos", + "cisco_xrv9k", "vr-veos", "vr-arista_veos", + "arista_veos", "vr-csr", "vr-cisco_csr", + "cisco_csr1000v", "vr-pan", "vr-paloalto_panos", + "paloalto_panos", "vr-ros", "vr-mikrotik_ros", + "mikrotik_ros", "vr-n9kv", "vr-cisco_n9kv", + "cisco_n9kv", "vr-ftosv", "vr-dell_ftosv", + "dell_ftosv", "vr-aoscx", "vr-aruba_aoscx", + "aruba_aoscx", "linux", "bridge", "ovs-bridge",