From 5527113e6a770db720771f37c9ecabf9f5a6917c Mon Sep 17 00:00:00 2001 From: Perumal Venkatesh Date: Wed, 6 Nov 2024 01:03:46 -0800 Subject: [PATCH 001/175] Add check to see if bgp sessions are up before proceeding with the tests (#15312) Description of PR Test currently doesn't check if the bgp sessions are all up before proceeding with the tests. Adding a check to make sure they are and then proceed with the tests. With this change we are seeing all the tests pass for chassis Approach What is the motivation for this PR? Test currently doesn't check if the bgp sessions are all up before proceeding with the tests. Adding a check to make sure they are and then proceed with the tests. With this change we are seeing all the tests pass for chassis How did you do it? How did you verify/test it? Any platform specific information? Validated on Cisco chassis ============================= test session starts ============================== platform linux -- Python 3.8.10, pytest-7.4.0, pluggy-1.5.0 ansible: 2.13.13 rootdir: /data/tests configfile: pytest.ini plugins: metadata-3.1.1, forked-1.6.0, html-4.1.1, repeat-0.9.3, xdist-1.28.0, allure-pytest-2.8.22, ansible-4.0.0 collected 4 items bgp/test_seq_idf_isolation.py::test_idf_isolated_no_export PASSED [ 25%] bgp/test_seq_idf_isolation.py::test_idf_isolated_withdraw_all PASSED [ 50%] bgp/test_seq_idf_isolation.py::test_idf_isolation_no_export_with_config_reload PASSED [ 75%] bgp/test_seq_idf_isolation.py::test_idf_isolation_withdraw_all_with_config_reload PASSED [100%] co-authorized by: jianquanye@microsoft.com --- tests/bgp/test_seq_idf_isolation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/bgp/test_seq_idf_isolation.py b/tests/bgp/test_seq_idf_isolation.py index bdaf5dac62f..9d236b0ecb2 100644 --- a/tests/bgp/test_seq_idf_isolation.py +++ b/tests/bgp/test_seq_idf_isolation.py @@ -210,7 +210,7 @@ def test_idf_isolation_no_export_with_config_reload(rand_one_downlink_duthost, # Issue 
command to isolate with no export community on DUT duthost.shell("sudo idf_isolation isolated_no_export") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) # Verify DUT is in isolated-no-export state. pytest_assert(IDF_ISOLATED_NO_EXPORT == get_idf_isolation_state(duthost), @@ -235,7 +235,7 @@ def test_idf_isolation_no_export_with_config_reload(rand_one_downlink_duthost, """ duthost.shell("sudo idf_isolation unisolated") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) pytest_assert(IDF_UNISOLATED == get_idf_isolation_state(duthost), "DUT is not isolated_no_export state") @@ -276,7 +276,7 @@ def test_idf_isolation_withdraw_all_with_config_reload(duthosts, rand_one_downli # Issue command to isolate with no export community on DUT duthost.shell("sudo idf_isolation isolated_withdraw_all") duthost.shell('sudo config save -y') - config_reload(duthost, safe_reload=True, check_intf_up_ports=True) + config_reload(duthost, safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) # Verify DUT is in isolated-withdraw-all state. pytest_assert(IDF_ISOLATED_WITHDRAW_ALL == get_idf_isolation_state(duthost), From e45414e8d07827a27bf4173744b6ca73d6257d55 Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Wed, 6 Nov 2024 18:32:28 +0800 Subject: [PATCH 002/175] [m0][mx] Skip check pfcwd during config reload for m0/mx (#15364) What is the motivation for this PR? pfcwd is not enabled in m0/mx, hence skip check it when config reload How did you do it? Skip check pfcwd when config reload on m0/mx How did you verify/test it? 
Run tests --- tests/common/config_reload.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index ae43156288f..0b0fe7c2768 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -108,7 +108,8 @@ def config_reload_minigraph_with_rendered_golden_config_override( def pfcwd_feature_enabled(duthost): device_metadata = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']['DEVICE_METADATA'] pfc_status = device_metadata['localhost']["default_pfcwd_status"] - return pfc_status == 'enable' + switch_role = device_metadata['localhost'].get('type', '') + return pfc_status == 'enable' and switch_role not in ['MgmtToRRouter', 'BmcMgmtToRRouter'] @ignore_loganalyzer From e2f3201cc3ec3066d45ca0a11e9327a477f79168 Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Wed, 6 Nov 2024 18:32:59 +0800 Subject: [PATCH 003/175] [m0][mx] Skip queue related tests for m0/mx (#15363) What is the motivation for this PR? Queue related tests are not expected to run in M0/MX topos How did you do it? Skip those cases in M0/MX How did you verify/test it? 
Run tests --- .../conditional_mark/tests_mark_conditions.yaml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index aeb3773a23a..d3ab72aad49 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1740,10 +1740,11 @@ snmp/test_snmp_queue.py: snmp/test_snmp_queue_counters.py: skip: - reason: "Have an known issue on kvm testbed" + conditions_logical_operator: OR + reason: "Have an known issue on kvm testbed / Unsupported in MGFX topos" conditions: - - asic_type in ['vs'] - - https://github.com/sonic-net/sonic-mgmt/issues/14007 + - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-mgmt/issues/14007" + - "topo_type in ['m0', 'mx']" ####################################### ##### span ##### @@ -1888,9 +1889,11 @@ telemetry/test_telemetry.py: telemetry/test_telemetry.py::test_telemetry_queue_buffer_cnt: skip: - reason: "Testcase ignored due to switch type is voq" + conditions_logical_operator: or + reason: "Testcase ignored due to switch type is voq / Unsupported in MGFX topos" conditions: - "(switch_type=='voq')" + - "topo_type in ['m0', 'mx']" ####################################### ##### pktgen ##### From 8311c4720292ba183288bbbabef451969bcd0b52 Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Wed, 6 Nov 2024 18:33:34 +0800 Subject: [PATCH 004/175] [drop_packets] Skip drop_packets on MGFX (#15341) What is the motivation for this PR? drop_packtes tests were skipped for MGFX by folder previously. Now it is broken by this change #14395 Hence update condition mark How did you do it? Update condition mark How did you verify/test it? 
Run tests --- .../tests_mark_conditions_drop_packets.yaml | 52 ++++++++++++++----- 1 file changed, 38 insertions(+), 14 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml index b1bd6251861..222014ab10b 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_drop_packets.yaml @@ -5,11 +5,12 @@ #Hence, it is not dropped by default in Cisco-8000. For dropping link local address, it should be done through security/DATA ACL drop_packets/test_configurable_drop_counters.py::test_dip_link_local: skip: - reason: "Cisco 8000 platform and some mlx platforms does not drop DIP link local packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform and some mlx platforms does not drop DIP link local packets" conditions_logical_operator: or conditions: - "'Mellanox' in hwsku" - asic_type=='cisco-8000' + - "topo_type in ['m0', 'mx']" drop_packets/test_configurable_drop_counters.py::test_neighbor_link_down: skip: @@ -19,31 +20,38 @@ drop_packets/test_configurable_drop_counters.py::test_neighbor_link_down: drop_packets/test_configurable_drop_counters.py::test_sip_link_local: skip: - reason: "Cisco 8000 platform and some MLX platforms does not drop SIP link local packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform and some MLX platforms does not drop SIP link local packets" conditions_logical_operator: or conditions: - asic_type=="cisco-8000" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" ####################################### ##### test_drop_counters.py ##### ####################################### drop_packets/test_drop_counters.py::test_absent_ip_header: skip: - reason: "Test case not supported on Broadcom DNX platform" + reason: "Test case not supported on Broadcom DNX platform and MGFX 
topos" + conditions_logical_operator: or conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_acl_egress_drop: skip: - reason: "Not supported on Broadcom platforms" + reason: "Not supported on Broadcom platforms and MGFX topos" + conditions_logical_operator: or conditions: - "asic_type in ['broadcom']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_absent: skip: - reason: "Test case not supported on Broadcom DNX platform and Cisco 8000 platform" + reason: "Test case not supported on Broadcom DNX platform and Cisco 8000 platform and MGFX topos" + conditions_logical_operator: or conditions: - "asic_subtype in ['broadcom-dnx'] or asic_type in ['cisco-8000']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_absent[vlan_members]: skip: @@ -55,9 +63,11 @@ drop_packets/test_drop_counters.py::test_dst_ip_absent[vlan_members]: drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr: skip: - reason: "Cisco 8000 platform does not drop DIP loopback packets. Test also not supported on Broadcom DNX" + reason: "Cisco 8000 platform does not drop DIP loopback packets. 
Test also not supported on Broadcom DNX and MGFX topos" + conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr[vlan_members]: skip: @@ -69,23 +79,28 @@ drop_packets/test_drop_counters.py::test_dst_ip_is_loopback_addr[vlan_members]: drop_packets/test_drop_counters.py::test_dst_ip_link_local: skip: - reason: "Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop DIP linklocal packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop DIP linklocal packets" conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_equal_smac_dmac_drop: skip: - reason: "Drop not enabled on chassis since internal traffic uses same smac & dmac" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Drop not enabled on chassis since internal traffic uses same smac & dmac" conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_ip_is_zero_addr: skip: - reason: "Cisco 8000 platform does not drop packets with 0.0.0.0 source or destination IP address" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform does not drop packets with 0.0.0.0 source or destination IP address" conditions: - "asic_type=='cisco-8000'" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_ip_is_zero_addr[vlan_members-ipv4-dst]: skip: @@ -128,9 +143,11 @@ drop_packets/test_drop_counters.py::test_ip_is_zero_addr[vlan_members-ipv6-src]: drop_packets/test_drop_counters.py::test_ip_pkt_with_expired_ttl: skip: - reason: "Not supported on Mellanox devices" + reason: "Not 
supported on Mellanox devices and MGFX topos" + conditions_logical_operator: or conditions: - "asic_type in ['mellanox']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_loopback_filter: # Test case is skipped, because SONiC does not have a control to adjust loop-back filter settings. @@ -143,9 +160,11 @@ drop_packets/test_drop_counters.py::test_loopback_filter: drop_packets/test_drop_counters.py::test_no_egress_drop_on_down_link: skip: - reason: "VS platform do not support fanout configuration" + reason: "MGFX topos doesn't support drop packets / VS platform do not support fanout configuration" + conditions_logical_operator: or conditions: - "asic_type in ['vs']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_not_expected_vlan_tag_drop[vlan_members]: skip: @@ -163,15 +182,19 @@ drop_packets/test_drop_counters.py::test_not_expected_vlan_tag_drop[vlan_members drop_packets/test_drop_counters.py::test_src_ip_is_class_e: skip: - reason: "Cisco 8000 platform does not drop packets with source IP address in class E" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 platform does not drop packets with source IP address in class E" + conditions_logical_operator: or conditions: - "asic_type=='cisco-8000'" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_src_ip_is_loopback_addr: skip: - reason: "Test currently not supported on broadcom DNX platform" + conditions_logical_operator: or + reason: "MGFX topos doesn't support drop packets / Test currently not supported on broadcom DNX platform" conditions: - "asic_subtype in ['broadcom-dnx']" + - "topo_type in ['m0', 'mx']" drop_packets/test_drop_counters.py::test_src_ip_is_loopback_addr[vlan_members]: skip: @@ -199,8 +222,9 @@ drop_packets/test_drop_counters.py::test_src_ip_is_multicast_addr[vlan_members-i drop_packets/test_drop_counters.py::test_src_ip_link_local: skip: - reason: "Cisco 8000 broadcom DNX platforms and some MLX platforms do 
not drop SIP linklocal packets" + reason: "MGFX topos doesn't support drop packets / Cisco 8000 broadcom DNX platforms and some MLX platforms do not drop SIP linklocal packets" conditions_logical_operator: or conditions: - "(asic_type=='cisco-8000') or (asic_subtype in ['broadcom-dnx'])" - "'Mellanox' in hwsku" + - "topo_type in ['m0', 'mx']" From f3cf75c46e8104b7fd14ca4e53b891665b33e698 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:34:06 +0800 Subject: [PATCH 005/175] Add some dataplane test to onboarding PR checker list (#15372) What is the motivation for this PR? Elastictest performs well in distribute running PR test in multiple KVMs, which support us to add more test scripts to PR checker. But some traffic test using ptfadapter can't be tested on KVM platform, we need to skip traffic test if needed How did you do it? Add some t0/t1 dataplane test to onboarding test job How did you verify/test it? Co-authored-by: xwjiang2021 <96218837+xwjiang2021@users.noreply.github.com> --- .azure-pipelines/pr_test_scripts.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index b00c9c51f9f..3d48dfbef25 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -461,10 +461,16 @@ onboarding_t0: - lldp/test_lldp_syncd.py # Flaky, we will triage and fix it later, move to onboarding to unblock pr check - dhcp_relay/test_dhcp_relay_stress.py + - arp/test_arp_update.py + - decap/test_subnet_decap.py + - fdb/test_fdb_mac_learning.py + - ip/test_mgmt_ipv6_only.py onboarding_t1: - lldp/test_lldp_syncd.py + - mpls/test_mpls.py + - vxlan/test_vxlan_route_advertisement.py specific_param: From 2aa8281705d18f2a3e13df97c1343f5c59aa3a10 Mon Sep 17 00:00:00 2001 From: Wenda Chu <32250288+w1nda@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:44:52 +0800 Subject: [PATCH 006/175] [wol_test] Add udp 
related tests into wol test plan (#15321) We extended wol tool to support sending magic pattern in udp paylod, and we need to add related tests into test plan. Related PRs: sonic-net/SONiC#1827, sonic-net/sonic-buildimage#20523 What is the motivation for this PR? We extended wol tool to support sending magic pattern in udp paylod, and we need to add related tests into test plan. How did you do it? Add udp related tests and give some demo commands. --- docs/testplan/WoL-test-plan.md | 38 +++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/docs/testplan/WoL-test-plan.md b/docs/testplan/WoL-test-plan.md index 5e48160dd48..b2fc70b8244 100644 --- a/docs/testplan/WoL-test-plan.md +++ b/docs/testplan/WoL-test-plan.md @@ -32,13 +32,19 @@ The test will issue `wol` commands with various parameter combinations on DUT, t #### Test case #1 - Verrify send a wol packet to a specific interface 1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234`) 1. Stop `tcpdump` process in PTF. 1. Check if only one wol packet exists in `.pcap` file and the content is expected. #### Test case #2 - Verify send a wol packekt to each member of a vlan 1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. Save the captured packets to different `.pcap` files. -1. Issue command on DUT host: `wol `. 
(e.g., `wol Vlan1000 00:11:22:33:44:55`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol `. (e.g., `wol Vlan1000 00:11:22:33:44:55`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 255.255.255.255`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234`) 1. Stop all `tcpdump` processes in PTF. 1. *For each interface in vlan*, check if one wol packet exists in corresponding `.pcap` file and the content is expected. 1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. @@ -51,21 +57,43 @@ The test will issue `wol` commands with various parameter combinations on DUT, t #### Test case #4 - Verify send a wol packet with password 1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol -p ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -p 192.168.1.1`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol -p ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -p 192.168.1.1`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255` -p 11:22:33:44:55:66`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234 -p 192.168.123.123`) 1. Stop `tcpdump` process in PTF. 1. Check if only one wol packet exists in `.pcap` file and the content is expected. Especially, verify the password in wol packet is same as command. #### Test case #5 - Verify send multiple wol packets with specific interval to a specific interface 1. 
Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. -1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -c 3 -i 2000`) +1. Issue command on DUT host: + 1. Send magic pattern in ethernet payload: `wol -c -i ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -c 3 -i 2000`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 255.255.255.255 -c 4 -i 1000`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port1234 -c 5 -i 1500`) 1. Stop `tcpdump` process in PTF. 1. Check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. #### Test case #6 - Verify send multiple wol packets with specific interval to each membor of a vlan 1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. Save the captured packets to different `.pcap` files. -1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -c 3 -i 2000`) +1. Issue command on DUT host: + 1. `wol -c -i ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -c 3 -i 2000`) + 1. Send magic pattern in udp payload with ipv4 address: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 255.255.255.255 -c 4 -i 1000`) + 1. Send magic pattern in udp payload with ipv6 address and a specific udp_port: `wol ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -u --ip-address 2404:f801:10::ffff::ffff:ffff --udp-port 1234 -c 5 -i 1500`) 1. Stop `tcpdump` process in PTF. 1. *For each interface in vlan*, check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. 
1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. +#### Test case #7 - Verify constrain of parameters +1. Make sure count and interval both exist or not. +1. Make sure udp flag is required when using ip address or udp port. +1. Make sure udp flag is conflict with mac broadcast flag. + +#### Test case #8 - Verify parameters can be set correctly by CLI +1. Make sure interface that receving packet and command line parameter interface are same. +1. Make sure target_mac in payload and command line parameter target_mac are same. +1. Make sure ip address in header and command line parameter ip_address are same. (Test both ipv4 and ipv6 address with broadcase address or unicast address on VLAN interface or port interface, so there should be 8 combinations, maybe we can leverage pytest parametrize mark to realize that.) +1. Make sure when command line parameter ip_address is empty, ip address in header is default value: 255.255.255.255. +1. Make sure udp port in header and command line parameter udp port are same. +1. Make sure when command line parameter udp_port is empty, udp port in header is default value: 9. + [^1]: ALMOST SAME means we should tolerate small errors caused by electrical characteristics. 
From 5d15b8fc9c4d0f6a00cf98b1d45d6ba7d42fd6db Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 6 Nov 2024 09:15:57 -0800 Subject: [PATCH 007/175] Stabilize pfcwd warm-reboot test on Mellanox platform (#15314) --- tests/pfcwd/conftest.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py index fc6e3086119..2c5592bbcde 100644 --- a/tests/pfcwd/conftest.py +++ b/tests/pfcwd/conftest.py @@ -35,7 +35,7 @@ def pytest_addoption(parser): @pytest.fixture(scope="module") -def two_queues(request): +def two_queues(request, duthosts, enum_rand_one_per_hwsku_frontend_hostname, fanouthosts): """ Enable/Disable sending traffic to queues [4, 3] By default send to queue 4 @@ -48,6 +48,14 @@ def two_queues(request): Returns: two_queues: False/True """ + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + dut_asic_type = duthost.facts["asic_type"].lower() + # On Mellanox devices, if the leaf-fanout is running EOS, then only one queue is supported + if dut_asic_type == "mellanox": + for fanouthost in list(fanouthosts.values()): + fanout_os = fanouthost.get_fanout_os() + if fanout_os == 'eos': + return False return request.config.getoption('--two-queues') From e3e732a404b422a7a22e9261f244150b9d3fc0d9 Mon Sep 17 00:00:00 2001 From: nissampa <99767762+nissampa@users.noreply.github.com> Date: Wed, 6 Nov 2024 10:45:30 -0800 Subject: [PATCH 008/175] Dpu test plan phase 2 (#14773) * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * 
Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md * Update Smartswitch-test-plan.md --- docs/testplan/Smartswitch-test-plan.md | 776 +++++++++++++++++++++++-- 1 file changed, 739 insertions(+), 37 deletions(-) diff --git a/docs/testplan/Smartswitch-test-plan.md b/docs/testplan/Smartswitch-test-plan.md index 3083f9f8a77..3e6d55028d6 100644 --- a/docs/testplan/Smartswitch-test-plan.md +++ b/docs/testplan/Smartswitch-test-plan.md @@ -2,6 +2,8 @@ - [Introduction](#introduction) - [Scope](#scope) +- [Testbed and Version](#testbed-and-version) +- [Topology](#topology) - [Definitions and Abbreviations](#definitions-and-abbreviations) - [Objectives of CLI Test Cases](#objectives-of-cli-test-cases) - [CLI Test Cases](#cli-test-cases) @@ -9,14 +11,23 @@ - [1.2 Check platform voltage](#12-check-platform-voltage) - [1.3 Check platform temperature](#13-check-platform-temperature) - [1.4 Check DPU console](#14-check-DPU-console) - - [1.5 Check midplane ip address between NPU and DPU](#15-check-midplane-ip-address-between-npu-and-dpu) + - [1.5 Check midplane ip address between Switch and 
DPU](#15-check-midplane-ip-address-between-switch-and-dpu) - [1.6 Check DPU shutdown and power up individually](#16-check-DPU-shutdown-and-power-up-individually) - - [1.7 Check removal of pcie link between NPU and DPU](#17-check-removal-of-pcie-link-between-npu-and-dpu) - - [1.8 Check the NTP date and timezone between DPU and NPU](#18-check-the-ntp-date-and-timezone-between-dpu-and-npu) + - [1.7 Check pcie link status between Switch and DPU](#17-check-pcie-link-status-between-switch-and-dpu) + - [1.8 Check the NTP date and timezone between DPU and Switch](#18-check-the-ntp-date-and-timezone-between-dpu-and-switch) - [1.9 Check the State of DPUs](#19-check-the-state-of-dpus) - [1.10 Check the Health of DPUs](#110-check-the-health-of-dpus) - [1.11 Check reboot cause history](#111-check-reboot-cause-history) - - [1.12 Check the DPU state after OS reboot](#112-check-the-dpu-state-after-os-reboot) + - [1.12 Check the DPU state after Switch reboot](#112-check-the-dpu-state-after-switch-reboot) + - [1.13 Check memory on DPU](#113-check-memory-on-dpu) + - [1.14 Check DPU status and pcie Link after memory exhaustion on Switch](#114-check-dpu-status-and-pcie-link-after-memory-exhaustion-on-switch) + - [1.15 Check DPU status and pcie Link after memory exhaustion on DPU](#115-check-dpu-status-and-pcie-link-after-memory-exhaustion-on-dpu) + - [1.16 Check DPU status and pcie Link after restart pmon on Switch](#116-check-dpu-status-and-pcie-link-after-restart-pmon-on-switch) + - [1.17 Check DPU status and pcie Link after reload of configuration on Switch](#117-check-dpu-status-and-pcie-link-after-reload-of-configuration-on-switch) + - [1.18 Check DPU status and pcie Link after kernel panic on Switch](#118-check-dpu-status-and-pcie-link-after-kernel-panic-on-switch) + - [1.19 Check DPU status and pcie Link after kernel panic on DPU](#119-check-dpu-status-and-pcie-link-after-kernel-panic-on-dpu) + - [1.20 Check DPU status and pcie Link after switch power 
cycle](#120-check-dpu-status-and-pcie-link-after-switch-power-cycle) + - [1.21 Check DPU status and pcie Link after SW trip by temperature trigger](#121-check-dpu-status-and-pcie-link-after-sw-trip-by-temperature-trigger) - [Objectives of API Test Cases](#objectives-of-api-test-cases) - [API Test Cases](#api-test-cases) - [1.1 Check SmartSwitch specific ChassisClass APIs](#11-check-smartswitch-specific-chassisclass-apis) @@ -36,6 +47,19 @@ Purpose of the test is to verify smartswich platform related functionalities/fea For every test cases, all DPUs need to be powered on unless specified in any of the case. General convention of DPU0, DPU1, DPU2 and DPUX has been followed to represent DPU modules and the number of DPU modules can vary. +## Testbed and Version + +The test runs on the os versions 202411 and above. +Add a check to confirm that the test environment uses version 202411 or later; if the version is earlier, skip the test. +After the above check, it needs to check DPUs in the testbed are in dark mode or not. +If it is in dark mode, then power up all the DPUs. +Dark mode is one in which all the DPUs admin_status are down. + +## Topology + +New topology called smartswitch-t1 has been added for running smartswitch cases. +T1 cases also runs on the new topology. 
+ ## Definitions and Abbreviations | **Term** | **Meaning** | @@ -54,14 +78,23 @@ General convention of DPU0, DPU1, DPU2 and DPUX has been followed to represent D | 1.2 | Check platform voltage | To verify the Voltage sensor values and and functionality of alarm by changing the threshold values | | | 1.3 | Check platform temperature | To Verify the Temperature sensor values and functionality of alarm by changing the threshold values | | | 1.4 | Check DPU console | To Verify console access for all DPUs | | -| 1.5 | Check midplane ip address between NPU and DPU | To Verify PCIe interface created between NPU and DPU according to bus number | | +| 1.5 | Check midplane ip address between Switch and DPU | To Verify PCIe interface created between Switch and DPU according to bus number | | | 1.6 | Check DPU shutdown and power up individually | To Verify DPU shutdown and DPUs power up | | -| 1.7 | Check removal of pcie link between NPU and DPU | To Verify the PCie hot plug functinality | | -| 1.8 | Check the NTP date and timezone between DPU and NPU | To Verify NPU and DPU are in sync with respect to timezone and logs timestamp | | +| 1.7 | Check pcie link status between Switch and DPU | To Verify the PCie hot plug functinality | | +| 1.8 | Check the NTP date and timezone between DPU and Switch | To Verify Switch and DPU are in sync with respect to timezone and logs timestamp | | | 1.9 | Check the State of DPUs | To Verify DPU state details during online and offline | | | 1.10 | Check the Health of DPUs | To Verify overall health (LED, process, docker, services and hw) of DPU | Phase:2 | | 1.11 | Check reboot cause history | To Verify reboot cause history cli | | | 1.12 | Check the DPU state after OS reboot | To Verify DPU state on host reboot | | +| 1.13 | Check memory on DPU | To verify Memory and its threshold on all the DPUs | +| 1.14 | Check DPU status and pcie Link after memory exhaustion on Switch | To verify dpu status and connectivity after memory exhaustion on 
To verify dpu status and connectivity after restart of pmon on Switch
Get the number of DPU modules from ansible inventory file
- * Use command `config chassis modules shutdown ` to shut down individual DPU - * Use command `show chassis modules status` to show DPU status - * Use command `config chassis modules startup ` to power up individual DPU - * Use command `show chassis modules status` to show DPU status + * Use command on Switch: `config chassis modules shutdown ` to shut down individual DPU + * Use command on Switch: `show chassis modules status` to show DPU status + * Use command on Switch: `config chassis modules startup ` to power up individual DPU + * Use command on Switch: `show chassis modules status` to show DPU status #### Verify in * Switch @@ -384,14 +417,14 @@ root@sonic:/home/cisco# show chassis modules status * Verify DPU is shown in show chassis modules status after DPU powered on -### 1.7 Check removal of pcie link between NPU and DPU +### 1.7 Check pcie link status between Switch and DPU #### Steps - * Use `show platform pcieinfo -c` to run the pcie info test to check everything is passing - * Use command `config chassis modules shutdown DPU` to bring down the dpu (This will bring down the pcie link between npu and dpu) - * Use `show platform pcieinfo -c` to run the pcie info test to check pcie link has been removed - * Use command `config chassis modules startup DPU` to bring up the dpu (This will rescan pcie links) - * Use `show platform pcieinfo -c` to run the pcie info test to check everything is passing + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test to check everything is passing + * Use command on Switch: `config chassis modules shutdown DPU` to bring down the dpu (This will bring down the pcie link between npu and dpu) + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test to check pcie link has been removed + * Use command on Switch: `config chassis modules startup DPU` to bring up the dpu (This will rescan pcie links) + * Use command on Switch: `show platform pcieinfo -c` to run the pcie info test 
This test is to check the PCIe hot plug functionality since there is no OIR possible
#### Verify in * Switch @@ -546,13 +579,13 @@ rsyslog OK Process ### 1.11 Check reboot cause history #### Steps - * The "show reboot-cause" CLI on the switch shows the most recent rebooted device, time and the cause. - * The "show reboot-cause history" CLI on the switch shows the history of the Switch and all DPUs - * The "show reboot-cause history module-name" CLI on the switch shows the history of the specified module - * Use `config chassis modules shutdown ` - * Use `config chassis modules startup ` + * Use command on Switch: "show reboot-cause" CLI to show the most recent rebooted device, time and the cause. + * Use command on Switch: "show reboot-cause history" CLI to show the history of the Switch and all DPUs + * Use command on Switch: "show reboot-cause history module-name" CLI to show the history of the specified module + * Use command on Switch: `config chassis modules shutdown ` + * Use command on Switch: `config chassis modules startup ` * Wait for 5 minutes for Pmon to update the DPU states - * Use `show reboot-cause ` to check the latest reboot is displayed + * Use command on Switch: `show reboot-cause ` to check the latest reboot is displayed #### Verify in * Switch @@ -600,7 +633,7 @@ DPU3 2023_10_02_17_23_46 Host Reset DPU Sun 02 Oct 2 #### Steps -Existing Test case for NPU: +Existing Test case for Switch: * Reboot using a particular command (sonic reboot, watchdog reboot, etc) * All the timeout and poll timings are read from platform.json * Wait for ssh to drop @@ -617,7 +650,7 @@ Reboot Test Case for DPU: * Save the configurations of all DPU state before reboot * Power on all the DPUs that were powered on before reboot using `config chassis modules startup ` * Wait for DPUs to be up - * Use command `show chassis modules status` to get DPU status + * Use command on Switch: `show chassis modules status` to get DPU status * Get the number of DPU modules from ansible inventory file for the testbed. 
#### Verify in @@ -645,6 +678,675 @@ root@sonic:/home/cisco# show chassis modules status * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. +### 1.13 Check Memory on DPU + +### Steps + + * Infrastructure support will be provided to choose from following clis or in combinations based on the vendor. + * Use command on DPU: `show system-memory` to get memory usage on each of those DPUs + * Use command on Switch: `show system-health dpu ` to check memory check service status + * Use command on DPU: `pdsctl show system --events` to check memory related events triggered. + (This is vendor specific event monitoring cli) + +#### Verify in + * DPU + +#### Sample Output +``` +On DPU: + +root@sonic:/home/admin# show system-memory + total used free shared buff/cache available +Mem: 6266 4198 1509 28 765 2067 +Swap: 0 0 0 +root@sonic:/home/admin# +root@sonic:/home/admin# + +On Switch: + +root@MtFuji:/home/cisco# show system-health dpu DPU0 +is_smartswitch returning True +Name ID Oper-Status State-Detail State-Value Time Reason +------ ---- ------------- ----------------------- ------------- ----------------- -------------------------------------------------------------------------------------------------------------------------- +DPU0 0 Online dpu_midplane_link_state UP 20241003 17:57:22 INTERNAL-MGMT : admin state - UP, oper_state - UP, status - OK, HOST-MGMT : admin state - UP, oper_state - UP, status - OK + dpu_control_plane_state UP 20241003 17:57:22 All containers are up and running, host-ethlink-status: Uplink1/1 is UP + dpu_data_plane_state UP 20241001 19:54:30 DPU container named polaris is running, pdsagent running : OK, pciemgrd running : OK +root@MtFuji:/home/cisco# + +On DPU: +# Note: This command is run on cisco specific testbed. 
This is vendor specific event monitoring cli.
+ * Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_memory_exhaustion.py + + #### Verify in + + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# sudo swapoff -a +root@sonic:/home/cisco# +root@sonic:/home/cisco# nohup bash -c "sleep 5 && tail /dev/zero" & +root@sonic:/home/cisco# nohup: ignoring input and appending output to 'nohup.out' +root@sonic:/home/cisco# +. +(Going for reboot) +. +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. 
&' to run out of memory completely.
+``` + +Switch: + +``` +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +``` + +#### Pass/Fail Criteria + + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. 
+ + +### 1.16 Check DPU status and pcie Link after restart pmon on Switch + +#### Steps + * Use command on Switch: `docker ps` to check the status of all the dockers. + * Use command on Switch: `systemctl restart pmon` + * Wait for 3 mins + * Use command on Switch: `docker ps` to check the status of all the dockers. + * Use command on Switch: `show chassis modules status` to check status of the DPUs. + +#### Verify in + * Switch + +#### Sample Output + +``` +root@MtFuji:/home/cisco# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a49fcf07beb8 docker-snmp:latest "/usr/local/bin/supe…" 3 days ago Up 3 days snmp +57ecc675292d docker-platform-monitor:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days pmon +f1306072ba01 docker-sonic-mgmt-framework:latest "/usr/local/bin/supe…" 3 days ago Up 3 days mgmt-framework +571cc36585ae docker-lldp:latest "/usr/bin/docker-lld…" 3 days ago Up 3 days lldp +db4b1444e8a0 docker-sonic-gnmi:latest "/usr/local/bin/supe…" 3 days ago Up 3 days gnmi +a90702b9c541 d0a0fb621c53 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_server +4d2d79b77c66 2c214d2315a2 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_relay +90246d1e26d2 docker-fpm-frr:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days bgp +42cf834770a8 docker-orchagent:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days swss +7eb9da209385 docker-router-advertiser:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days radv +66c4c8779e60 docker-syncd-cisco:latest "/usr/local/bin/supe…" 3 days ago Up 3 days syncd +5d542c98fb00 docker-teamd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days teamd +a5225d08bcf4 docker-eventd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days eventd +bd7555425d6d docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu5 +42fd04767a03 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu4 +a1633fc4a6ff docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu3 +32b4e9506827 
docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu0 +bb73239399e4 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu6 +e5281aba74de docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu7 +96032ebcb451 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu1 +45418ff0d88f docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu2 +39ddbddd3fb3 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database +f1cac669cd08 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database-chassis +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# systemctl restart pmon +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a49fcf07beb8 docker-snmp:latest "/usr/local/bin/supe…" 3 days ago Up 3 days snmp +57ecc675292d docker-platform-monitor:latest "/usr/bin/docker_ini…" 3 days ago Up 4 seconds pmon +f1306072ba01 docker-sonic-mgmt-framework:latest "/usr/local/bin/supe…" 3 days ago Up 3 days mgmt-framework +571cc36585ae docker-lldp:latest "/usr/bin/docker-lld…" 3 days ago Up 3 days lldp +db4b1444e8a0 docker-sonic-gnmi:latest "/usr/local/bin/supe…" 3 days ago Up 3 days gnmi +a90702b9c541 d0a0fb621c53 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_server +4d2d79b77c66 2c214d2315a2 "/usr/bin/docker_ini…" 3 days ago Up 3 days dhcp_relay +90246d1e26d2 docker-fpm-frr:latest "/usr/bin/docker_ini…" 3 days ago Up 3 days bgp +42cf834770a8 docker-orchagent:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days swss +7eb9da209385 docker-router-advertiser:latest "/usr/bin/docker-ini…" 3 days ago Up 3 days radv +66c4c8779e60 docker-syncd-cisco:latest "/usr/local/bin/supe…" 3 days ago Up 3 days syncd +5d542c98fb00 docker-teamd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days teamd +a5225d08bcf4 
docker-eventd:latest "/usr/local/bin/supe…" 3 days ago Up 3 days eventd +bd7555425d6d docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu5 +42fd04767a03 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu4 +a1633fc4a6ff docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu3 +32b4e9506827 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu0 +bb73239399e4 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu6 +e5281aba74de docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu7 +96032ebcb451 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu1 +45418ff0d88f docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days databasedpu2 +39ddbddd3fb3 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database +f1cac669cd08 docker-database:latest "/usr/local/bin/dock…" 3 days ago Up 3 days database-chassis +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# +root@MtFuji:/home/cisco# systemctl status pmon +● pmon.service - Platform monitor container + Loaded: loaded (/lib/systemd/system/pmon.service; static) + Drop-In: /etc/systemd/system/pmon.service.d + └─auto_restart.conf + Active: active (running) since Sat 2024-10-05 00:22:29 UTC; 24s ago + Process: 3584922 ExecStartPre=/usr/bin/pmon.sh start (code=exited, status=0> + Main PID: 3584995 (pmon.sh) + Tasks: 2 (limit: 153342) + Memory: 27.6M + CGroup: /system.slice/pmon.service + ├─3584995 /bin/bash /usr/bin/pmon.sh wait + └─3585000 python3 /usr/local/bin/container wait pmon + +Oct 05 00:22:28 MtFuji container[3584943]: container_start: pmon: set_owner:loc> +Oct 05 00:22:29 MtFuji container[3584943]: docker cmd: start for pmon +Oct 05 00:22:29 MtFuji container[3584943]: container_start: END +Oct 05 00:22:29 MtFuji systemd[1]: Started pmon.service - Platform monitor cont> +Oct 05 00:22:29 MtFuji container[3585000]: 
Verify pmon and all the associated critical processes are up.
+* Use command on Switch: `show chassis modules status` to check status of the DPUs. + +#### Verify in + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# config reload -y +Acquired lock on /etc/sonic/reload.lock +Disabling container monitoring ... +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /etc/sonic/init_cfg.json -j /etc/sonic/config_db.json --write-to-db +Running command: /usr/local/bin/db_migrator.py -o migrate +Running command: /usr/local/bin/sonic-cfggen -d -y /etc/sonic/sonic_version.yml -t /usr/share/sonic/templates/sonic-environment.j2,/etc/sonic/sonic-environment +Restarting SONiC target ... +Enabling container monitoring ... +Reloading Monit configuration ... +Reinitializing monit daemon +Released lock on /etc/sonic/reload.lock +root@MtFuji:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + 
+root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + + +### 1.18 Check DPU status and pcie Link after kernel panic on Switch + +#### Steps +* Use command on Switch: `nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" &` +* Use command on Switch: `show chassis modules status` to check status of the DPUs. +* Use command on Switch: `config chassis module startup ` to power on the DPUs. +* Use command on Switch: `show chassis modules status` to check status of the DPUs. +* Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_kdump.py + +#### Verify in + * Switch + +#### Sample Output + +``` +root@sonic:/home/cisco# nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" & +. +(Going for reboot) +. 
+root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# show reboot-cause + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + * Verify `show reboot-cause ` to check the reboot is caused by kernel panic. 
+ + +### 1.19 Check DPU status and pcie Link after kernel panic on DPU + +#### Steps +* Use command on DPU: `nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" &`. +* Powercycling of DPU is to ensure that pcie link came up properly after the memory exhaustion test. +* Use command on Switch: `config chassis module shutdown ` to power off the DPUs. +* Wait for 3 mins. +* Use command on Switch: `config chassis module startup ` to power on the DPUs. + +#### Verify in + * DPU and Switch + +#### Sample Output + +DPU: + +``` +root@sonic:/home/admin# nohup bash -c "sleep 5 && echo c > /proc/sysrq-trigger" & +. +(Going for reboot) +. +``` + +Switch: + +``` +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. 
+64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +root@sonic:/home/cisco# show reboot-cause + +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. + * Verify `show reboot-cause ` to check the reboot is caused by kernel panic. + + +### 1.20 Check DPU status and pcie Link after switch power cycle + +#### Steps + * Power cycle the testbed using PDU controller. + * Use command on Switch: `config chassis module startup ` to power on the DPUs. + * Use command on Switch: `show chassis modules status` to check status of the DPUs. + * Append to the existing test case: https://github.com/sonic-net/sonic-mgmt/blob/master/tests/platform_tests/test_power_off_reboot.py + +#### Verify in + * Switch + +#### Sample Output + +After power cycle of the testbed, + +``` +root@sonic:/home/cisco# +root@sonic:/home/cisco# +. +(Going for power off reboot using pdu controller) +. 
+root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Offline down 154226463179152 + DPU2 Data Processing Unit N/A Offline down 154226463179168 + DPUX Data Processing Unit N/A Offline down 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# config chassis startup DPU1 +root@sonic:/home/cisco# config chassis startup DPU2 +root@sonic:/home/cisco# config chassis startup DPUX +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis modules startup +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.28.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +``` + +#### Pass/Fail Criteria + * Verify number of DPUs from inventory file for the testbed and number of DPUs shown in the cli output. + * Verify Ping works to all the mid plane ip listed in the ansible inventory file for the testbed. 
+ + +### 1.21 Check DPU status and pcie Link after SW trip by temperature trigger + +#### Steps + + * Infrastructure will be provided to run the scripts that triggers the temperature trip based on vendor. + * The following is the example sequence to trigger temperature trip on the DPU + - Note: If Cisco setup, the following steps work. + - In DPU, Execute: `docker exec -it polaris /bin/bash` + - Create /tmp/temp_sim.json file with dictionary { "hbmtemp": 65, "dietemp": 85} + - Increase dietemp to 125 to trigger the trip. + +#### Verify in + * DPU + +#### Sample Output + +DPU: + +``` +# Note: This command is run on cisco specific testbed. +# HwSKU: Cisco-8102-28FH-DPU-O-T1 + +root@sonic:/home/admin# +root@sonic:/home/admin# docker exec -it polaris /bin/bash +bash-4.4# +bash-4.4# cat /tmp/temp_sim.json +{ "hbmtemp": 65, +"dietemp": 85 } +bash-4.4# +bash-4.4# +bash-4.4# +bash-4.4# cat /tmp/temp_sim.json +{ "hbmtemp": 65, +"dietemp": 125 } +bash-4.4# exit +exit +root@sonic:/home/admin# e** RESETTING: SW TRIP dietemBoot0 v19, Id 0x82 +Boot fwid 0 @ 0x74200000... OK + +. +(Going for reboot) +. 
+ +``` +Switch: + +``` +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Offline down 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# config chassis shutdown DPU0 +root@sonic:/home/cisco# config chassis startup DPU0 +root@sonic:/home/cisco# +root@sonic:/home/cisco# +root@sonic:/home/cisco# show chassis modules status + Name Description Physical-Slot Oper-Status Admin-Status Serial +------ -------------------- --------------- ------------- -------------- --------------- + DPU0 Data Processing Unit N/A Online up 154226463179136 + DPU1 Data Processing Unit N/A Online up 154226463179152 + DPU2 Data Processing Unit N/A Online up 154226463179168 + DPUX Data Processing Unit N/A Online up 154226463179184 + +root@sonic:/home/cisco# ping 169.254.200.1 +PING 169.254.200.1 (169.254.200.1) 56(84) bytes of data. +64 bytes from 169.254.200.1: icmp_seq=1 ttl=64 time=0.160 ms +^C +--- 169.254.200.1 ping statistics --- +1 packets transmitted, 1 received, 0% packet loss, time 0ms +rtt min/avg/max/mdev = 0.160/0.160/0.160/0.000 ms +root@sonic:/home/cisco# +root@sonic:/home/cisco# show reboot-cause history dpu0 + + +``` + +#### Pass/Fail Criteria + * Verify Ping works to DPU mid plane ip listed in the ansible inventory file for the testbed. + * Verify the command on Switch: `show reboot-cause history` to check the cause is cattrip.
+ + ## Objectives of API Test Cases | | **Test Case** | **Intention** | **Comments** | From 315c7e2e25f8e3a45b5185554f2f25b9acdc8fec Mon Sep 17 00:00:00 2001 From: Chuan Wu <103085864+echuawu@users.noreply.github.com> Date: Thu, 7 Nov 2024 05:31:14 +0800 Subject: [PATCH 009/175] Update qos pcbb parameter for SN4600C on dualtor aa topology (#15303) Update qos pcbb parameter for SN4600C on dualtor aa topology Change-Id: Ie99762681cad38e02059cb064a28fdd86288e672 --- tests/qos/files/qos_params.spc3.yaml | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/qos/files/qos_params.spc3.yaml b/tests/qos/files/qos_params.spc3.yaml index b132af4c1ae..e4b91b88e8f 100644 --- a/tests/qos/files/qos_params.spc3.yaml +++ b/tests/qos/files/qos_params.spc3.yaml @@ -99,6 +99,39 @@ qos_params: pkts_num_trig_pfc: 176064 pkts_num_trig_ingr_drp: 177916 pkts_num_margin: 4 + topo-dualtor-aa: + 100000_40m: + pkts_num_leak_out: 0 + pcbb_xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_3: + outer_dscp: 2 + dscp: 3 + ecn: 1 + pg: 2 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 + pcbb_xoff_4: + outer_dscp: 6 + dscp: 4 + ecn: 1 + pg: 6 + pkts_num_trig_pfc: 176064 + pkts_num_trig_ingr_drp: 177916 + pkts_num_margin: 4 topo-dualtor-aa-64-breakout: 200000_40m: pkts_num_leak_out: 0 From d3f233a9cc86a59f0a7dee281d60dc360480a0ed Mon Sep 17 00:00:00 2001 From: weguo-NV <154216071+weiguo-nvidia@users.noreply.github.com> Date: Thu, 7 Nov 2024 05:43:38 +0800 Subject: [PATCH 010/175] Fix test_check_show_lpmode case (#15062) --- .../platform_tests/sfp/test_show_intf_xcvr.py | 14 +++++++++++++- tests/platform_tests/sfp/util.py | 19 +++++++++---------- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git 
a/tests/platform_tests/sfp/test_show_intf_xcvr.py b/tests/platform_tests/sfp/test_show_intf_xcvr.py index 928720e0a34..962e58dd762 100644 --- a/tests/platform_tests/sfp/test_show_intf_xcvr.py +++ b/tests/platform_tests/sfp/test_show_intf_xcvr.py @@ -83,7 +83,19 @@ def test_check_show_lpmode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, return assert sfp_lpmode['rc'] == 0, "Run command '{}' failed".format(cmd_sfp_presence) + sfp_lpmode_data = sfp_lpmode["stdout_lines"] + + # Check if the header is present + header = sfp_lpmode_data[0] + logging.info(f"The header is: {header}") + if header.replace(" ", "") != "Port Low-power Mode".replace(" ", ""): + logging.error("Invalid output format: Header missing") + return False + + # Check interface lpmode + sfp_lpmode_info = parse_output(sfp_lpmode_data[2:]) + logging.info(f"The interface sfp lpmode info is: {sfp_lpmode_info}") for intf in dev_conn: if intf not in xcvr_skip_list[duthost.hostname]: assert validate_transceiver_lpmode( - sfp_lpmode['stdout']), "Interface mode incorrect in 'show interface transceiver lpmode'" + sfp_lpmode_info, intf), "Interface mode incorrect in 'show interface transceiver lpmode'" diff --git a/tests/platform_tests/sfp/util.py b/tests/platform_tests/sfp/util.py index 50065ceb421..c793291b432 100644 --- a/tests/platform_tests/sfp/util.py +++ b/tests/platform_tests/sfp/util.py @@ -47,15 +47,14 @@ def get_dev_conn(duthost, conn_graph_facts, asic_index): return portmap, dev_conn -def validate_transceiver_lpmode(output): - lines = output.strip().split('\n') - # Check if the header is present - if lines[0].replace(" ", "") != "Port Low-power Mode".replace(" ", ""): - logging.error("Invalid output format: Header missing") +def validate_transceiver_lpmode(sfp_lpmode, port): + lpmode = sfp_lpmode.get(port) + if lpmode is None: + logging.error(f"Interface {port} does not present in the show command") return False - for line in lines[2:]: - port, lpmode = line.strip().split() - if lpmode not in 
["Off", "On"]: - logging.error("Invalid low-power mode {} for port {}".format(lpmode, port)) - return False + + if lpmode not in ["Off", "On"]: + logging.error("Invalid low-power mode {} for port {}".format(lpmode, port)) + return False + return True From 827177af2f774be0c0a0f324f6299fe1bdabdabb Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Thu, 7 Nov 2024 05:44:24 +0800 Subject: [PATCH 011/175] fix testQosSaiDscpToPgMapping issue (#15378) The issue is introduced by the PR:https://github.com/sonic-net/sonic-mgmt/pull/9859. This PR should be dedicated for broadcom-dnx, it should not change the logic for the remaining platform, so restore the original logic for non-broadcom-dnx. --- tests/saitests/py3/sai_qos_tests.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index dd5b159f684..eb3373596b4 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -1193,11 +1193,11 @@ def runTest(self): print(list(map(operator.sub, pg_cntrs, pg_cntrs_base)), file=sys.stderr) for i in range(0, PG_NUM): - # DNX/Chassis: - # pg = 0 => Some extra packets with unmarked TC - # pg = 4 => Extra packets for LACP/BGP packets - # pg = 7 => packets from cpu to front panel ports - if platform_asic and platform_asic in ["broadcom-dnx", "cisco-8000"]: + if platform_asic and platform_asic == "broadcom-dnx": + # DNX/Chassis: + # pg = 0 => Some extra packets with unmarked TC + # pg = 4 => Extra packets for LACP/BGP packets + # pg = 7 => packets from cpu to front panel ports if i == pg: if i == 3: assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) @@ -1210,9 +1210,19 @@ def runTest(self): assert (pg_cntrs[i] == pg_cntrs_base[i]) else: if i == pg: - assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) + if i == 0 or i == 4: + assert (pg_cntrs[pg] >= + pg_cntrs_base[pg] + len(dscps)) + else: + assert (pg_cntrs[pg] == + pg_cntrs_base[pg] + 
len(dscps)) else: - assert (pg_cntrs[i] == pg_cntrs_base[i]) + # LACP packets are mapped to queue0 and tcp syn packets for BGP to queue4 + # So for those queues the count could be more + if i == 0 or i == 4: + assert (pg_cntrs[i] >= pg_cntrs_base[i]) + else: + assert (pg_cntrs[i] == pg_cntrs_base[i]) # confirm that dscp pkts are received total_recv_cnt = 0 dscp_recv_cnt = 0 From 7d6094d05289da91b0eacee6d93663aef4e668c3 Mon Sep 17 00:00:00 2001 From: Jianquan Ye Date: Thu, 7 Nov 2024 08:56:57 +1000 Subject: [PATCH 012/175] [CI/CD] prolong pytest collect only time out setting (#15386) Description of PR Summary: Fixes the timeout issue on pytest collect only Approach What is the motivation for this PR? Fixes the timeout issue on pytest collect only co-authorized by: jianquanye@microsoft.com --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c06c4b0bfca..1256f817404 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -35,7 +35,7 @@ stages: - job: validate_test_cases displayName: "Validate Test Cases" - timeoutInMinutes: 20 + timeoutInMinutes: 30 continueOnError: false pool: sonic-common steps: From 9c40bc43d7513264bc9a00de399d2d98d6584560 Mon Sep 17 00:00:00 2001 From: Jianquan Ye Date: Thu, 7 Nov 2024 09:05:10 +1000 Subject: [PATCH 013/175] Fix the wrong reboot wait/timeout setting on test_link_down (#15377) Description of PR Summary: Fixes # (issue) Fix the wait ssh timeout. set_max_to_reboot is expected to set max wait time due to different duthost, But previously it auto uses the fixture duthost, which always return the first dut in duthosts, For example, if duthosts= [lc1,lc2,rp] the duthost=lc. But for test case test_link_down_on_sup_reboot, we should get the configuration for rp. Hence update the logic, explicitly pass the target duthost to the function. Approach What is the motivation for this PR? Fix the failure for chassis on test_link_down How did you do it? 
explicitly pass the target duthost to the set_max_to_reboot . How did you verify/test it? run it locally with physical testbes co-authorized by: jianquanye@microsoft.com --- tests/platform_tests/test_link_down.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/platform_tests/test_link_down.py b/tests/platform_tests/test_link_down.py index 75426c219ef..7cad169c6a2 100644 --- a/tests/platform_tests/test_link_down.py +++ b/tests/platform_tests/test_link_down.py @@ -26,7 +26,6 @@ MAX_TIME_TO_REBOOT = 120 -@pytest.fixture(scope='function') def set_max_to_reboot(duthost): """ For chassis testbeds, we need to specify plt_reboot_ctrl in inventory file, @@ -148,12 +147,13 @@ def check_interfaces_and_services_all_LCs(duthosts, conn_graph_facts, xcvr_skip_ def test_link_down_on_sup_reboot(duthosts, localhost, enum_supervisor_dut_hostname, - conn_graph_facts, set_max_to_reboot, + conn_graph_facts, fanouthosts, xcvr_skip_list): if len(duthosts.nodes) == 1: pytest.skip("Skip single-host dut for this test") duthost = duthosts[enum_supervisor_dut_hostname] + set_max_to_reboot(duthost) # There are some errors due to reboot happened before this test file for some reason, # and SUP may not have enough time to recover all dockers and the wait for process wait for 300 secs in @@ -203,9 +203,10 @@ def test_link_down_on_sup_reboot(duthosts, localhost, enum_supervisor_dut_hostna def test_link_status_on_host_reboot(duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, - conn_graph_facts, set_max_to_reboot, + conn_graph_facts, fanouthosts, xcvr_skip_list): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + set_max_to_reboot(duthost) hostname = duthost.hostname # Before test, check all interfaces and services are up From ee3a928e59b095946d2e9c41b25d1a91769463f5 Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Thu, 7 Nov 2024 09:03:35 +0800 Subject: [PATCH 014/175] update for test_acl_tcp_rst due to design change (#13648) 1. 
For receiver and sender, tcp srt packet will have no payload. 2. Remove some fields ignore to check them, because they are expected same as the sent packet. Change-Id: Icf9b975c6563188202408d09612d79ea0bb1184f --- tests/dash/dash_acl.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/dash/dash_acl.py b/tests/dash/dash_acl.py index 1ce0abe5f7d..0248d4d6b91 100644 --- a/tests/dash/dash_acl.py +++ b/tests/dash/dash_acl.py @@ -1132,11 +1132,6 @@ def _check_tcp_rst_pkt_acl_permit(pkt): def _check_tcp_rst_pkt_acl_deny(pkt): def _set_do_not_care_fields(expected_rst_packt, bit_length_after_inner_tcp_falg): - expected_rst_packt.set_do_not_care(128, 16) # external packet total length - expected_rst_packt.set_do_not_care(304, 16) # udp length - expected_rst_packt.set_do_not_care(336, 16) # vxlan flags - expected_rst_packt.set_do_not_care(352, 16) # vxlan group policy id - expected_rst_packt.set_do_not_care(528, 16) # inner ip total length expected_rst_packt.set_do_not_care(592, 16) # checksum in inner packet # it includes the fields after inner tcp flag expected_rst_packt.set_do_not_care(784, bit_length_after_inner_tcp_falg) @@ -1146,11 +1141,12 @@ def _get_expected_rst_packet_to_receiver(): inner_extra_conf_to_receiver = copy.deepcopy(pkt.inner_extra_conf) inner_extra_conf_to_receiver["tcp_flags"] = "R" inner_extra_conf_to_receiver["ip_id"] = 0x0000 + inner_extra_conf_to_receiver["pktlen"] = 54 _, _, _, expected_rst_packet_to_receiver = packets.inbound_vnet_packets(pkt.dash_config_info, inner_extra_conf_to_receiver, inner_packet_type='tcp') logger.info("Set ignore fields for expected rst packet sent to receiver") - _set_do_not_care_fields(expected_rst_packet_to_receiver, 416) + _set_do_not_care_fields(expected_rst_packet_to_receiver, 48) return expected_rst_packet_to_receiver @@ -1185,7 +1181,7 @@ def _get_expected_rst_packet_to_sender(): # Verify packet(no syn) is dropped # verify packet RST packet is sent to two ends - 
len_expected_rst_packet_to_receiver = 150 + len_expected_rst_packet_to_receiver = 104 len_expected_rst_packet_to_sender = 104 packets.verify_tcp_packet_drop_rst_packet_sent( ptfadapter, From abb3df35e444a53054bddf5dc0fd14b447f4c2c1 Mon Sep 17 00:00:00 2001 From: Perumal Venkatesh Date: Wed, 6 Nov 2024 17:26:34 -0800 Subject: [PATCH 015/175] Increase startup_tsa_tsb time based on Cisco's observation. (#15367) Description of PR We are noticing that the time diff is ranging from 120-145 secs. Hence increasing it to 160secs to be on the safer side. After increasing the time we are seeing all testcases passing with all other changes that was added in PR #13290 In our case, since kdump is enabled, during abnormal reboot case, our reboot-cause is Kernel Panic. Made an appropriate change for Cisco chassis Approach What is the motivation for this PR? Check the functionality with a slight increase in time How did you do it? How did you verify/test it? Any platform specific information? Validated on Cisco 8808 chassis with T2 profile Supported testbed topology if it's a new test case? 
Documentation =========================== short test summary info ============================ PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_dut_cold_reboot[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_dut_abnormal_reboot[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_user_init_tsa[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_user_init_tsa_while_service_run_on_dut[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_user_init_tsb_while_service_run_on_dut[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_timer_efficiency[sfd-lt2-lc0] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_dut_cold_reboot[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_dut_abnormal_reboot[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_user_init_tsa[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_user_init_tsa_while_service_run_on_dut[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_user_init_tsb_while_service_run_on_dut[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_timer_efficiency[sfd-lt2-lc1] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_supervisor_cold_reboot[sfd-lt2-sup] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_supervisor_abnormal_reboot[sfd-lt2-sup] PASSED bgp/test_startup_tsa_tsb_service.py::test_user_init_tsb_on_sup_while_service_run_on_dut[sfd-lt2-sup] PASSED bgp/test_startup_tsa_tsb_service.py::test_tsa_tsb_service_with_tsa_on_sup[sfd-lt2-sup] ================= 16 passed, 1 warning in 31255.04s (8:40:55) ================== co-authorized by: jianquanye@microsoft.com --- tests/bgp/test_reliable_tsa.py | 8 ++-- tests/bgp/test_startup_tsa_tsb_service.py | 48 ++++++++++++++++------- 2 files changed, 37 insertions(+), 19 deletions(-) diff --git 
a/tests/bgp/test_reliable_tsa.py b/tests/bgp/test_reliable_tsa.py index e956d6ef26a..928e9590d97 100644 --- a/tests/bgp/test_reliable_tsa.py +++ b/tests/bgp/test_reliable_tsa.py @@ -850,7 +850,7 @@ def test_sup_tsa_act_with_sup_reboot(duthosts, localhost, enum_supervisor_dut_ho logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in the same maintenance state like before supervisor reboot @@ -1043,7 +1043,7 @@ def test_dut_tsa_act_with_reboot_when_sup_dut_on_tsb_init(duthosts, localhost, e logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is not started and in exited due to manual TSA pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'exited'), @@ -1355,7 +1355,7 @@ def test_sup_tsa_when_startup_tsa_tsb_service_running(duthosts, localhost, enum_ logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is started and running pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'running'), @@ -1464,7 +1464,7 @@ def 
test_sup_tsb_when_startup_tsa_tsb_service_running(duthosts, localhost, enum_ logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify startup_tsa_tsb service is started and running pytest_assert(wait_until(tsa_tsb_timer[linecard], 20, 0, get_tsa_tsb_service_status, linecard, 'running'), diff --git a/tests/bgp/test_startup_tsa_tsb_service.py b/tests/bgp/test_startup_tsa_tsb_service.py index 2b3e779b328..4170fdb766a 100644 --- a/tests/bgp/test_startup_tsa_tsb_service.py +++ b/tests/bgp/test_startup_tsa_tsb_service.py @@ -19,7 +19,7 @@ logger = logging.getLogger(__name__) - +KERNEL_PANIC_REBOOT_CAUSE = "Kernel Panic" COLD_REBOOT_CAUSE = 'cold' UNKNOWN_REBOOT_CAUSE = "Unknown" SUP_REBOOT_CAUSE = 'Reboot from Supervisor' @@ -209,7 +209,7 @@ def test_tsa_tsb_service_with_dut_cold_reboot(duthosts, localhost, enum_rand_one logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. 
@@ -325,7 +325,7 @@ def test_tsa_tsb_service_with_dut_abnormal_reboot(duthosts, localhost, enum_rand service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() logger.info("Time difference between dut up-time & tsa_tsb_service up-time is {}".format(int(time_diff))) - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Make sure BGP containers are running properly before verifying @@ -384,8 +384,17 @@ def test_tsa_tsb_service_with_dut_abnormal_reboot(duthosts, localhost, enum_rand # Make sure the dut's reboot cause is as expected logger.info("Check reboot cause of the dut") reboot_cause = get_reboot_cause(duthost) - pytest_assert(reboot_cause == UNKNOWN_REBOOT_CAUSE, - "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE)) + out = duthost.command('show kdump config') + if "Enabled" not in out["stdout"]: + pytest_assert( + reboot_cause == UNKNOWN_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE) + ) + else: + pytest_assert( + reboot_cause == KERNEL_PANIC_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, KERNEL_PANIC_REBOOT_CAUSE) + ) @pytest.mark.disable_loganalyzer @@ -442,7 +451,7 @@ def test_tsa_tsb_service_with_supervisor_cold_reboot(duthosts, localhost, enum_s logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. 
@@ -592,7 +601,7 @@ def test_tsa_tsb_service_with_supervisor_abnormal_reboot(duthosts, localhost, en logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Make sure BGP containers are running properly before verifying @@ -669,8 +678,17 @@ def test_tsa_tsb_service_with_supervisor_abnormal_reboot(duthosts, localhost, en # Make sure the Supervisor's reboot cause is as expected logger.info("Check reboot cause of the supervisor") reboot_cause = get_reboot_cause(suphost) - pytest_assert(reboot_cause == UNKNOWN_REBOOT_CAUSE, - "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE)) + out = suphost.command('show kdump config') + if "Enabled" not in out["stdout"]: + pytest_assert( + reboot_cause == UNKNOWN_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, UNKNOWN_REBOOT_CAUSE) + ) + else: + pytest_assert( + reboot_cause == KERNEL_PANIC_REBOOT_CAUSE, + "Reboot cause {} did not match the trigger {}".format(reboot_cause, KERNEL_PANIC_REBOOT_CAUSE) + ) @pytest.mark.disable_loganalyzer @@ -718,7 +736,7 @@ def test_tsa_tsb_service_with_user_init_tsa(duthosts, localhost, enum_rand_one_p logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Ensure startup_tsa_tsb service is in exited state after dut reboot @@ -825,7 +843,7 @@ def test_user_init_tsa_while_service_run_on_dut(duthosts, localhost, enum_rand_o logging.info('DUT {} up since 
{}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -941,7 +959,7 @@ def test_user_init_tsb_while_service_run_on_dut(duthosts, localhost, enum_rand_o logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. @@ -1059,7 +1077,7 @@ def test_user_init_tsb_on_sup_while_service_run_on_dut(duthosts, localhost, logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. 
@@ -1184,7 +1202,7 @@ def test_tsa_tsb_timer_efficiency(duthosts, localhost, enum_rand_one_per_hwsku_f logging.info('DUT {} up since {}'.format(duthost.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(duthost) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") logging.info("Wait until all critical services are fully started") @@ -1309,7 +1327,7 @@ def test_tsa_tsb_service_with_tsa_on_sup(duthosts, localhost, logging.info('DUT {} up since {}'.format(linecard.hostname, dut_uptime)) service_uptime = get_tsa_tsb_service_uptime(linecard) time_diff = (service_uptime - dut_uptime).total_seconds() - pytest_assert(int(time_diff) < 120, + pytest_assert(int(time_diff) < 160, "startup_tsa_tsb service started much later than the expected time after dut reboot") # Verify DUT is in maintenance state. From 7ea5c82a899d4403adb78a9c599d47c944552d25 Mon Sep 17 00:00:00 2001 From: Sai Kiran <110003254+opcoder0@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:12:44 +1100 Subject: [PATCH 016/175] Re-configure ExaBGP http-api (for py3 only) (#15383) Re-configure ExaBGP's http-api (for Py3 only). With Python 3 only environment using ExaBGP 4 with Flask 3.x the werkzeug Requests max form memory is capped to 500k (https://werkzeug.palletsprojects.com/en/stable/request_data/#limiting-request-data). This breaks announce routes. This PR increases the limit. --- ansible/library/exabgp.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ansible/library/exabgp.py b/ansible/library/exabgp.py index 3a8b41de911..f46b262211d 100644 --- a/ansible/library/exabgp.py +++ b/ansible/library/exabgp.py @@ -78,6 +78,16 @@ def run_command(): return "OK\\n" if __name__ == '__main__': + # with werkzeug 3.x the default size of max_form_memory_size + # is 500K. 
Routes reach a bit beyond that and the client + # receives HTTP 413. + # Configure the max size to 4 MB to be safe. + if not six.PY2: + from werkzeug import Request + max_content_length = 4 * 1024 * 1024 + Request.max_content_length = max_content_length + Request.max_form_memory_size = max_content_length + Request.max_form_parts = max_content_length app.run(host='0.0.0.0', port=sys.argv[1]) ''' From 20a1958073430280d8f1988b03fbc2c368304214 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Thu, 7 Nov 2024 13:31:19 +0800 Subject: [PATCH 017/175] Remove skip_traffic_test fixture in acl tests (#15366) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test in testcases How did you verify/test it? 
--- .../custom_acl_table/test_custom_acl_table.py | 30 ++--- .../acl/null_route/test_null_route_helper.py | 10 +- tests/acl/test_acl.py | 116 +++++++++--------- tests/acl/test_acl_outer_vlan.py | 66 +++++----- tests/acl/test_stress_acl.py | 24 ++-- 5 files changed, 118 insertions(+), 128 deletions(-) diff --git a/tests/acl/custom_acl_table/test_custom_acl_table.py b/tests/acl/custom_acl_table/test_custom_acl_table.py index 617e9cac66f..7e1cddc252b 100644 --- a/tests/acl/custom_acl_table/test_custom_acl_table.py +++ b/tests/acl/custom_acl_table/test_custom_acl_table.py @@ -11,7 +11,6 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 logger = logging.getLogger(__name__) @@ -251,7 +250,7 @@ def build_exp_pkt(input_pkt): def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, setup_acl_rules, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_counterpoll_interval, remove_dataacl_table, skip_traffic_test): # noqa F811 + setup_counterpoll_interval, remove_dataacl_table): # noqa F811 """ The test case is to verify the functionality of custom ACL table Test steps @@ -263,6 +262,7 @@ def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, 6. 
Verify the counter of expected rule increases as expected """ mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) + asic_type = rand_selected_dut.facts['asic_type'] if "dualtor" in tbinfo["topo"]["name"]: mg_facts_unselected_dut = rand_unselected_dut.get_extended_minigraph_facts(tbinfo) vlan_name = list(mg_facts['minigraph_vlans'].keys())[0] @@ -288,15 +288,17 @@ def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, exp_pkt = build_exp_pkt(pkt) # Send and verify packet clear_acl_counter(rand_selected_dut) - if not skip_traffic_test: - if "dualtor-aa" in tbinfo["topo"]["name"]: - clear_acl_counter(rand_unselected_dut) - ptfadapter.dataplane.flush() - testutils.send(ptfadapter, pkt=pkt, port_id=src_port_indice) - testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=dst_port_indices, timeout=5) - acl_counter = read_acl_counter(rand_selected_dut, rule) - if "dualtor-aa" in tbinfo["topo"]["name"]: - acl_counter_unselected_dut = read_acl_counter(rand_unselected_dut, rule) - acl_counter += acl_counter_unselected_dut - # Verify acl counter - pytest_assert(acl_counter == 1, "ACL counter for {} didn't increase as expected".format(rule)) + if "dualtor-aa" in tbinfo["topo"]["name"]: + clear_acl_counter(rand_unselected_dut) + if asic_type == 'vs': + logger.info("Skip ACL verification on VS platform") + continue + ptfadapter.dataplane.flush() + testutils.send(ptfadapter, pkt=pkt, port_id=src_port_indice) + testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=dst_port_indices, timeout=5) + acl_counter = read_acl_counter(rand_selected_dut, rule) + if "dualtor-aa" in tbinfo["topo"]["name"]: + acl_counter_unselected_dut = read_acl_counter(rand_unselected_dut, rule) + acl_counter += acl_counter_unselected_dut + # Verify acl counter + pytest_assert(acl_counter == 1, "ACL counter for {} didn't increase as expected".format(rule)) diff --git a/tests/acl/null_route/test_null_route_helper.py 
b/tests/acl/null_route/test_null_route_helper.py index e2f5da0a557..fbea5d5f1d7 100644 --- a/tests/acl/null_route/test_null_route_helper.py +++ b/tests/acl/null_route/test_null_route_helper.py @@ -9,7 +9,7 @@ from ptf.mask import Mask import ptf.packet as scapy -from tests.common.fixtures.ptfhost_utils import remove_ip_addresses, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 import ptf.testutils as testutils from tests.common.helpers.assertions import pytest_require from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError @@ -229,12 +229,10 @@ def generate_packet(src_ip, dst_ip, dst_mac): return pkt, exp_pkt -def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action, skip_traffic_test): # noqa F811 +def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action): # noqa F811 """ Send packet with ptfadapter and verify if packet is forwarded or dropped as expected. """ - if skip_traffic_test: - return ptfadapter.dataplane.flush() testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) if expected_action == FORWARD: @@ -244,7 +242,7 @@ def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_ def test_null_route_helper(rand_selected_dut, tbinfo, ptfadapter, - apply_pre_defined_rules, setup_ptf, skip_traffic_test): # noqa F811 + apply_pre_defined_rules, setup_ptf): # noqa F811 """ Test case to verify script null_route_helper. 
Some packets are generated as defined in TEST_DATA and sent to DUT, @@ -280,4 +278,4 @@ def test_null_route_helper(rand_selected_dut, tbinfo, ptfadapter, time.sleep(1) send_and_verify_packet(ptfadapter, pkt, exp_pkt, random.choice(ptf_interfaces), - rx_port, expected_result, skip_traffic_test) + rx_port, expected_result) diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index e97d0fb176d..1831e1343f0 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -19,7 +19,6 @@ from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common.config_reload import config_reload from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr # noqa F401 from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.utilities import wait_until, get_upstream_neigh_type, get_downstream_neigh_type, check_msg_in_syslog @@ -634,7 +633,7 @@ def setup_rules(self, dut, acl_table, ip_version): """ pass - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Perform actions after rules have been applied. Args: @@ -664,7 +663,7 @@ def teardown_rules(self, dut): @pytest.fixture(scope="class", autouse=True) def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, - ip_version, conn_graph_facts): # noqa F811 + ip_version, conn_graph_facts): # noqa F811 """Setup/teardown ACL rules for the current set of tests. 
Args: @@ -704,7 +703,7 @@ def tear_down_acl_rule_single_dut(self, duthost, loganalyzer): duthost, LOG_EXPECT_ACL_RULE_REMOVE_RE) def set_up_acl_rules_single_dut(self, acl_table, - conn_graph_facts, dut_to_analyzer_map, duthost, # noqa F811 + conn_graph_facts, dut_to_analyzer_map, duthost, # noqa F811 ip_version, localhost, populate_vlan_arp_entries, tbinfo): logger.info("{}: ACL rule application started".format(duthost.hostname)) @@ -967,57 +966,57 @@ def expected_mask_routed_packet(self, pkt, ip_version): return exp_pkt - def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage, skip_traffic_test): # noqa F811 + def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage): """Verify that unmatched packets are dropped for ingress.""" if stage == "egress": pytest.skip("Only run for ingress") pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) - def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage, skip_traffic_test): # noqa F811 + def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage): """Verify that default egress rule allow all traffics""" if stage == "ingress": pytest.skip("Only run for egress") pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward a packet on source IP.""" src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6" pkt = 
self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(1) def test_rules_priority_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we respect rule priorites in the forwarding case.""" src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(20) def test_rules_priority_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we respect rule priorites in the drop case.""" src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(7) def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, vlan_name, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version, vlan_name): """Verify that we can match and forward a packet on destination IP.""" dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] \ if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version] pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip) - self._verify_acl_traffic(setup, direction, 
ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) # Because m0_l3_scenario use differnet IPs, so need to verify different acl rules. if direction == "uplink->downlink": if setup["topo"] == "m0_l3": @@ -1037,13 +1036,13 @@ def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check.append(rule_id) def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, vlan_name, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version, vlan_name): """Verify that we can match and drop a packet on destination IP.""" dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] \ if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version] pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) # Because m0_l3_scenario use differnet IPs, so need to verify different acl rules. 
if direction == "uplink->downlink": if setup["topo"] == "m0_l3": @@ -1063,156 +1062,156 @@ def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check.append(rule_id) def test_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop a packet on source IP.""" src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(14) def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward a UDP packet on source IP.""" src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8" pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(13) def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop a UDP packet on source IP.""" src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2" pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) 
counters_sanity_check.append(26) def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop an ICMP packet on source IP.""" src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2" pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(25) def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward an ICMP packet on source IP.""" src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8" pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(12) def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on L4 destination port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(9) def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and 
forward on L4 source port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(4) def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on a range of L4 destination ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(11) def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on a range of L4 source ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(10) def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on a range of L4 destination ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(22) def 
test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on a range of L4 source ports.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(17) def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on the IP protocol.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(5) def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and forward on the TCP flags.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(6) def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on L4 destination port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, 
skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(22) def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on L4 source port.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(17) def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the IP protocol.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(18) def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the TCP flags.""" pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version) counters_sanity_check.append(19) def test_icmp_match_forwarded(self, setup, direction, ptfadapter, - counters_sanity_check, ip_version, skip_traffic_test): # noqa F811 + counters_sanity_check, ip_version): """Verify that we can match and drop on the TCP flags.""" src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10" 
pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1) - self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version, skip_traffic_test) + self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version) counters_sanity_check.append(29) - def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version, skip_traffic_test): # noqa F811 + def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version): exp_pkt = self.expected_mask_routed_packet(pkt, ip_version) if ip_version == "ipv4": @@ -1220,9 +1219,6 @@ def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_ver else: downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst) - if skip_traffic_test: - return - ptfadapter.dataplane.flush() testutils.send(ptfadapter, self.src_port, pkt) if direction == "uplink->downlink" and downstream_dst_port: @@ -1299,7 +1295,7 @@ class TestAclWithReboot(TestBasicAcl): upon startup. """ - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Save configuration and reboot after rules are applied. Args: @@ -1344,7 +1340,7 @@ class TestAclWithPortToggle(TestBasicAcl): Verify that ACLs still function as expected after links flap. """ - def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 + def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, conn_graph_facts): # noqa F811 """Toggle ports after rules are applied. 
Args: diff --git a/tests/acl/test_acl_outer_vlan.py b/tests/acl/test_acl_outer_vlan.py index a9a237b8a08..0c1d0288401 100644 --- a/tests/acl/test_acl_outer_vlan.py +++ b/tests/acl/test_acl_outer_vlan.py @@ -14,7 +14,7 @@ from tests.common.utilities import wait_until from tests.common.config_reload import config_reload from tests.common.helpers.assertions import pytest_assert, pytest_require -from tests.common.fixtures.ptfhost_utils import change_mac_addresses, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from abc import abstractmethod from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -515,7 +515,7 @@ def setup(self, rand_selected_dut, ptfhost, ip_version, vlan_setup_info): self.post_running_hook(rand_selected_dut, ptfhost, ip_version) def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, - ip_version, tagged_mode, action, skip_traffic_test): # noqa F811 + ip_version, tagged_mode, action): # noqa F811 vlan_setup, _, _, _ = vlan_setup_info test_setup_config = self.setup_cfg(duthost, tbinfo, vlan_setup, tagged_mode, ip_version) @@ -556,17 +556,21 @@ def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, testutils.send(ptfadapter, port, mac_pkt) table_name = ACL_TABLE_NAME_TEMPLATE.format(stage, ip_version) + asic_type = duthost.facts['asic_type'] + if asic_type == 'vs': + logger.info("Skip ACL verification on VS platform") + return try: self._setup_acl_rules(duthost, stage, ip_version, outer_vlan_id, action) - if not skip_traffic_test: - count_before = get_acl_counter(duthost, table_name, RULE_1, timeout=0) - send_and_verify_traffic(ptfadapter, pkt, exp_pkt, src_port, dst_port, pkt_action=action) - count_after = get_acl_counter(duthost, table_name, RULE_1) - - logger.info("Verify Acl counter incremented {} > 
{}".format(count_after, count_before)) - pytest_assert(count_after >= count_before + 1, - "Unexpected results, counter_after {} > counter_before {}" - .format(count_after, count_before)) + + count_before = get_acl_counter(duthost, table_name, RULE_1, timeout=0) + send_and_verify_traffic(ptfadapter, pkt, exp_pkt, src_port, dst_port, pkt_action=action) + count_after = get_acl_counter(duthost, table_name, RULE_1) + + logger.info("Verify Acl counter incremented {} > {}".format(count_after, count_before)) + pytest_assert(count_after >= count_before + 1, + "Unexpected results, counter_after {} > counter_before {}" + .format(count_after, count_before)) except Exception as e: raise (e) finally: @@ -574,83 +578,75 @@ def _do_verification(self, ptfadapter, duthost, tbinfo, vlan_setup_info, @pytest.mark.po2vlan def test_tagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on tagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_TAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_TAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_tagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on tagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_TAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_TAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_untagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - 
ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on untagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_UNTAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_UNTAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_untagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on untagged interface """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_UNTAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_UNTAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_combined_tagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on tagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_TAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_COMBINE_TAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_combined_tagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on tagged interface, and the interface belongs to 
two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_TAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_COMBINE_TAGGED, ACTION_DROP) @pytest.mark.po2vlan def test_combined_untagged_forwarded(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is forwarded by ACL rule on untagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_UNTAGGED, ACTION_FORWARD, skip_traffic_test) + ip_version, TYPE_COMBINE_UNTAGGED, ACTION_FORWARD) @pytest.mark.po2vlan def test_combined_untagged_dropped(self, ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 - skip_traffic_test): # noqa F811 + ip_version, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ Verify packet is dropped by ACL rule on untagged interface, and the interface belongs to two vlans """ self._do_verification(ptfadapter, rand_selected_dut, tbinfo, vlan_setup_info, - ip_version, TYPE_COMBINE_UNTAGGED, ACTION_DROP, skip_traffic_test) + ip_version, TYPE_COMBINE_UNTAGGED, ACTION_DROP) @pytest.fixture(scope='module', autouse=True) diff --git a/tests/acl/test_stress_acl.py b/tests/acl/test_stress_acl.py index bae67cdb873..47244a30bc4 100644 --- a/tests/acl/test_stress_acl.py +++ b/tests/acl/test_stress_acl.py @@ -7,7 +7,6 @@ from collections import defaultdict from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 from tests.common.utilities import wait_until -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("t0", 
"t1", "m0", "mx"), @@ -119,7 +118,7 @@ def prepare_test_port(rand_selected_dut, tbinfo): def verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, del_rule_id, verity_status, skip_traffic_test): # noqa F811 + acl_rule_list, del_rule_id, verity_status): for acl_id in acl_rule_list: ip_addr1 = acl_id % 256 @@ -146,13 +145,12 @@ def verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, exp_pkt.set_do_not_care_scapy(packet.Ether, 'src') exp_pkt.set_do_not_care_scapy(packet.IP, "chksum") - if not skip_traffic_test: - ptfadapter.dataplane.flush() - testutils.send(test=ptfadapter, port_id=ptf_src_port, pkt=pkt) - if verity_status == "forward" or acl_id == del_rule_id: - testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) - elif verity_status == "drop" and acl_id != del_rule_id: - testutils.verify_no_packet_any(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) + ptfadapter.dataplane.flush() + testutils.send(test=ptfadapter, port_id=ptf_src_port, pkt=pkt) + if verity_status == "forward" or acl_id == del_rule_id: + testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) + elif verity_status == "drop" and acl_id != del_rule_id: + testutils.verify_no_packet_any(test=ptfadapter, pkt=exp_pkt, ports=ptf_dst_ports) def acl_rule_loaded(rand_selected_dut, acl_rule_list): @@ -168,7 +166,7 @@ def acl_rule_loaded(rand_selected_dut, acl_rule_list): def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_file, prepare_test_port, get_function_completeness_level, - toggle_all_simulator_ports_to_rand_selected_tor, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 ptf_src_port, ptf_dst_ports, dut_port = prepare_test_port @@ -186,7 +184,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ rand_selected_dut.shell(cmd_create_table) acl_rule_list = list(range(1, 
ACL_RULE_NUMS + 1)) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, 0, "forward", skip_traffic_test) + acl_rule_list, 0, "forward") try: loops = 0 while loops <= loop_times: @@ -204,7 +202,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ wait_until(wait_timeout, 2, 0, acl_rule_loaded, rand_selected_dut, acl_rule_list) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, 0, "drop", skip_traffic_test) + acl_rule_list, 0, "drop") del_rule_id = random.choice(acl_rule_list) rand_selected_dut.shell('sonic-db-cli CONFIG_DB del "ACL_RULE|STRESS_ACL| RULE_{}"'.format(del_rule_id)) @@ -212,7 +210,7 @@ def test_acl_add_del_stress(rand_selected_dut, tbinfo, ptfadapter, prepare_test_ wait_until(wait_timeout, 2, 0, acl_rule_loaded, rand_selected_dut, acl_rule_list) verify_acl_rules(rand_selected_dut, ptfadapter, ptf_src_port, ptf_dst_ports, - acl_rule_list, del_rule_id, "drop", skip_traffic_test) + acl_rule_list, del_rule_id, "drop") loops += 1 finally: From 43ae678c6aeaf6090a824829fc65d6b4ed1030c2 Mon Sep 17 00:00:00 2001 From: Chris <156943338+ccroy-arista@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:54:36 -0800 Subject: [PATCH 018/175] sonic-mgmt: fix info in testbed_vm_info.py err msg (#15249) In ansible/library/testbed_vm_info.py, there is a format string passed to a method without the actual formatting applied, i.e. the '{}'s are printed out rather than the desired information they're supposed to represent. This change passes the error message to the method with the correct formatting applied. 
--- ansible/library/testbed_vm_info.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py index c5293be7bf1..19b1a485819 100644 --- a/ansible/library/testbed_vm_info.py +++ b/ansible/library/testbed_vm_info.py @@ -129,8 +129,7 @@ def main(): else: err_msg = "Cannot find the vm {} in VM inventory file {}, please make sure you have enough VMs" \ "for the topology you are using." - err_msg.format(vm_name, vm_facts.vm_file) - module.fail_json(msg=err_msg) + module.fail_json(msg=err_msg.format(vm_name, vm_facts.vm_file)) module.exit_json( ansible_facts={'neighbor_eosvm_mgmt': vm_mgmt_ip, 'topoall': vm_facts.topoall}) except (IOError, OSError): From 357c09a398226efe23ef021e55913fb39e8ebf3b Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Thu, 7 Nov 2024 15:49:15 +0800 Subject: [PATCH 019/175] Set default kvm_support to False in ptf_runner (#15380) What is the motivation for this PR? In PR #12598, we set default kvm_support to True in ptf_runner params, but most of tests do not support traffic test in KVM, to minimize code change in testcases, it's better to set default kvm_support to False How did you do it? Set default kvm_support to False and set kvm_support to True in tests currently running ptf_runner How did you verify/test it? 
--- tests/arp/test_arpall.py | 15 ++++++++++----- tests/bgp/test_bgp_speaker.py | 3 ++- tests/dhcp_relay/test_dhcp_relay.py | 21 ++++++++++++++------- tests/dhcp_relay/test_dhcp_relay_stress.py | 9 ++++++--- tests/dhcp_relay/test_dhcpv6_relay.py | 3 ++- tests/fdb/test_fdb_flush.py | 1 + tests/fdb/test_fdb_mac_expire.py | 3 ++- tests/fdb/test_fdb_mac_learning.py | 1 + tests/ipfwd/test_mtu.py | 3 ++- tests/pc/test_lag_2.py | 3 ++- tests/pc/test_lag_member.py | 3 ++- tests/ptf_runner.py | 3 ++- tests/radv/test_radv_ipv6_ra.py | 12 ++++++++---- 13 files changed, 54 insertions(+), 26 deletions(-) diff --git a/tests/arp/test_arpall.py b/tests/arp/test_arpall.py index b885023b688..11637cacd26 100644 --- a/tests/arp/test_arpall.py +++ b/tests/arp/test_arpall.py @@ -26,7 +26,8 @@ def test_arp_unicast_reply(common_setup_teardown, intfs_for_test, enum_frontend_ clear_dut_arp_cache(duthost, asichost.cli_ns_option) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } log_file = "/tmp/arptest.VerifyUnicastARPReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) ptf_runner(ptfhost, 'ptftests', "arptest.VerifyUnicastARPReply", '/root/ptftests', @@ -45,7 +46,8 @@ def test_arp_expect_reply(common_setup_teardown, intfs_for_test, enum_frontend_a asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Start PTF runner and send correct arp packets @@ -69,7 +71,8 @@ def test_arp_no_reply_other_intf(common_setup_teardown, intfs_for_test, enum_fro clear_dut_arp_cache(duthost, asichost.cli_ns_option) intf2_params = { 'acs_mac': router_mac, - 'port': intf2_indice + 'port': intf2_indice, + 'kvm_support': True } log_file = "/tmp/arptest.SrcOutRangeNoReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) ptf_runner(ptfhost, 'ptftests', "arptest.SrcOutRangeNoReply", '/root/ptftests', @@ -87,7 +90,8 @@ def 
test_arp_no_reply_src_out_range(common_setup_teardown, intfs_for_test, enum_ asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Check DUT won't reply ARP and install ARP entry when src address is not in interface subnet range @@ -108,7 +112,8 @@ def test_arp_garp_no_update(common_setup_teardown, intfs_for_test, enum_frontend asichost = duthost.asic_instance(enum_frontend_asic_index) params = { 'acs_mac': router_mac, - 'port': intf1_indice + 'port': intf1_indice, + 'kvm_support': True } # Test Gratuitous ARP behavior, no Gratuitous ARP installed when arp was not resolved before diff --git a/tests/bgp/test_bgp_speaker.py b/tests/bgp/test_bgp_speaker.py index 6082f19fc90..28c6e26b1db 100644 --- a/tests/bgp/test_bgp_speaker.py +++ b/tests/bgp/test_bgp_speaker.py @@ -334,7 +334,8 @@ def bgp_speaker_announce_routes_common(common_setup_teardown, tbinfo, duthost, "ipv6": ipv6, "testbed_mtu": mtu, "asic_type": asic_type, - "test_balancing": False}, + "test_balancing": False, + "kvm_support": True}, log_file="/tmp/bgp_speaker_test.FibTest.log", socket_recv_size=16384, is_python3=True) diff --git a/tests/dhcp_relay/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py index 99e622dbe8c..f14db90766d 100644 --- a/tests/dhcp_relay/test_dhcp_relay.py +++ b/tests/dhcp_relay/test_dhcp_relay.py @@ -254,7 +254,8 @@ def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) if not skip_dhcpmon: time.sleep(36) # dhcpmon debug counter prints every 18 seconds @@ -343,7 +344,8 @@ def test_dhcp_relay_with_source_port_ip_in_relay_enabled(ptfhost, 
dut_dhcp_relay "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), "testing_mode": testing_mode, - "enable_source_port_ip_in_relay": True}, + "enable_source_port_ip_in_relay": True, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) if not skip_dhcpmon: time.sleep(36) # dhcpmon debug counter prints every 18 seconds @@ -404,7 +406,8 @@ def test_dhcp_relay_after_link_flap(ptfhost, dut_dhcp_relay_data, validate_dut_r "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -460,7 +463,8 @@ def test_dhcp_relay_start_with_uplinks_down(ptfhost, dut_dhcp_relay_data, valida "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -495,7 +499,8 @@ def test_dhcp_relay_unicast_mac(ptfhost, dut_dhcp_relay_data, validate_dut_route "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) @@ -529,7 +534,8 @@ def test_dhcp_relay_random_sport(ptfhost, dut_dhcp_relay_data, validate_dut_rout "client_udp_src_port": RANDOM_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", 
is_python3=True) @@ -597,7 +603,8 @@ def test_dhcp_relay_counter(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dhcp_relay['switch_loopback_ip'], "uplink_mac": str(dhcp_relay['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test_counter.DHCPTest.log", is_python3=True) for type in dhcp_message_types: if type in ["Discover", "Request"]: diff --git a/tests/dhcp_relay/test_dhcp_relay_stress.py b/tests/dhcp_relay/test_dhcp_relay_stress.py index edfd94761ef..d7a69d16ffc 100644 --- a/tests/dhcp_relay/test_dhcp_relay_stress.py +++ b/tests/dhcp_relay/test_dhcp_relay_stress.py @@ -55,7 +55,8 @@ def test_dhcp_relay_restart_with_stress(ptfhost, dut_dhcp_relay_data, validate_d "uplink_mac": str(dut_dhcp_relay_data[0]['uplink_mac']), "testing_mode": testing_mode, "duration": duration, - "pps": pps}, + "pps": pps, + "kvm_support": True}, log_file="/tmp/dhcp_relay_stress_test.DHCPContinuousStressTest.log", is_python3=True, async_mode=True) @@ -95,7 +96,8 @@ def _check_socket_buffer(): "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT, "switch_loopback_ip": dut_dhcp_relay_data[0]['switch_loopback_ip'], "uplink_mac": str(dut_dhcp_relay_data[0]['uplink_mac']), - "testing_mode": testing_mode}, + "testing_mode": testing_mode, + "kvm_support": True}, log_file="/tmp/dhcp_relay_test.stress.DHCPTest.log", is_python3=True) @@ -162,7 +164,8 @@ def test_dhcp_relay_stress(ptfhost, ptfadapter, dut_dhcp_relay_data, validate_du "uplink_mac": str(dhcp_relay['uplink_mac']), "packets_send_duration": packets_send_duration, "client_packets_per_sec": client_packets_per_sec, - "testing_mode": testing_mode + "testing_mode": testing_mode, + "kvm_support": True } count_file = '/tmp/dhcp_stress_test_{}.json'.format(dhcp_type) diff --git a/tests/dhcp_relay/test_dhcpv6_relay.py b/tests/dhcp_relay/test_dhcpv6_relay.py index e06a6d7057d..2446727f710 100644 
--- a/tests/dhcp_relay/test_dhcpv6_relay.py +++ b/tests/dhcp_relay/test_dhcpv6_relay.py @@ -324,7 +324,8 @@ def test_dhcpv6_relay_counter(ptfhost, duthosts, rand_one_dut_hostname, dut_dhcp "dut_mac": str(dhcp_relay['uplink_mac']), "vlan_ip": str(dhcp_relay['downlink_vlan_iface']['addr']), "loopback_ipv6": str(dhcp_relay['loopback_ipv6']), - "is_dualtor": str(dhcp_relay['is_dualtor'])}, + "is_dualtor": str(dhcp_relay['is_dualtor']), + "kvm_support": True}, log_file="/tmp/dhcpv6_relay_test.DHCPCounterTest.log", is_python3=True) for type in message_types: diff --git a/tests/fdb/test_fdb_flush.py b/tests/fdb/test_fdb_flush.py index 5c7670bb355..82594dcf620 100644 --- a/tests/fdb/test_fdb_flush.py +++ b/tests/fdb/test_fdb_flush.py @@ -324,6 +324,7 @@ def dynamic_fdb_oper(self, duthost, tbinfo, ptfhost, create_or_clear): "router_mac": duthost.facts["router_mac"], "fdb_info": self.FDB_INFO_FILE, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_flush_test.FdbFlushTest", testParams) elif 'clear' == create_or_clear: diff --git a/tests/fdb/test_fdb_mac_expire.py b/tests/fdb/test_fdb_mac_expire.py index 4f340f46c1f..e98e86c2d7a 100644 --- a/tests/fdb/test_fdb_mac_expire.py +++ b/tests/fdb/test_fdb_mac_expire.py @@ -222,7 +222,8 @@ def testFdbMacExpire(self, request, tbinfo, rand_selected_dut, ptfhost, refresh_ "fdb_info": self.FDB_INFO_FILE, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, "refresh_type": refresh_type, - "aging_time": fdbAgingTime + "aging_time": fdbAgingTime, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_mac_expire_test.FdbMacExpireTest", testParams) diff --git a/tests/fdb/test_fdb_mac_learning.py b/tests/fdb/test_fdb_mac_learning.py index a8bf4243e8e..e8f192243b4 100644 --- a/tests/fdb/test_fdb_mac_learning.py +++ b/tests/fdb/test_fdb_mac_learning.py @@ -167,6 +167,7 @@ def dynamic_fdb_oper(self, duthost, tbinfo, ptfhost, dut_ptf_ports): "router_mac": duthost.facts["router_mac"], "dut_ptf_ports": 
dut_ptf_ports, "dummy_mac_prefix": self.DUMMY_MAC_PREFIX, + "kvm_support": True } self.__runPtfTest(ptfhost, "fdb_mac_learning_test.FdbMacLearningTest", testParams) diff --git a/tests/ipfwd/test_mtu.py b/tests/ipfwd/test_mtu.py index b80c2489379..b8351a3bd59 100644 --- a/tests/ipfwd/test_mtu.py +++ b/tests/ipfwd/test_mtu.py @@ -35,7 +35,8 @@ def test_mtu(tbinfo, ptfhost, mtu, gather_facts): "src_router_ipv6": gather_facts['src_router_ipv6'], "dst_host_ipv6": gather_facts['dst_host_ipv6'], "src_ptf_port_list": gather_facts['src_port_ids'], - "dst_ptf_port_list": gather_facts['dst_port_ids'] + "dst_ptf_port_list": gather_facts['dst_port_ids'], + "kvm_support": True }, log_file=log_file, socket_recv_size=16384, diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py index cff700c924b..f50e1b1cd1a 100644 --- a/tests/pc/test_lag_2.py +++ b/tests/pc/test_lag_2.py @@ -100,7 +100,8 @@ def __verify_lag_lacp_timing(self, lacp_timer, exp_iface): 'timeout': 35, 'packet_timing': lacp_timer, 'ether_type': 0x8809, - 'interval_count': 3 + 'interval_count': 3, + 'kvm_support': True } ptf_runner(self.ptfhost, 'acstests', "lag_test.LacpTimingTest", '/root/ptftests', params=params, is_python3=True) diff --git a/tests/pc/test_lag_member.py b/tests/pc/test_lag_member.py index 7f53c358d9f..b39009adb2c 100644 --- a/tests/pc/test_lag_member.py +++ b/tests/pc/test_lag_member.py @@ -452,7 +452,8 @@ def run_lag_member_traffic_test(duthost, dut_vlan, ptf_ports, ptfhost): "dut_mac": duthost.facts["router_mac"], "dut_vlan": dut_vlan, "ptf_lag": ptf_lag, - ATTR_PORT_NOT_BEHIND_LAG: ptf_not_lag + ATTR_PORT_NOT_BEHIND_LAG: ptf_not_lag, + "kvm_support": True } ptf_runner(ptfhost, 'acstests', "lag_test.LagMemberTrafficTest", "/root/ptftests", params=params, is_python3=True) diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 7fc6e5a2471..ab87aa8e4b3 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -105,7 +105,8 @@ def ptf_runner(host, testdir, testname, platform_dir=None, 
params={}, module_ignore_errors=False, is_python3=None, async_mode=False, pdb=False): dut_type = get_dut_type(host) - if dut_type == "kvm" and params.get("kvm_support", True) is False: + kvm_support = params.get("kvm_support", False) + if dut_type == "kvm" and kvm_support is False: logger.info("Skip test case {} for not support on KVM DUT".format(testname)) return True diff --git a/tests/radv/test_radv_ipv6_ra.py b/tests/radv/test_radv_ipv6_ra.py index 8f453bb68fe..aff361fa3aa 100644 --- a/tests/radv/test_radv_ipv6_ra.py +++ b/tests/radv/test_radv_ipv6_ra.py @@ -134,7 +134,8 @@ def test_radv_router_advertisement( "downlink_vlan_mac": vlan_intf['downlink_vlan_intf']['mac'], "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/radv_ipv6_ra_test.RadvUnSolicitedRATest.log", is_python3=True) @@ -161,7 +162,8 @@ def test_solicited_router_advertisement( "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], "ptf_port_ip6": vlan_intf['ptf_port']['ip6'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/radv_ipv6_ra_test.RadvSolicitedRATest.log", is_python3=True) @@ -187,7 +189,8 @@ def test_unsolicited_router_advertisement_with_m_flag( "downlink_vlan_mac": vlan_intf['downlink_vlan_intf']['mac'], "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": vlan_intf['ptf_port']['port_idx'], - "max_ra_interval": 180}, + "max_ra_interval": 180, + "kvm_support": True}, log_file="/tmp/router_adv_mflag_test.RadvUnSolicitedRATest.log", is_python3=True) @@ -214,5 +217,6 @@ def test_solicited_router_advertisement_with_m_flag( "downlink_vlan_ip6": vlan_intf['downlink_vlan_intf']['ip6'], "ptf_port_index": 
vlan_intf['ptf_port']['port_idx'], "ptf_port_ip6": vlan_intf['ptf_port']['ip6'], - "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS}, + "max_ra_interval": RADV_MAX_RA_INTERVAL_SECS, + "kvm_support": True}, log_file="/tmp/router_adv_mflag_test.RadvSolicitedRATest.log", is_python3=True) From 3597c7cdd8044c097f113ca744a85fc6e336ac45 Mon Sep 17 00:00:00 2001 From: Rajendra Kumar Thirumurthi Date: Thu, 7 Nov 2024 01:12:05 -0800 Subject: [PATCH 020/175] ACL testcase fix for Cisco 8122 Platform (#15404) * ACL testcase fix for Cisco 8122 Platform * ACL testcase fix for Cisco 8122 Platform --- tests/acl/test_acl.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index 1831e1343f0..2908cfc2038 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -806,10 +806,8 @@ def counters_sanity_check(self, duthosts, acl_rules, acl_table): continue counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT] counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT] - if (duthost.facts["hwsku"] == "Cisco-8111-O64" or - duthost.facts["hwsku"] == "Cisco-8111-O32" or - duthost.facts["hwsku"] == "Cisco-8111-C32" or - duthost.facts["hwsku"] == "Cisco-8111-O62C2"): + if duthost.facts["platform"] in ["x86_64-8111_32eh_o-r0", + "x86_64-8122_64eh_o-r0", "x86_64-8122_64ehf_o-r0"]: skip_byte_accounting = True logger.info("Counters for ACL rule \"{}\" after traffic:\n{}" From 730b79b9b0adcb197eef471f30157e6a9704baff Mon Sep 17 00:00:00 2001 From: rbpittman Date: Thu, 7 Nov 2024 04:17:01 -0500 Subject: [PATCH 021/175] Revise after XON counter change. 
(#15400) --- tests/saitests/py3/sai_qos_tests.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index eb3373596b4..2d649a4221e 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -3504,16 +3504,7 @@ def get_pfc_tx_cnt(src_port_id, pg_cntr_idx): pfc_tx_cnt_base = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) time.sleep(2) xoff_txd = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - pfc_tx_cnt_base - print("Verifying XOFF TX, count {}".format(xoff_txd), file=sys.stderr) - assert xoff_txd != 0, "Expected XOFF" - - # TODO: Revisit when TX counter in this case is correctly handled - send_packet(self, src_port_id, pkt, 1) - time.sleep(2) - pfc_tx_cnt_base = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - time.sleep(2) - xoff_txd = get_pfc_tx_cnt(src_port_id, pg_cntr_idx) - pfc_tx_cnt_base - print("Verifying XOFF TX stopped, count {}".format(xoff_txd), file=sys.stderr) + print("Verifying no XOFF TX, count {}".format(xoff_txd), file=sys.stderr) assert xoff_txd == 0, "Unexpected XOFF" finally: From fe375aa2aff288d7b3c579c6215aca4793ee2922 Mon Sep 17 00:00:00 2001 From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com> Date: Thu, 7 Nov 2024 02:51:04 -0800 Subject: [PATCH 022/175] Update test_chassis_reboot.py (#15422) Description of PR Summary: Fixes # (issue) Fix the issue when running dut.command("reboot"), when some T2 platform running the command itself can exceed ansible timeout we defined in ansible.cfg that is 60sec. In this case, test will error out saying "Host unreachable", but it's actually due to ansible command timeout. Approach What is the motivation for this PR? Avoid testcase failure that is command runtime dependent How did you do it? Thread the reboot command, so even it timeout in a single thread, main thread is fine How did you verify/test it? 
Before: 03/11/2024 12:04:25 test_chassis_reboot.chassis_cold_reboot L0027 INFO | Run cold reboot on 03/11/2024 12:04:25 base._run L0071 DEBUG | /var/src/sonic-mgmt_8800-1_66b4a53de4614bccc2e74f8c/tests/common/devices/multi_asic.py::_run_on_asics#135: [8800-lc4] AnsibleModule::command, args=["reboot"], kwargs={} 03/11/2024 12:05:24 transport._log L1873 DEBUG | EOF in transport thread 03/11/2024 12:05:24 __init__.pytest_runtest_call L0040 ERROR | Traceback (most recent call last): File "/usr/local/lib/python3.8/dist-packages/_pytest/python.py", line 1788, in runtest self.ihook.pytest_pyfunc_call(pyfuncitem=self) File "/usr/local/lib/python3.8/dist-packages/pluggy/_hooks.py", line 513, in __call__ return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult) File "/usr/local/lib/python3.8/dist-packages/pluggy/_manager.py", line 120, in _hookexec return self._inner_hookexec(hook_name, methods, kwargs, firstresult) File "/usr/local/lib/python3.8/dist-packages/pluggy/_callers.py", line 139, in _multicall raise exception.with_traceback(exception.__traceback__) File "/usr/local/lib/python3.8/dist-packages/pluggy/_callers.py", line 103, in _multicall res = hook_impl.function(*args) File "/usr/local/lib/python3.8/dist-packages/_pytest/python.py", line 194, in pytest_pyfunc_call result = testfunction(**testargs) File "/var/src/sonic-mgmt_t2-8800-1_66b4a53de4614bccc2e74f8c/tests/platform_tests/test_chassis_reboot.py", line 70, in test_parallel_reboot chassis_cold_reboot(dut, localhost) File "/var/src/sonic-mgmt_t2-8800-1_66b4a53de4614bccc2e74f8c/tests/platform_tests/test_chassis_reboot.py", line 28, in chassis_cold_reboot dut.command("reboot") File "/var/src/sonic-mgmt-t2-8800-1_66b4a53de4614bccc2e74f8c/tests/common/devices/multi_asic.py", line 135, in _run_on_asics return getattr(self.sonichost, self.multi_asic_attr)(*module_args, **complex_args) File "/var/src/sonic-mgmt-t2-8800-1_66b4a53de4614bccc2e74f8c/tests/common/devices/base.py", line 105, in _run res = 
self.module(*module_args, **complex_args)[self.hostname] File "/usr/local/lib/python3.8/dist-packages/pytest_ansible/module_dispatcher/v213.py", line 232, in _run raise AnsibleConnectionFailure( pytest_ansible.errors.AnsibleConnectionFailure: Host unreachable in the inventory After: ----------------------------------------------------------------------------------------- live log sessionfinish -----------------------------------------------------------------------------------------05:47:44 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. Please check logs =============================================================================== 1 passed, 1 warning in 2017.89s (0:33:37) ================================================================================DEBUG:tests.conftest:[log_custom_msg] item: DEBUG:tests.conftest:append custom_msg: {'dut_check_result': {'core_dump_check_pass': True, 'config_db_check_pass': False}} Any platform specific information? The issue is seen on Cisco T2 that takes more time to reboot. But is a general enhancement. 
co-authorized by: jianquanye@microsoft.com --- tests/platform_tests/test_chassis_reboot.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/platform_tests/test_chassis_reboot.py b/tests/platform_tests/test_chassis_reboot.py index 30d2bc5d255..9d872818529 100644 --- a/tests/platform_tests/test_chassis_reboot.py +++ b/tests/platform_tests/test_chassis_reboot.py @@ -5,6 +5,7 @@ import random import logging import time +import concurrent.futures from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from tests.common.reboot import wait_for_startup,\ @@ -59,15 +60,16 @@ def test_parallel_reboot(duthosts, localhost, conn_graph_facts, xcvr_skip_list): core_dumps = {} # Perform reboot on multiple LCs within 30sec - for dut in duthosts: - if dut.is_supervisor_node(): - continue + executor = concurrent.futures.ThreadPoolExecutor(max_workers=8) + for dut in duthosts.frontend_nodes: # collect core dump before reboot core_dumps[dut.hostname] = get_core_dump(dut) # Perform cold reboot on all linecards, with an internal within 30sec to mimic a parallel reboot scenario - chassis_cold_reboot(dut, localhost) + # Change this to threaded reboot, to avoid ansible command timeout in 60sec, we have seen some T2 platform + # reboot exceed 60 sec, and causes test to error out + executor.submit(chassis_cold_reboot, dut, localhost) # Wait for 0 ~ 30sec rand_interval = random.randint(0, 30) @@ -88,9 +90,7 @@ def test_parallel_reboot(duthosts, localhost, conn_graph_facts, xcvr_skip_list): "Not all BGP sessions are established on DUT") # Check if new core dumps are generated - for dut in duthosts: - if dut.is_supervisor_node(): - continue + for dut in duthosts.frontend_nodes: post_core_dump = get_core_dump(dut) new_core_dumps = (set(post_core_dump) - set(core_dumps[dut.hostname])) From 829ca01441bcc3c44f4baf3dc3c8e48cc5d9a474 Mon Sep 17 00:00:00 2001 From: Perumal Venkatesh Date: Thu, 7 Nov 2024 03:37:39 
-0800 Subject: [PATCH 023/175] Adding log analyzer ignore messages (#15420) Description of PR Adding log analyzer ignore messages based on Cisco Chassis. These messages are harmless messages. Adding it to loganalyzer ignore file WARNING kernel: [55926.179286] pcieport 0000:0a:00.0: device [10b5:8713] error status/mask=00002001/0000e000 ERR kernel: [39978.088854] cisco-fpga-p2pm-m-slot p2pm-m-slot.2: cisco_fpga_select_new_acpi_companion: searching for child status0 0x2420017a; fpga_id 0x42 ERR kernel: [ 8.160032] cisco-fpga-pci 0000:5f:00.0: cisco_fpga_select_new_acpi_companion: searching for child status0 0x26100179; fpga_id 0x61 create a loganalyzer ignore Approach What is the motivation for this PR? To ignore harmless log messages that can cause testcase errors. How did you do it? Adding the messages as part of log analyzer ignore file co-authorized by: jianquanye@microsoft.com --- .../files/tools/loganalyzer/loganalyzer_common_ignore.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index b30a4de578e..1dfafae8765 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -285,6 +285,10 @@ r, ".* INFO .*[duty_cycle_map]: illegal pwm value .*" r, ".* INFO .*command '/usr/sbin/smartctl' failed: [116] Stale file handle.*" r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'high_threshold' unavailable in database 'STATE_DB'.*" r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'temperature' unavailable in database 'STATE_DB'.*" +r, ".* ERR kernel:.*cisco-fpga-p2pm-m-slot p2pm-m-slot\.\d+: cisco_fpga_select_new_acpi_companion: searching for child status\d+ 0x[0-9a-f]+; fpga_id 0x[0-9a-f]+.*" +r, ".* ERR kernel:.*cisco-fpga-pci \d+:\d+:\d+\.\d+: cisco_fpga_select_new_acpi_companion: searching for 
child status\d+ 0x[0-9a-f]+; fpga_id 0x[0-9a-f]+.*" +r, ".* WARNING kernel:.*pcieport.*device.*error.*status/mask=.*" + # Ignore rsyslog librelp error if rsyslogd on host or container is down or going down r, ".* ERR .*#rsyslogd: librelp error 10008 forwarding to server .* - suspending.*" From e64bb719d18c02dd9b6d8722a46e651e1659ba26 Mon Sep 17 00:00:00 2001 From: Yatish Date: Thu, 7 Nov 2024 10:56:07 -0800 Subject: [PATCH 024/175] =?UTF-8?q?[Chassis]=20Added=20new=20testcase=20fo?= =?UTF-8?q?r=20checking=20console=20connections=20from=20supervisor=20t?= =?UTF-8?q?=E2=80=A6=20(#15198)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Approach What is the motivation for this PR? Testgap - #5084 How did you do it? How did you verify/test it? By running on Cisco and Arista T2 chassis Any platform specific information? Arista and Cisco --- tests/common/helpers/console_helper.py | 38 +++++++++ .../dut_console/test_console_chassis_conn.py | 82 +++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 tests/dut_console/test_console_chassis_conn.py diff --git a/tests/common/helpers/console_helper.py b/tests/common/helpers/console_helper.py index 58bf82326aa..37f4f95997e 100644 --- a/tests/common/helpers/console_helper.py +++ b/tests/common/helpers/console_helper.py @@ -1,5 +1,6 @@ import pytest import pexpect +import re def assert_expect_text(client, text, target_line, timeout_sec=0.1): @@ -22,3 +23,40 @@ def create_ssh_client(ip, user, pwd): def ensure_console_session_up(client, line): client.expect_exact('Successful connection to line [{}]'.format(line)) client.expect_exact('Press ^A ^X to disconnect') + + +def get_target_lines(duthost): + """ + retrieve the indices of online line cards. + Returns a list of indices of the line cards that are online. 
+ """ + result = duthost.shell("show chassis module status", module_ignore_errors=True) + lines = result['stdout'].splitlines() + linecards = [] + + # Pattern to match lines that have a "LINE-CARD" entry and "Online" in the Oper-Status column + linecard_pattern = re.compile(r"^\s*(LINE-CARD\d+)\s+.*?\s+\d+\s+Online\s+up\s+\S+") + + for line in lines: + match = linecard_pattern.match(line) + if match: + linecard_name = match.group(1) + index = linecard_name.split("LINE-CARD")[1] + linecards.append(index) + + if not linecards: + pytest.fail("No line cards are online.") + + return linecards + + +def handle_pexpect_exceptions(target_line): + """Handle pexpect exceptions during console interactions.""" + try: + yield + except pexpect.exceptions.EOF: + pytest.fail(f"EOF reached during console interaction for line {target_line}.") + except pexpect.exceptions.TIMEOUT: + pytest.fail(f"Timeout reached during console interaction for line {target_line}.") + except Exception as e: + pytest.fail(f"Error occured during console interaction for line {target_line}: {e}") diff --git a/tests/dut_console/test_console_chassis_conn.py b/tests/dut_console/test_console_chassis_conn.py new file mode 100644 index 00000000000..49e05babb41 --- /dev/null +++ b/tests/dut_console/test_console_chassis_conn.py @@ -0,0 +1,82 @@ +import pexpect +import pytest +import time + +from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.console_helper import get_target_lines, handle_pexpect_exceptions + +pytestmark = [ + pytest.mark.topology("t2") # Test is only for T2 Chassis +] + + +def test_console_availability_serial_ports(duthost, duthosts, creds, enum_supervisor_dut_hostname): + + duthost = duthosts[enum_supervisor_dut_hostname] + dutip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] + dutuser, dutpass = creds['sonicadmin_user'], creds['sonicadmin_password'] + + target_lines = get_target_lines(duthost) # List of Serial port 
numbers connected from supervisor to linecards + + for target_line in target_lines: + if 'arista' in duthost.facts['hwsku'].lower(): + console_command = f"sudo /usr/bin/picocom /dev/ttySCD{target_line}" + try: + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' + .format(dutuser, dutip)) + client.expect('[Pp]assword:') + client.sendline(dutpass) + client.sendline(console_command) + time.sleep(5) + client.sendline('\n') + client.expect(['login:'], timeout=20) + client.sendline(dutuser) + client.expect(['[Pp]assword:'], timeout=10) + client.sendline(dutpass) + + i = client.expect([r'.*Software\s+for\s+Open\s+Networking\s+in\s+the\s+Cloud.*', + 'Login incorrect'], timeout=100) + pytest_assert(i == 0, + f"Failed to connect to line card {target_line} " + "on Arista device. Please check credentials.") + + client.sendline('exit') + time.sleep(2) + client.sendcontrol('a') + time.sleep(2) + client.sendcontrol('x') + except Exception as e: + handle_pexpect_exceptions(target_line)(e) + + elif 'cisco' in duthost.facts['hwsku'].lower(): + console_command = f"sudo /opt/cisco/bin/rconsole.py -s {target_line}" + try: + client = pexpect.spawn('ssh {}@{} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null' + .format(dutuser, dutip)) + client.expect('[Pp]assword:') + client.sendline(dutpass) + time.sleep(10) + client.sendline(console_command) + time.sleep(10) + client.sendline(dutuser) + client.expect(['[Pp]assword:'], timeout=10) + time.sleep(10) + client.sendline(dutpass) + time.sleep(10) + + i = client.expect([r'.*Software\s+for\s+Open\s+Networking\s+in\s+the\s+Cloud.*', + 'Login incorrect'], timeout=100) + pytest_assert(i == 0, + f"Failed to connect to line card {target_line} on Cisco device.Please check credentials.") + + client.sendline('exit') + time.sleep(2) + client.sendcontrol('\\') + time.sleep(2) + client.sendline('quit') + + except Exception as e: + handle_pexpect_exceptions(target_line)(e) + + else: + 
pytest.skip("Skipping test because test is not supported on this hwsku.") From d5dfb8a7d1ae3decaf3da0a30c0cd6d76e9027c7 Mon Sep 17 00:00:00 2001 From: ansrajpu-git <113939367+ansrajpu-git@users.noreply.github.com> Date: Thu, 7 Nov 2024 14:07:08 -0500 Subject: [PATCH 025/175] [Chassis][Voq][Qos]qos_yaml updated for 400G (#14802) Fixing intermittent failure in lossy queue by adjusting the pkts_num_trig_egr_drp for 'broadcom-dnx' t2 chassis. Updation on the original PR # #14585 Summary: Fixes # (issue) Type of change Bug fix Testbed and Framework(new/improvement) Test case(new/improvement) Back port request 202012 202205 202305 202311 202405 Approach What is the motivation for this PR? How did you do it? Since the pkts_num_leakout is more for 100G.Adjusting the count of pkt sent to trigger egress drop. How did you verify/test it? Executed the qos test and verify. --- tests/qos/files/qos_params.j2c.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/qos/files/qos_params.j2c.yaml b/tests/qos/files/qos_params.j2c.yaml index aa88cee420c..9b569efaca0 100644 --- a/tests/qos/files/qos_params.j2c.yaml +++ b/tests/qos/files/qos_params.j2c.yaml @@ -481,7 +481,7 @@ qos_params: dscp: 8 ecn: 1 pg: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 pkts_num_margin: 100 wm_pg_shared_lossless: dscp: 3 @@ -497,7 +497,7 @@ qos_params: ecn: 1 pg: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 packet_size: 64 cell_size: 4096 pkts_num_margin: 40 @@ -523,7 +523,7 @@ qos_params: ecn: 1 queue: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 2179900 + pkts_num_trig_egr_drp: 2179770 cell_size: 4096 wm_buf_pool_lossy: dscp: 8 From db8b51765bb33363d455e9cd0aa4835871112de4 Mon Sep 17 00:00:00 2001 From: smagarwal-arista <160662020+smagarwal-arista@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:18:04 -0500 Subject: [PATCH 026/175] Retry test_psu.py::TestPsuApi::test_power if power not within tolerance 
(#14788) * Retry test_power calculated power above 10% If the test fails on `abs(power - (voltage*current)) < power*0.1` then retry the test. Test is repeated for a maximum of three times until it passes, else it is failed. This change is introduced to account for the stringent tolerance level of 10% and the errors that might be caused due to time differences reading each of the parameters. * Retry test_power.py::TestPsuApi::test_power if power above tolerance If the test fails on `abs(power - (voltage*current)) < power*0.1` then retry the test. The test is repeated for a maximum of three times until it passes. This change is introduced to account for the stringent tolerance level of 10% and the errors that might be caused due to time differences in reading each of the parameters. * Add check to detect occurrence of a failure before power calculation * Resolve pre-commit check issue * Refactor test_power function to improve readability * Use wait_until function for retry --- .../api/platform_api_test_base.py | 3 + tests/platform_tests/api/test_psu.py | 99 ++++++++++--------- 2 files changed, 54 insertions(+), 48 deletions(-) diff --git a/tests/platform_tests/api/platform_api_test_base.py b/tests/platform_tests/api/platform_api_test_base.py index 0550242eaa6..cb183d1adc6 100644 --- a/tests/platform_tests/api/platform_api_test_base.py +++ b/tests/platform_tests/api/platform_api_test_base.py @@ -30,3 +30,6 @@ def assert_expectations(self): # TODO: When we move to Python 3.3+, we can use self.failed_expectations.clear() instead del self.failed_expectations[:] pytest_assert(False, err_msg) + + def get_len_failed_expectations(self): + return len(self.failed_expectations) diff --git a/tests/platform_tests/api/test_psu.py b/tests/platform_tests/api/test_psu.py index 877187b40d5..b9ad83b1ea2 100644 --- a/tests/platform_tests/api/test_psu.py +++ b/tests/platform_tests/api/test_psu.py @@ -6,7 +6,7 @@ from tests.common.utilities import skip_release from 
tests.platform_tests.cli.util import get_skip_mod_list from .platform_api_test_base import PlatformApiTestBase -from tests.common.utilities import skip_release_for_platform +from tests.common.utilities import skip_release_for_platform, wait_until ################################################### @@ -89,6 +89,16 @@ def skip_absent_psu(self, psu_num, platform_api_conn): return True return False + def get_psu_parameter(self, psu_info, psu_parameter, get_data, message): + data = None + is_supported = self.get_psu_facts(psu_info["duthost"], psu_info["psu_id"], True, psu_parameter) + if is_supported: + data = get_data(psu_info["api"], psu_info["psu_id"]) + if self.expect(data is not None, "Failed to retrieve {} of PSU {}".format(message, psu_info["psu_id"])): + self.expect(isinstance(data, float), "PSU {} {} appears incorrect".format(psu_info["psu_id"], message)) + + return data + # # Functions to test methods inherited from DeviceBase class # @@ -204,68 +214,61 @@ def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, plat ''' PSU power test ''' duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) + voltage = current = power = None + psu_info = { + "duthost": duthost, + "api": platform_api_conn, + "psu_id": None + } + + def check_psu_power(failure_count): + nonlocal voltage + nonlocal current + nonlocal power + voltage = self.get_psu_parameter(psu_info, "voltage", psu.get_voltage, "voltage") + current = self.get_psu_parameter(psu_info, "current", psu.get_current, "current") + power = self.get_psu_parameter(psu_info, "power", psu.get_power, "power") + + failure_occured = self.get_len_failed_expectations() > failure_count + if current and voltage and power: + is_within_tolerance = abs(power - (voltage*current)) < power*0.1 + if not failure_occured and not is_within_tolerance: + return False + + self.expect(is_within_tolerance, "PSU {} reading does not make sense \ + 
(power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + + return True for psu_id in range(self.num_psus): + failure_count = self.get_len_failed_expectations() + psu_info['psu_id'] = psu_id name = psu.get_name(platform_api_conn, psu_id) if name in self.psu_skip_list: logger.info("skipping check for {}".format(name)) else: - voltage = None - voltage_supported = self.get_psu_facts(duthost, psu_id, True, "voltage") - if voltage_supported: - voltage = psu.get_voltage(platform_api_conn, psu_id) - if self.expect(voltage is not None, "Failed to retrieve voltage of PSU {}".format(psu_id)): - self.expect(isinstance(voltage, float), "PSU {} voltage appears incorrect".format(psu_id)) - current = None - current_supported = self.get_psu_facts(duthost, psu_id, True, "current") - if current_supported: - current = psu.get_current(platform_api_conn, psu_id) - if self.expect(current is not None, "Failed to retrieve current of PSU {}".format(psu_id)): - self.expect(isinstance(current, float), "PSU {} current appears incorrect".format(psu_id)) - power = None - power_supported = self.get_psu_facts(duthost, psu_id, True, "power") - if power_supported: - power = psu.get_power(platform_api_conn, psu_id) - if self.expect(power is not None, "Failed to retrieve power of PSU {}".format(psu_id)): - self.expect(isinstance(power, float), "PSU {} power appears incorrect".format(psu_id)) - max_supp_power = None - max_power_supported = self.get_psu_facts(duthost, psu_id, True, "max_power") - if max_power_supported: - max_supp_power = psu.get_maximum_supplied_power(platform_api_conn, psu_id) - if self.expect(max_supp_power is not None, - "Failed to retrieve maximum supplied power power of PSU {}".format(psu_id)): - self.expect(isinstance(max_supp_power, float), - "PSU {} maximum supplied power appears incorrect".format(psu_id)) - - if current is not None and voltage is not None and power is not None: - self.expect(abs(power - (voltage*current)) < power*0.1, "PSU {} reading does 
not make sense \ - (power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + check_result = wait_until(30, 10, 0, check_psu_power, failure_count) + self.expect(check_result, "PSU {} reading does not make sense \ + (power:{}, voltage:{}, current:{})".format(psu_id, power, voltage, current)) + + self.get_psu_parameter(psu_info, "max_power", psu.get_maximum_supplied_power, + "maximum supplied power") powergood_status = psu.get_powergood_status(platform_api_conn, psu_id) if self.expect(powergood_status is not None, "Failed to retrieve operational status of PSU {}".format(psu_id)): self.expect(powergood_status is True, "PSU {} is not operational".format(psu_id)) - high_threshold = None - voltage_high_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_high_threshold") - if voltage_high_threshold_supported: - high_threshold = psu.get_voltage_high_threshold(platform_api_conn, psu_id) - if self.expect(high_threshold is not None, - "Failed to retrieve the high voltage threshold of PSU {}".format(psu_id)): - self.expect(isinstance(high_threshold, float), - "PSU {} voltage high threshold appears incorrect".format(psu_id)) - low_threshold = None - voltage_low_threshold_supported = self.get_psu_facts(duthost, psu_id, True, "voltage_low_threshold") - if voltage_low_threshold_supported: - low_threshold = psu.get_voltage_low_threshold(platform_api_conn, psu_id) - if self.expect(low_threshold is not None, - "Failed to retrieve the low voltage threshold of PSU {}".format(psu_id)): - self.expect(isinstance(low_threshold, float), - "PSU {} voltage low threshold appears incorrect".format(psu_id)) - if high_threshold is not None and low_threshold is not None: + high_threshold = self.get_psu_parameter(psu_info, "voltage_high_threshold", + psu.get_voltage_high_threshold, "high voltage threshold") + low_threshold = self.get_psu_parameter(psu_info, "voltage_low_threshold", + psu.get_voltage_low_threshold, "low voltage threshold") + + if 
high_threshold and low_threshold: self.expect(voltage < high_threshold and voltage > low_threshold, "Voltage {} of PSU {} is not in between {} and {}" .format(voltage, psu_id, low_threshold, high_threshold)) + self.assert_expectations() def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): From 6a3d8e57eaf3bd6d5a08d016fb1c52b5c0e8a9ed Mon Sep 17 00:00:00 2001 From: wumiao_nokia Date: Thu, 7 Nov 2024 18:12:45 -0500 Subject: [PATCH 027/175] Multi-asic Support for Telemetry Test (#14982) Description of PR Add multi-asic support for telemetry test. This does not include multi-asic support for events (bgp, swss etc) as there are issues with multi-asic support on server side. A different PR #13826 will handle telemetry event multi-asic support on mgmt. Approach What is the motivation for this PR? Multi-asic support for telemetry test How did you do it? How did you verify/test it? Whole telemetry OC testd pass with the changes (exclude events test). 
co-authorized by: jianquanye@microsoft.com --- .azure-pipelines/pr_test_scripts.yaml | 2 ++ tests/common/devices/multi_asic.py | 16 +++++++--- tests/common/devices/sonic.py | 11 +++++-- .../tests_mark_conditions.yaml | 3 +- tests/telemetry/telemetry_utils.py | 10 +++++-- tests/telemetry/test_telemetry.py | 29 +++++++++++++------ 6 files changed, 51 insertions(+), 20 deletions(-) diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index 3d48dfbef25..af74bf44aa0 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -447,6 +447,8 @@ multi-asic-t1-lag: - process_monitoring/test_critical_process_monitoring.py - container_checker/test_container_checker.py - http/test_http_copy.py + - telemetry/test_telemetry_cert_rotation.py + - telemetry/test_telemetry.py dpu: - dash/test_dash_vnet.py diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index df1eaf1b7b7..6f541c201af 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -286,6 +286,12 @@ def get_linux_ip_cmd_for_namespace(self, cmd, namespace): ns_cmd = cmd.replace('ip', 'ip -n {}'.format(namespace)) return ns_cmd + def get_cli_cmd_for_namespace(self, cmd, namespace): + if not namespace: + return cmd + ns_cmd = cmd.replace('sonic-db-cli', 'sonic-db-cli -n {}'.format(namespace)) + return ns_cmd + @property def ttl_decr_value(self): """ @@ -520,9 +526,10 @@ def modify_syslog_rate_limit(self, feature, rl_option='disable'): cmds.append(cmd_reload.format(docker)) self.sonichost.shell_cmds(cmds=cmds) - def get_bgp_neighbors(self): + def get_bgp_neighbors(self, namespace=None): """ - Get a diction of BGP neighbor states + Get a diction of BGP neighbor states. 
If namespace is not None + will get a dictionary of BGP neighbor states for that namespace Args: None @@ -531,8 +538,9 @@ def get_bgp_neighbors(self): """ bgp_neigh = {} for asic in self.asics: - bgp_info = asic.bgp_facts() - bgp_neigh.update(bgp_info["ansible_facts"]["bgp_neighbors"]) + if namespace is None or asic.namespace == namespace: + bgp_info = asic.bgp_facts() + bgp_neigh.update(bgp_info["ansible_facts"]["bgp_neighbors"]) return bgp_neigh diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 892535c94fb..83a6d52ed31 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -1378,17 +1378,22 @@ def get_intf_link_local_ipv6_addr(self, intf): addr = self.shell(cmd)["stdout"] return addr - def get_bgp_neighbor_info(self, neighbor_ip): + def get_bgp_neighbor_info(self, neighbor_ip, asic_id=None): """ @summary: return bgp neighbor info @param neighbor_ip: bgp neighbor IP """ nbip = ipaddress.ip_address(neighbor_ip) + vtysh = "vtysh" + if asic_id is not None: + vtysh = "vtysh -n {}".format(asic_id) + if nbip.version == 4: - out = self.command("vtysh -c \"show ip bgp neighbor {} json\"".format(neighbor_ip)) + out = self.command("{} -c \"show ip bgp neighbor {} json\"".format(vtysh, neighbor_ip)) else: - out = self.command("vtysh -c \"show bgp ipv6 neighbor {} json\"".format(neighbor_ip)) + out = self.command("{} -c \"show bgp ipv6 neighbor {} json\"".format(vtysh, neighbor_ip)) + nbinfo = json.loads(re.sub(r"\\\"", '"', re.sub(r"\\n", "", out['stdout']))) logging.info("bgp neighbor {} info {}".format(neighbor_ip, nbinfo)) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index d3ab72aad49..d91c871a263 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1890,10 +1890,11 @@ telemetry/test_telemetry.py: 
telemetry/test_telemetry.py::test_telemetry_queue_buffer_cnt: skip: conditions_logical_operator: or - reason: "Testcase ignored due to switch type is voq / Unsupported in MGFX topos" + reason: "Testcase ignored due to switch type is voq / Unsupported in MGFX topos / multi-asic issue 15393" conditions: - "(switch_type=='voq')" - "topo_type in ['m0', 'mx']" + - "(is_multi_asic==True) and https://github.com/sonic-net/sonic-mgmt/issues/15393" ####################################### ##### pktgen ##### diff --git a/tests/telemetry/telemetry_utils.py b/tests/telemetry/telemetry_utils.py index ef4cac780e6..1b6de9e9ed5 100644 --- a/tests/telemetry/telemetry_utils.py +++ b/tests/telemetry/telemetry_utils.py @@ -106,7 +106,7 @@ def trigger_logger(duthost, log, process, container="", priority="local0.notice" def generate_client_cli(duthost, gnxi_path, method=METHOD_GET, xpath="COUNTERS/Ethernet0", target="COUNTERS_DB", subscribe_mode=SUBSCRIBE_MODE_STREAM, submode=SUBMODE_SAMPLE, - intervalms=0, update_count=3, create_connections=1, filter_event_regex="", + intervalms=0, update_count=3, create_connections=1, filter_event_regex="", namespace=None, timeout=-1): """ Generate the py_gnmicli command line based on the given params. t --target: gNMI target; required @@ -121,11 +121,15 @@ def generate_client_cli(duthost, gnxi_path, method=METHOD_GET, xpath="COUNTERS/E update_count: Max number of streaming updates to receive. 0 means no limit. 
default 0 create_connections: Creates TCP connections with gNMI server; default 1; -1 for infinite connections filter_event_regex: Regex to filter event when querying events path + namespace: namespace for multi-asic timeout: Subscription duration in seconds; After X seconds, request terminates; default none """ env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) - cmdFormat = 'python ' + gnxi_path + 'gnmi_cli_py/py_gnmicli.py -g -t {0} -p {1} -m {2} -x {3} -xt {4} -o {5}' - cmd = cmdFormat.format(duthost.mgmt_ip, env.gnmi_port, method, xpath, target, "ndastreamingservertest") + ns = "" + if namespace is not None: + ns = "/{}".format(namespace) + cmdFormat = 'python ' + gnxi_path + 'gnmi_cli_py/py_gnmicli.py -g -t {0} -p {1} -m {2} -x {3} -xt {4}{5} -o {6}' + cmd = cmdFormat.format(duthost.mgmt_ip, env.gnmi_port, method, xpath, target, ns, "ndastreamingservertest") if method == METHOD_SUBSCRIBE: cmd += " --subscribe_mode {0} --submode {1} --interval {2} --update_count {3} --create_connections {4}".format( diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index 3014c3aea98..c975f532fd4 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -281,24 +281,33 @@ def test_on_change_updates(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, logger.info("Testing on change update notifications") duthost = duthosts[enum_rand_one_per_hwsku_hostname] + if duthost.is_supervisor_node(): + pytest.skip( + "Skipping test as no Ethernet0 frontpanel port on supervisor") skip_201911_and_older(duthost) - cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_SUBSCRIBE, - submode=SUBMODE_ONCHANGE, update_count=2, xpath="NEIGH_STATE_TABLE", - target="STATE_DB") - bgp_nbrs = list(duthost.get_bgp_neighbors().keys()) + nslist = duthost.get_asic_namespace_list() + ns = random.choice(nslist) + bgp_nbrs = list(duthost.get_bgp_neighbors(ns).keys()) bgp_neighbor = random.choice(bgp_nbrs) - 
bgp_info = duthost.get_bgp_neighbor_info(bgp_neighbor) + asic_id = duthost.get_asic_id_from_namespace(ns) + bgp_info = duthost.get_bgp_neighbor_info(bgp_neighbor, asic_id) original_state = bgp_info["bgpState"] new_state = "Established" if original_state.lower() == "active" else "Active" + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_SUBSCRIBE, + submode=SUBMODE_ONCHANGE, update_count=2, xpath="NEIGH_STATE_TABLE", + target="STATE_DB", namespace=ns) + def callback(result): logger.info("Assert that ptf client output is non empty and contains on change update") try: assert result != "", "Did not get output from PTF client" finally: - duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, - original_state)) + ccmd = "sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, + original_state) + ccmd = duthost.get_cli_cmd_for_namespace(ccmd, ns) + duthost.shell(ccmd) ret = parse_gnmi_output(result, 1, bgp_neighbor) assert ret is True, "Did not find key in update" @@ -306,8 +315,10 @@ def callback(result): client_thread.start() wait_until(5, 1, 0, check_gnmi_cli_running, ptfhost) - duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, - new_state)) + cmd = "sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, + new_state) + cmd = duthost.get_cli_cmd_for_namespace(cmd, ns) + duthost.shell(cmd) client_thread.join(60) # max timeout of 60s, expect update to come in <=30s From db7c2f21aeb103ef3ad725f530d9eccfd237f3cb Mon Sep 17 00:00:00 2001 From: liamkearney-msft Date: Fri, 8 Nov 2024 10:08:02 +1000 Subject: [PATCH 028/175] [dut_console/test_console_baud_rate]: cast expected baudrate to str (#15217) Description of PR Cast expected console baudrate to str so comparison works properly Summary: Fixes #14874 Signed-off-by: Liam Kearney --- tests/dut_console/test_console_baud_rate.py | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/dut_console/test_console_baud_rate.py b/tests/dut_console/test_console_baud_rate.py index 6c974edfc95..a156c6f77d4 100644 --- a/tests/dut_console/test_console_baud_rate.py +++ b/tests/dut_console/test_console_baud_rate.py @@ -22,7 +22,7 @@ def is_sonic_console(conn_graph_facts, dut_hostname): def get_expected_baud_rate(duthost): - DEFAULT_BAUDRATE = "9600" + DEFAULT_BAUDRATE = 9600 hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] return hostvars.get('console_baudrate', DEFAULT_BAUDRATE) @@ -31,7 +31,7 @@ def test_console_baud_rate_config(duthost): expected_baud_rate = get_expected_baud_rate(duthost) res = duthost.shell("cat /proc/cmdline | grep -Eo 'console=ttyS[0-9]+,[0-9]+' | cut -d ',' -f2") pytest_require(res["stdout"] != "", "Cannot get baud rate") - if res["stdout"] != expected_baud_rate: + if res["stdout"] != str(expected_baud_rate): global pass_config_test pass_config_test = False pytest.fail("Device baud rate is {}, expected {}".format(res["stdout"], expected_baud_rate)) From e98017585d0ce1d82534de9fb294a2e9c2725173 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:24:01 +0800 Subject: [PATCH 029/175] Remove skip_traffic_test fixture in hash tests (#15436) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in hash tests How did you verify/test it? 
--- tests/hash/test_generic_hash.py | 295 +++++++++++++++----------------- 1 file changed, 140 insertions(+), 155 deletions(-) diff --git a/tests/hash/test_generic_hash.py b/tests/hash/test_generic_hash.py index 7e4db3fdadb..fd9c0191d0d 100644 --- a/tests/hash/test_generic_hash.py +++ b/tests/hash/test_generic_hash.py @@ -15,7 +15,6 @@ from tests.common.utilities import wait_until from tests.ptf_runner import ptf_runner from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer from tests.common.reboot import reboot from tests.common.config_reload import config_reload @@ -130,8 +129,8 @@ def test_hash_capability(duthost, global_hash_capabilities): # noqa:F811 'The lag hash capability is not as expected.') -def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 - restore_vxlan_port, toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 +def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 + restore_vxlan_port, toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the ecmp hash. The hash field to test is randomly chosen from the supported hash fields. 
Args: @@ -174,23 +173,22 @@ def test_ecmp_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_ # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_hash(duthost, ptfhost, tbinfo, fine_params, mg_facts, restore_configuration, # noqa:F811 restore_vxlan_port, global_hash_capabilities, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the lag hash. The hash field to test is randomly chosen from the supported hash fields. When hash field is in [DST_MAC, ETHERTYPE, VLAN_ID], need to re-configure the dut for L2 traffic. 
@@ -247,18 +245,17 @@ def test_lag_hash(duthost, ptfhost, tbinfo, fine_params, mg_facts, restore_confi if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def config_all_hash_fields(duthost, global_hash_capabilities): # noqa:F811 @@ -273,7 +270,7 @@ def config_all_hash_algorithm(duthost, ecmp_algorithm, lag_algorithm): # noqa:F def test_ecmp_and_lag_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, global_hash_capabilities, # noqa:F811 restore_vxlan_port, get_supported_hash_algorithms, # noqa:F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the hash behavior when both ecmp and lag hash are configured with a same field. The hash field to test is randomly chosen from the supported hash fields. 
@@ -312,23 +309,22 @@ def test_ecmp_and_lag_hash(duthost, tbinfo, ptfhost, fine_params, mg_facts, glob # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_interfaces, # noqa:F811 restore_vxlan_port, global_hash_capabilities, get_supported_hash_algorithms, # noqa:F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test): # noqa:F811 + toggle_all_simulator_ports_to_upper_tor): # noqa:F811 """ Test case to validate the ecmp hash when there is nexthop flapping. The hash field to test is randomly chosen from the supported hash fields. 
@@ -368,18 +364,17 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly shutdown 1 nexthop interface'): interface = random.choice(list(uplink_interfaces.keys())) remaining_uplink_interfaces = uplink_interfaces.copy() @@ -389,18 +384,17 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i mg_facts, downlink_interfaces=[], uplink_interfaces=remaining_uplink_interfaces) shutdown_interface(duthost, interface) with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Startup the interface, and then flap it 3 more times'): startup_interface(duthost, interface) flap_interfaces(duthost, [interface], times=3) @@ -408,24 +402,22 @@ def test_nexthop_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_i 'The default route is not restored after the flapping.') 
ptf_params['expected_port_groups'] = origin_ptf_expected_port_groups with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_configuration, # noqa F811 restore_interfaces, global_hash_capabilities, restore_vxlan_port, # noqa F811 - get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor, # noqa F811 - skip_traffic_test): # noqa F811 + get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the lag hash when there is lag member flapping. The hash field to test is randomly chosen from the supported hash fields. 
@@ -482,18 +474,17 @@ def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restor if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly select one member in each portchannel and flap them 3 times'): # Randomly choose the members to flap @@ -509,24 +500,22 @@ def test_lag_member_flap(duthost, tbinfo, ptfhost, fine_params, mg_facts, restor pytest_assert(wait_until(30, 5, 0, check_default_route, duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, restore_configuration, # noqa F811 restore_interfaces, global_hash_capabilities, restore_vxlan_port, # noqa F811 - get_supported_hash_algorithms, toggle_all_simulator_ports_to_upper_tor, # noqa F811 - skip_traffic_test): # noqa F811 + get_supported_hash_algorithms, 
toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the lag hash when a lag member is removed from the lag and added back for a few times. @@ -584,18 +573,17 @@ def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, if not is_l2_test: pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step('Randomly select one member in each portchannel and remove it from the lag and add it back'): # Randomly choose the members to remove/add @@ -609,23 +597,22 @@ def test_lag_member_remove_add(duthost, tbinfo, ptfhost, fine_params, mg_facts, 'The default route is not available or some nexthops are missing.') with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, restore_vxlan_port, # noqa F811 global_hash_capabilities, reboot_type, get_supported_hash_algorithms, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, 
skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor): # noqa F811 """ Test case to validate the hash behavior after fast/warm/cold reboot. The hash field to test is randomly chosen from the supported hash fields. @@ -665,18 +652,17 @@ def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, rest # Check the default route before the ptf test pytest_assert(check_default_route(duthost, uplink_interfaces.keys()), 'The default route is not available or some nexthops are missing.') - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) with allure.step(f'Randomly choose a reboot type: {reboot_type}, and reboot'): # Save config if reboot type is config reload or cold reboot @@ -698,18 +684,17 @@ def test_reboot(duthost, tbinfo, ptfhost, localhost, fine_params, mg_facts, rest pytest_assert(wait_until(60, 10, 0, check_default_route, duthost, uplink_interfaces.keys()), "The default route is not established after the cold reboot.") with allure.step('Start the ptf test, send traffic and check the balancing'): - if not skip_traffic_test: - ptf_runner( - ptfhost, - "ptftests", - "generic_hash_test.GenericHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=PTF_LOG_PATH, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True - ) + ptf_runner( + ptfhost, + "ptftests", + "generic_hash_test.GenericHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=PTF_LOG_PATH, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True + ) @pytest.mark.disable_loganalyzer From 4e2f38e331e6e3f4d9fd45d681d8058a049873dd 
Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:25:10 +0800 Subject: [PATCH 030/175] Remove skip_traffic_test fixture in gcu tests (#15435) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in gcu tests How did you verify/test it? --- .../test_dynamic_acl.py | 78 +++++++------------ 1 file changed, 26 insertions(+), 52 deletions(-) diff --git a/tests/generic_config_updater/test_dynamic_acl.py b/tests/generic_config_updater/test_dynamic_acl.py index f7c86f056b9..2b5a3b2ed1d 100644 --- a/tests/generic_config_updater/test_dynamic_acl.py +++ b/tests/generic_config_updater/test_dynamic_acl.py @@ -25,7 +25,6 @@ from ipaddress import ip_network, IPv6Network, IPv4Network from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.gu_utils import expect_op_success, expect_op_failure from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import apply_formed_json_patch @@ -846,8 +845,7 @@ def dynamic_acl_create_dhcp_forward_rule(duthost, setup): expect_acl_rule_match(duthost, "DHCPV6_RULE", expected_v6_rule_content, setup) -def dynamic_acl_verify_packets(setup, ptfadapter, packets, packets_dropped, src_port=None, - skip_traffic_test=False): # noqa F811 +def dynamic_acl_verify_packets(setup, ptfadapter, packets, packets_dropped, src_port=None): """Verify that the given packets are either dropped/forwarded correctly Args: @@ -862,9 +860,6 @@ def dynamic_acl_verify_packets(setup, ptfadapter, packets, 
packets_dropped, src_ if src_port is None: src_port = setup["blocked_src_port_indice"] - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return for rule, pkt in list(packets.items()): logger.info("Testing that {} packets are correctly {}".format(rule, action_type)) exp_pkt = build_exp_pkt(pkt) @@ -1069,8 +1064,7 @@ def test_gcu_acl_arp_rule_creation(rand_selected_dut, setup, dynamic_acl_create_table, prepare_ptf_intf_and_ip, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that we can create a blanket ARP/NDP packet forwarding rule with GCU, and that ARP/NDP packets are correctly forwarded while all others are dropped.""" @@ -1105,8 +1099,7 @@ def test_gcu_acl_arp_rule_creation(rand_selected_dut, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=True, - src_port=ptf_intf_index, - skip_traffic_test=skip_traffic_test) + src_port=ptf_intf_index) def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, @@ -1115,8 +1108,7 @@ def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, setup, dynamic_acl_create_table, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor): # noqa F811 """Verify that DHCP and DHCPv6 forwarding rules can be created, and that dhcp packets are properly forwarded whereas others are dropped""" @@ -1131,8 +1123,7 @@ def test_gcu_acl_dhcp_rule_creation(rand_selected_dut, dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=True) def test_gcu_acl_drop_rule_creation(rand_selected_dut, @@ -1140,8 +1131,7 @@ def test_gcu_acl_drop_rule_creation(rand_selected_dut, ptfadapter, setup, 
dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that we can create a drop rule via GCU, and that once this drop rule is in place packets that match the drop rule are dropped and packets that do not match the drop rule are forwarded""" @@ -1150,14 +1140,12 @@ def test_gcu_acl_drop_rule_creation(rand_selected_dut, dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=True) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=False, - src_port=setup["unblocked_src_port_indice"], - skip_traffic_test=skip_traffic_test) + src_port=setup["unblocked_src_port_indice"]) def test_gcu_acl_drop_rule_removal(rand_selected_dut, @@ -1165,8 +1153,7 @@ def test_gcu_acl_drop_rule_removal(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that once a drop rule is removed, packets that were previously being dropped are now forwarded""" dynamic_acl_create_three_drop_rules(rand_selected_dut, setup) @@ -1176,8 +1163,7 @@ def test_gcu_acl_drop_rule_removal(rand_selected_dut, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=False, - src_port=setup["scale_port_indices"][2], - skip_traffic_test=skip_traffic_test) + src_port=setup["scale_port_indices"][2]) def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, @@ -1185,8 +1171,7 @@ def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa 
F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that forward rules and drop rules can be created at the same time, with the forward rules having higher priority than drop. Then, perform a traffic test to confirm that packets that match both the forward and drop rules are correctly forwarded, as the forwarding rules have higher priority""" @@ -1195,10 +1180,10 @@ def test_gcu_acl_forward_rule_priority_respected(rand_selected_dut, dynamic_acl_create_secondary_drop_rule(rand_selected_dut, setup) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), - packets_dropped=False, skip_traffic_test=skip_traffic_test) + packets_dropped=False) dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), - packets_dropped=True, skip_traffic_test=skip_traffic_test) + packets_dropped=True) def test_gcu_acl_forward_rule_replacement(rand_selected_dut, @@ -1206,8 +1191,7 @@ def test_gcu_acl_forward_rule_replacement(rand_selected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that forward rules can be created, and then afterwards can have their match pattern updated to a new value. 
Confirm that packets sent that match this new value are correctly forwarded, and that packets that are sent that match the old, replaced value are correctly dropped.""" @@ -1221,10 +1205,8 @@ def test_gcu_acl_forward_rule_replacement(rand_selected_dut, packets=generate_packets(setup, DST_IP_FORWARDED_REPLACEMENT, DST_IPV6_FORWARDED_REPLACEMENT), - packets_dropped=False, - skip_traffic_test=skip_traffic_test) - dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), packets_dropped=True, - skip_traffic_test=skip_traffic_test) + packets_dropped=False) + dynamic_acl_verify_packets(setup, ptfadapter, packets=generate_packets(setup), packets_dropped=True) @pytest.mark.parametrize("ip_type", ["IPV4", "IPV6"]) @@ -1234,8 +1216,7 @@ def test_gcu_acl_forward_rule_removal(rand_selected_dut, setup, ip_type, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Test that if a forward rule is created, and then removed, that packets associated with that rule are properly no longer forwarded, and packets associated with the remaining rule are forwarded""" @@ -1252,15 +1233,12 @@ def test_gcu_acl_forward_rule_removal(rand_selected_dut, # generate_packets returns ipv4 and ipv6 packets. 
remove vals from two dicts so that only correct packets remain drop_packets.pop(other_type) forward_packets.pop(ip_type) - dynamic_acl_verify_packets(setup, ptfadapter, drop_packets, packets_dropped=True, - skip_traffic_test=skip_traffic_test) - dynamic_acl_verify_packets(setup, ptfadapter, forward_packets, packets_dropped=False, - skip_traffic_test=skip_traffic_test) + dynamic_acl_verify_packets(setup, ptfadapter, drop_packets, packets_dropped=True) + dynamic_acl_verify_packets(setup, ptfadapter, forward_packets, packets_dropped=False) def test_gcu_acl_scale_rules(rand_selected_dut, rand_unselected_dut, ptfadapter, setup, dynamic_acl_create_table, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Perform a scale test, creating 150 forward rules with top priority, and then creating a drop rule for every single VLAN port on our device. Select any one of our blocked ports, as well as the ips for two of our forward rules, @@ -1280,27 +1258,23 @@ def test_gcu_acl_scale_rules(rand_selected_dut, rand_unselected_dut, ptfadapter, ptfadapter, generate_packets(setup, v4_dest, v6_dest), packets_dropped=False, - src_port=blocked_scale_port, - skip_traffic_test=skip_traffic_test) + src_port=blocked_scale_port) dynamic_acl_verify_packets(setup, ptfadapter, generate_packets(setup, DST_IP_BLOCKED, DST_IPV6_BLOCKED), packets_dropped=True, - src_port=blocked_scale_port, - skip_traffic_test=skip_traffic_test) + src_port=blocked_scale_port) def test_gcu_acl_nonexistent_rule_replacement(rand_selected_dut, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup, - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup): """Confirm that replacing a nonexistent rule results in operation failure""" dynamic_acl_replace_nonexistent_rule(rand_selected_dut, setup) def 
test_gcu_acl_nonexistent_table_removal(rand_selected_dut, - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup, - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup): """Confirm that removing a nonexistent table results in operation failure""" dynamic_acl_remove_nonexistent_table(rand_selected_dut, setup) From 3ae670e155f858028049a8845ca50fca5855652d Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:25:52 +0800 Subject: [PATCH 031/175] Remove skip_traffic_test fixture in fib tests (#15433) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in fib tests --- tests/fib/test_fib.py | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index c1e8f47bfbb..e65b90d81e2 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,7 +10,6 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # noqa F401 from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active, ptf_test_port_map -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url # noqa F401 @@ -84,8 +83,7 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, mux_status_from_nic_simulator, ignore_ttl, single_fib_for_duts, # noqa F401 duts_running_config_facts, duts_minigraph_facts, - 
validate_active_active_dualtor_setup, # noqa F401 - skip_traffic_test): # noqa F811 + validate_active_active_dualtor_setup): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -105,8 +103,6 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format( ipv4, ipv6, timestamp) logging.info("PTF log file: %s" % log_file) - if skip_traffic_test is True: - return ptf_runner( ptfhost, "ptftests", @@ -319,7 +315,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s hash_keys, ptfhost, ipver, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 updated_tbinfo, mux_server_url, mux_status_from_nic_simulator, ignore_ttl, # noqa F811 single_fib_for_duts, duts_running_config_facts, duts_minigraph_facts, # noqa F811 - setup_active_active_ports, active_active_ports, skip_traffic_test): # noqa F811 + setup_active_active_ports, active_active_ports): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -335,8 +331,6 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner( ptfhost, "ptftests", @@ -371,7 +365,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files_per_function, # noqa F811 hash_keys, ptfhost, ipver, tbinfo, mux_server_url, # noqa F811 ignore_ttl, single_fib_for_duts, duts_running_config_facts, # noqa F811 - duts_minigraph_facts, skip_traffic_test): # noqa F811 + duts_minigraph_facts): # noqa F811 # Skip test on none T1 testbed pytest_require('t1' == tbinfo['topo']['type'], "The test case runs on T1 topology") 
@@ -385,8 +379,6 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", @@ -413,8 +405,7 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files_per_function, # noqa F811 ptfhost, ipver, tbinfo, mux_server_url, ignore_ttl, single_fib_for_duts, # noqa F811 - duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator, - skip_traffic_test): # noqa F811 + duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator): hash_keys = ['inner_length'] timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') log_file = "/tmp/hash_test.IPinIPHashTest.{}.{}.log".format( @@ -426,8 +417,6 @@ def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", From f7d704f1fd67c65e95462e8e8ffb7553455475cf Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:26:29 +0800 Subject: [PATCH 032/175] Remove skip_traffic_test fixture in everflow tests (#15432) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? 
Remove skip_traffic_test fixture in everflow tests --- tests/everflow/everflow_test_utilities.py | 17 ++- tests/everflow/test_everflow_ipv6.py | 139 ++++++------------ tests/everflow/test_everflow_per_interface.py | 7 +- tests/everflow/test_everflow_testbed.py | 91 ++++-------- 4 files changed, 85 insertions(+), 169 deletions(-) diff --git a/tests/everflow/everflow_test_utilities.py b/tests/everflow/everflow_test_utilities.py index 97e80743fea..8377cb40548 100644 --- a/tests/everflow/everflow_test_utilities.py +++ b/tests/everflow/everflow_test_utilities.py @@ -753,8 +753,7 @@ def send_and_check_mirror_packets(self, src_port=None, dest_ports=None, expect_recv=True, - valid_across_namespace=True, - skip_traffic_test=False): + valid_across_namespace=True): # In Below logic idea is to send traffic in such a way so that mirror traffic # will need to go across namespaces and within namespace. If source and mirror destination @@ -789,9 +788,6 @@ def send_and_check_mirror_packets(self, src_port_set.add(dest_ports[0]) src_port_metadata_map[dest_ports[0]] = (None, 2) - if skip_traffic_test is True: - logging.info("Skipping traffic test") - return # Loop through Source Port Set and send traffic on each source port of the set for src_port in src_port_set: expected_mirror_packet = BaseEverflowTest.get_expected_mirror_packet(mirror_session, @@ -810,10 +806,15 @@ def send_and_check_mirror_packets(self, if expect_recv: time.sleep(STABILITY_BUFFER) - _, received_packet = testutils.verify_packet_any_port(ptfadapter, - expected_mirror_packet, - ports=dest_ports) + result = testutils.verify_packet_any_port(ptfadapter, + expected_mirror_packet, + ports=dest_ports) + if isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following checks") + return + + _, received_packet = result logging.info("Received packet: %s", packet.Ether(received_packet).summary()) inner_packet = self._extract_mirror_payload(received_packet, len(mirror_packet_sent)) diff 
--git a/tests/everflow/test_everflow_ipv6.py b/tests/everflow/test_everflow_ipv6.py index 6ffd720f287..df3a4b0e3a3 100644 --- a/tests/everflow/test_everflow_ipv6.py +++ b/tests/everflow/test_everflow_ipv6.py @@ -13,7 +13,6 @@ # Module-level fixtures from .everflow_test_utilities import setup_info # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 pytestmark = [ pytest.mark.topology("t0", "t1", "t2", "m0") @@ -155,8 +154,7 @@ def background_traffic(run_count=None): def test_src_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on Source IPv6 addresses.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -170,13 +168,11 @@ def test_src_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dst_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on Destination IPv6 addresses.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -190,13 +186,11 @@ def 
test_dst_ipv6_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_next_header_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the Next Header field.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, next_header=0x7E) @@ -205,13 +199,11 @@ def test_next_header_mirroring(self, setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_src_port_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the L4 Source Port.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, sport=9000) @@ -220,13 +212,11 @@ def test_l4_src_port_mirroring(self, setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + 
dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_dst_port_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on the L4 Destination Port.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, dport=9001) @@ -235,14 +225,12 @@ def test_l4_dst_port_mirroring(self, setup_info, setup_mirror_session, ptfadapte ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_src_port_range_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on a range of L4 Source Ports.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, sport=10200) @@ -251,14 +239,12 @@ def test_l4_src_port_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_dst_port_range_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - 
toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on a range of L4 Destination Ports.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, dport=10700) @@ -267,13 +253,11 @@ def test_l4_dst_port_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_tcp_flags_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on TCP Flags.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, flags=0x1B) @@ -282,13 +266,11 @@ def test_tcp_flags_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dscp_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match on DSCP.""" test_packet = self._base_tcpv6_packet(everflow_direction, ptfadapter, setup_info, 
dscp=37) @@ -297,13 +279,11 @@ def test_dscp_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ever ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match from a source port to a range of destination ports and vice-versa.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -320,8 +300,7 @@ def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -338,13 +317,11 @@ def test_l4_range_mirroring(self, setup_info, setup_mirror_session, ptfadapter, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we 
can match a SYN -> SYN-ACK pattern.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -360,8 +337,7 @@ def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapt ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -377,14 +353,12 @@ def test_tcp_response_mirroring(self, setup_info, setup_mirror_session, ptfadapt ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_tcp_application_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match a TCP handshake between a client and server.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -402,8 +376,7 @@ def test_tcp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_tcpv6_packet( everflow_direction, @@ -421,14 +394,12 @@ def test_tcp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + 
dest_ports=EverflowIPv6Tests.tx_port_ids) def test_udp_application_mirroring(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match UDP traffic between a client and server application.""" test_packet = self._base_udpv6_packet( everflow_direction, @@ -446,8 +417,7 @@ def test_udp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, ptfadapter, @@ -464,13 +434,11 @@ def test_udp_application_mirroring(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that the protocol number is ignored if it is not specified in the ACL rule.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -485,8 +453,7 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - 
dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -501,8 +468,7 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -518,14 +484,12 @@ def test_any_protocol(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_any_transport_protocol(self, setup_info, setup_mirror_session, # noqa F811 ptfadapter, everflow_dut, everflow_direction, setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that src port and dst port rules match regardless of whether TCP or UDP traffic is sent.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -542,8 +506,7 @@ def test_any_transport_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) test_packet = self._base_udpv6_packet( everflow_direction, @@ -560,13 +523,11 @@ def test_any_transport_protocol(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, test_packet, everflow_direction, 
src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_invalid_tcp_rule(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that the ASIC does not reject rules with TCP flags if the protocol is not TCP.""" pass @@ -577,8 +538,7 @@ def test_invalid_tcp_rule(self, setup_info, setup_mirror_session, ptfadapter, ev def test_source_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with a Source IPv6 Subnet.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -595,13 +555,11 @@ def test_source_subnet(self, setup_info, setup_mirror_session, ptfadapter, everf ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_dest_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with a 
Destination IPv6 Subnet.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -618,13 +576,11 @@ def test_dest_subnet(self, setup_info, setup_mirror_session, ptfadapter, everflo ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_both_subnets(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with both source and destination subnets.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -641,13 +597,11 @@ def test_both_subnets(self, setup_info, setup_mirror_session, ptfadapter, everfl ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def test_fuzzy_subnets(self, setup_info, setup_mirror_session, ptfadapter, everflow_dut, # noqa F811 setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - skip_traffic_test): # noqa F811 + everflow_direction, toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 """Verify that we can match packets with non-standard subnet sizes.""" test_packet = self._base_tcpv6_packet( everflow_direction, @@ -664,8 +618,7 @@ def test_fuzzy_subnets(self, setup_info, setup_mirror_session, ptfadapter, everf ptfadapter, everflow_dut, test_packet, everflow_direction, src_port=EverflowIPv6Tests.rx_port_ptf_id, - 
dest_ports=EverflowIPv6Tests.tx_port_ids, - skip_traffic_test=skip_traffic_test) + dest_ports=EverflowIPv6Tests.tx_port_ids) def _base_tcpv6_packet(self, direction, diff --git a/tests/everflow/test_everflow_per_interface.py b/tests/everflow/test_everflow_per_interface.py index 820513d6675..8bef2b5ed78 100644 --- a/tests/everflow/test_everflow_per_interface.py +++ b/tests/everflow/test_everflow_per_interface.py @@ -13,7 +13,6 @@ from .everflow_test_utilities import setup_info, EVERFLOW_DSCP_RULES # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa: F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 pytestmark = [ pytest.mark.topology("any") @@ -181,8 +180,7 @@ def send_and_verify_packet(ptfadapter, packet, expected_packet, tx_port, rx_port def test_everflow_per_interface(ptfadapter, setup_info, apply_acl_rule, tbinfo, # noqa F811 - toggle_all_simulator_ports_to_rand_selected_tor, ip_ver, # noqa F811 - skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, ip_ver): # noqa F811 """Verify packet ingress from candidate ports are captured by EVERFLOW, while packets ingress from unselected ports are not captured """ @@ -192,9 +190,6 @@ def test_everflow_per_interface(ptfadapter, setup_info, apply_acl_rule, tbinfo, setup_info[UP_STREAM]['ingress_router_mac'], setup_info, ip_ver) uplink_ports = everflow_config["monitor_port_ptf_ids"] - if skip_traffic_test: - return - # Verify that packet ingressed from INPUT_PORTS (candidate ports) are mirrored for port, ptf_idx in list(everflow_config['candidate_ports'].items()): logger.info("Verifying packet ingress from {} is mirrored".format(port)) diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index 1e0777dcd58..cfe3c8f109e 100644 --- a/tests/everflow/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -15,7 +15,6 @@ # Module-level 
fixtures from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa: F401 from tests.common.fixtures.ptfhost_utils import copy_acstests_directory # noqa: F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa: F401 from .everflow_test_utilities import setup_info, setup_arp_responder, EVERFLOW_DSCP_RULES # noqa: F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa: F401 from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor # noqa: F401 @@ -135,8 +134,7 @@ def add_dest_routes(self, setup_info, tbinfo, dest_port_type): # noqa F811 def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """ Verify basic forwarding scenarios for the Everflow feature. @@ -170,8 +168,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add a (better) unresolved route to the mirror session destination IP @@ -188,8 +185,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the unresolved route @@ -212,8 +208,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the better route. 
@@ -230,8 +225,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) remote_dut.shell(remote_dut.get_vtysh_cmd_for_namespace( @@ -241,8 +235,7 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session destination MAC address is changed after neighbor MAC address update.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -265,8 +258,7 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Update the MAC on the neighbor interface for the route we installed @@ -286,8 +278,7 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) finally: @@ -308,15 +299,13 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + 
setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session is still active after removal of next hop from ECMP route that was not in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -348,8 +337,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remaining Scenario not applicable for this topology @@ -374,8 +362,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses [tx_port_ptf_id], dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Remove the extra hop @@ -393,8 +380,7 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses [tx_port_ptf_id], dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Verify that mirrored traffic is still sent to one of the original next hops @@ -405,15 +391,13 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test): # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally): # noqa F811 """Verify that session is still active after removal of next hop from ECMP route that was in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -444,8 +428,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi 
everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add two new ECMP next hops @@ -469,8 +452,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi rx_port_ptf_id, [tx_port_ptf_id], dest_port_type, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Verify that traffic is not sent along either of the new next hops @@ -487,8 +469,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi tx_port_ptf_ids, dest_port_type, expect_recv=False, - valid_across_namespace=False, - skip_traffic_test=skip_traffic_test + valid_across_namespace=False ) # Remove the original next hop @@ -505,8 +486,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi rx_port_ptf_id, [tx_port_ptf_id], dest_port_type, - expect_recv=False, - skip_traffic_test=skip_traffic_test + expect_recv=False ) # Verify that mirrored traffis is now sent along either of the new next hops @@ -517,8 +497,7 @@ def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_sessi everflow_dut, rx_port_ptf_id, tx_port_ptf_ids, - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) def test_everflow_dscp_with_policer( @@ -530,8 +509,7 @@ def test_everflow_dscp_with_policer( config_method, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 - setup_standby_ports_on_rand_unselected_tor_unconditionally, # noqa F811 - skip_traffic_test # noqa F811 + setup_standby_ports_on_rand_unselected_tor_unconditionally # noqa F811 ): """Verify that we can rate-limit mirrored traffic from the MIRROR_DSCP table. 
This tests single rate three color policer mode and specifically checks CIR value @@ -617,9 +595,6 @@ def test_everflow_dscp_with_policer( config_method, rules=EVERFLOW_DSCP_RULES) - if skip_traffic_test is True: - return - # Run test with expected CIR/CBS in packets/sec and tolerance % partial_ptf_runner(setup_info, dest_port_type, @@ -635,8 +610,7 @@ def test_everflow_dscp_with_policer( cir=rate_limit, cbs=rate_limit, send_time=send_time, - tolerance=everflow_tolerance, - skip_traffic_test=skip_traffic_test) + tolerance=everflow_tolerance) finally: # Clean up ACL rules and routes BaseEverflowTest.remove_acl_rule_config(everflow_dut, table_name, config_method) @@ -651,8 +625,7 @@ def test_everflow_dscp_with_policer( def test_everflow_frwd_with_bkg_trf(self, setup_info, # noqa F811 setup_mirror_session, - dest_port_type, ptfadapter, tbinfo, - skip_traffic_test # noqa F811 + dest_port_type, ptfadapter, tbinfo ): """ Verify basic forwarding scenarios for the Everflow feature with background traffic. @@ -743,8 +716,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Add a (better) unresolved route to the mirror session destination IP @@ -762,8 +734,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the unresolved route @@ -786,8 +757,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) # Remove the better route. 
@@ -804,8 +774,7 @@ def background_traffic(run_count=None): everflow_dut, rx_port_ptf_id, [tx_port_ptf_id], - dest_port_type, - skip_traffic_test=skip_traffic_test + dest_port_type ) remote_dut.shell(remote_dut.get_vtysh_cmd_for_namespace( @@ -820,8 +789,7 @@ def background_traffic(run_count=None): background_traffic(run_count=1) def _run_everflow_test_scenarios(self, ptfadapter, setup, mirror_session, duthost, rx_port, - tx_ports, direction, expect_recv=True, valid_across_namespace=True, - skip_traffic_test=False): # noqa F811 + tx_ports, direction, expect_recv=True, valid_across_namespace=True): # FIXME: In the ptf_runner version of these tests, LAGs were passed down to the tests # as comma-separated strings of LAG member port IDs (e.g. portchannel0001 -> "2,3"). # Because the DSCP test is still using ptf_runner we will preserve this for now, @@ -859,8 +827,7 @@ def _run_everflow_test_scenarios(self, ptfadapter, setup, mirror_session, duthos src_port=rx_port, dest_ports=tx_port_ids, expect_recv=expect_recv, - valid_across_namespace=valid_across_namespace, - skip_traffic_test=skip_traffic_test, + valid_across_namespace=valid_across_namespace ) def _base_tcp_packet( From cdfd82733e2e2621703cea8ef7dc32e4046264d5 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:32:06 +0800 Subject: [PATCH 033/175] Remove skip_traffic_test fixture in drop_packets tests (#15430) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in drop_packets tests How did you verify/test it? 
--- tests/drop_packets/drop_packets.py | 84 ++++++++++-------------- tests/drop_packets/test_drop_counters.py | 42 +++++------- 2 files changed, 51 insertions(+), 75 deletions(-) diff --git a/tests/drop_packets/drop_packets.py b/tests/drop_packets/drop_packets.py index 9948968af0c..6cfa62c922a 100644 --- a/tests/drop_packets/drop_packets.py +++ b/tests/drop_packets/drop_packets.py @@ -15,7 +15,6 @@ from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common import config_reload -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.dut_utils import is_mellanox_fanout RX_DRP = "RX_DRP" @@ -516,7 +515,7 @@ def send_packets(pkt, ptfadapter, ptf_tx_port_id, num_packets=1): def test_equal_smac_dmac_drop(do_test, ptfadapter, setup, fanouthost, - pkt_fields, ports_info, enum_fanout_graph_facts, skip_traffic_test): # noqa F811 + pkt_fields, ports_info, enum_fanout_graph_facts): # noqa F811 """ @summary: Create a packet with equal SMAC and DMAC. 
""" @@ -555,7 +554,7 @@ def test_equal_smac_dmac_drop(do_test, ptfadapter, setup, fanouthost, group = "L2" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - comparable_pkt=comparable_pkt, skip_traffic_test=skip_traffic_test) + comparable_pkt=comparable_pkt) def test_multicast_smac_drop(do_test, ptfadapter, setup, fanouthost, @@ -599,11 +598,11 @@ def test_multicast_smac_drop(do_test, ptfadapter, setup, fanouthost, group = "L2" do_test(group, pkt, ptfadapter, ports_info, - setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt, skip_traffic_test=skip_traffic_test) + setup["neighbor_sniff_ports"], comparable_pkt=comparable_pkt) def test_not_expected_vlan_tag_drop(do_test, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, setup, pkt_fields, ports_info, skip_traffic_test): + ptfadapter, setup, pkt_fields, ports_info): """ @summary: Create a VLAN tagged packet which VLAN ID does not match ingress port VLAN ID. """ @@ -636,11 +635,10 @@ def test_not_expected_vlan_tag_drop(do_test, duthosts, enum_rand_one_per_hwsku_f ) group = "L2" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) -def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ports, ports_info, skip_traffic_test): +def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ports, ports_info): """ @summary: Create a packet with loopback destination IP adress. 
""" @@ -658,11 +656,10 @@ def test_dst_ip_is_loopback_addr(do_test, ptfadapter, setup, pkt_fields, tx_dut_ tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with loopback source IP adress. """ @@ -680,11 +677,10 @@ def test_src_ip_is_loopback_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_f tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with absent destination IP address. """ @@ -712,13 +708,12 @@ def test_dst_ip_absent(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, por group = "L3" print(("msm group {}, setup {}".format(group, setup))) - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("ip_addr", ["ipv4", "ipv6"]) def test_src_ip_is_multicast_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ip_addr, - ports_info, skip_traffic_test): + ports_info): """ @summary: Create a packet with multicast source IP adress. 
""" @@ -752,11 +747,11 @@ def test_src_ip_is_multicast_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_ group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, ip_ver=ip_addr, skip_traffic_test=skip_traffic_test) + tx_dut_ports, ip_ver=ip_addr) def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with source IP address in class E. """ @@ -779,14 +774,12 @@ def test_src_ip_is_class_e(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk tcp_dport=pkt_fields["tcp_dport"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("addr_type, addr_direction", [("ipv4", "src"), ("ipv6", "src"), ("ipv4", "dst"), ("ipv6", "dst")]) -def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, - ports_info, skip_traffic_test): +def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, addr_type, addr_direction, ports_info): """ @summary: Create a packet with "0.0.0.0" source or destination IP address. 
""" @@ -833,11 +826,11 @@ def test_ip_is_zero_addr(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, a pytest.skip("Src IP zero packets are not dropped on Broadcom DNX platform currently") do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), tx_dut_ports, - ip_ver=addr_type, skip_traffic_test=skip_traffic_test) + ip_ver=addr_type) def test_dst_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet with link-local address "169.254.0.0/16". """ @@ -860,11 +853,10 @@ def test_dst_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk group = "L3" logger.info(pkt_params) - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) -def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): +def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info): """ @summary: Create a packet drops by loopback-filter. 
Loop-back filter means that route to the host with DST IP of received packet exists on received interface @@ -892,13 +884,11 @@ def test_loopback_filter(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, p group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_ip_pkt_with_expired_ttl(duthost, do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, - ports_info, sai_acl_drop_adj_enabled, configure_copp_drop_for_ttl_error, - skip_traffic_test): + ports_info, sai_acl_drop_adj_enabled, configure_copp_drop_for_ttl_error): """ @summary: Create an IP packet with TTL=0. """ @@ -916,12 +906,12 @@ def test_ip_pkt_with_expired_ttl(duthost, do_test, ptfadapter, setup, tx_dut_por group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) @pytest.mark.parametrize("pkt_field, value", [("version", 1), ("chksum", 10), ("ihl", 1)]) def test_broken_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, pkt_field, - value, ports_info, sai_acl_drop_adj_enabled, skip_traffic_test): + value, ports_info, sai_acl_drop_adj_enabled): """ @summary: Create a packet with broken IP header. 
""" @@ -940,11 +930,11 @@ def test_broken_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) def test_absent_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, ports_info, - sai_acl_drop_adj_enabled, skip_traffic_test): + sai_acl_drop_adj_enabled): """ @summary: Create packets with absent IP header. """ @@ -967,12 +957,12 @@ def test_absent_ip_header(do_test, ptfadapter, setup, tx_dut_ports, pkt_fields, group = "L3" do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled, skip_traffic_test=skip_traffic_test) + tx_dut_ports, skip_counter_check=sai_acl_drop_adj_enabled) @pytest.mark.parametrize("eth_dst", ["01:00:5e:00:01:02", "ff:ff:ff:ff:ff:ff"]) def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, - pkt_fields, eth_dst, ports_info, skip_traffic_test): + pkt_fields, eth_dst, ports_info): """ @summary: Create packets with multicast/broadcast ethernet dst. 
""" @@ -992,15 +982,14 @@ def test_unicast_ip_incorrect_eth_dst(do_test, ptfadapter, setup, tx_dut_ports, ) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) @pytest.mark.parametrize("igmp_version,msg_type", [("v1", "general_query"), ("v3", "general_query"), ("v1", "membership_report"), ("v2", "membership_report"), ("v3", "membership_report"), ("v2", "leave_group")]) def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_ports, - pkt_fields, igmp_version, msg_type, ports_info, skip_traffic_test): + pkt_fields, igmp_version, msg_type, ports_info): """ @summary: Create an IGMP non-routable packets. """ @@ -1085,12 +1074,11 @@ def test_non_routable_igmp_pkts(do_test, ptfadapter, setup, fanouthost, tx_dut_p pkt.getlayer("IP").dst, pkt_fields["ipv4_src"]) group = "L3" - do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, list(setup["dut_to_ptf_port_map"].values()), tx_dut_ports) def test_acl_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, acl_ingress, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, acl_ingress, ports_info): """ @summary: Verify that DUT drops packet with SRC IP 20.0.0.0/24 matched by ingress ACL """ @@ -1114,12 +1102,11 @@ def test_acl_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_fronten tcp_dport=pkt_fields["tcp_dport"] ) - do_test("ACL", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("ACL", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_acl_egress_drop(do_test, ptfadapter, duthosts, 
enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, acl_egress, ports_info, skip_traffic_test): + setup, tx_dut_ports, pkt_fields, acl_egress, ports_info): """ @summary: Verify that DUT drops packet with DST IP 192.168.144.1/24 matched by egress ACL and ACL drop counter incremented @@ -1145,5 +1132,4 @@ def test_acl_egress_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_ ip_ttl=64 ) do_test(discard_group="ACL", pkt=pkt, ptfadapter=ptfadapter, ports_info=ports_info, - sniff_ports=setup["neighbor_sniff_ports"], tx_dut_ports=tx_dut_ports, drop_information="OUTDATAACL", - skip_traffic_test=skip_traffic_test) + sniff_ports=setup["neighbor_sniff_ports"], tx_dut_ports=tx_dut_ports, drop_information="OUTDATAACL") diff --git a/tests/drop_packets/test_drop_counters.py b/tests/drop_packets/test_drop_counters.py index c1835fb8e89..ff12f8ee865 100755 --- a/tests/drop_packets/test_drop_counters.py +++ b/tests/drop_packets/test_drop_counters.py @@ -22,7 +22,6 @@ test_acl_egress_drop # noqa F401 from tests.common.helpers.constants import DEFAULT_NAMESPACE from tests.common.fixtures.conn_graph_facts import enum_fanout_graph_facts # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("any") @@ -141,8 +140,7 @@ def handle_backend_acl(duthost, tbinfo): def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, # noqa F811 - tx_dut_ports=None, skip_counter_check=False, drop_information=None, # noqa F811 - skip_traffic_test=False): # noqa F811 + tx_dut_ports=None, skip_counter_check=False, drop_information=None): # noqa F811 """ Base test function for verification of L2 or L3 packet drops. Verification type depends on 'discard_group' value. 
Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -162,9 +160,6 @@ def base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, port if skip_counter_check: logger.info("Skipping counter check") return None - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return None if discard_group == "L2": verify_drop_counters(duthosts, asic_index, ports_info["dut_iface"], @@ -297,8 +292,7 @@ def check_if_skip(): @pytest.fixture(scope='module') def do_test(duthosts): def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx_dut_ports=None, # noqa F811 - comparable_pkt=None, skip_counter_check=False, drop_information=None, ip_ver='ipv4', - skip_traffic_test=False): # noqa F811 + comparable_pkt=None, skip_counter_check=False, drop_information=None, ip_ver='ipv4'): """ Execute test - send packet, check that expected discard counters were incremented and packet was dropped @param discard_group: Supported 'discard_group' values: 'L2', 'L3', 'ACL', 'NO_DROPS' @@ -310,24 +304,24 @@ def do_counters_test(discard_group, pkt, ptfadapter, ports_info, sniff_ports, tx @param ip_ver: A string, ipv4 or ipv6 """ check_if_skip() + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + skip_counter_check = True + asic_index = ports_info["asic_index"] base_verification(discard_group, pkt, ptfadapter, duthosts, asic_index, ports_info, tx_dut_ports, - skip_counter_check=skip_counter_check, drop_information=drop_information, - skip_traffic_test=skip_traffic_test) + skip_counter_check=skip_counter_check, drop_information=drop_information) # Verify packets were not egresed the DUT if discard_group != "NO_DROPS": exp_pkt = expected_packet_mask(pkt, ip_ver=ip_ver) - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=sniff_ports) return do_counters_test def test_reserved_dmac_drop(do_test, ptfadapter, duthosts, 
enum_rand_one_per_hwsku_frontend_hostname, - setup, fanouthost, pkt_fields, ports_info, skip_traffic_test): # noqa F811 + setup, fanouthost, pkt_fields, ports_info): # noqa F811 """ @summary: Verify that packet with reserved DMAC is dropped and L2 drop counter incremented @used_mac_address: @@ -361,12 +355,11 @@ def test_reserved_dmac_drop(do_test, ptfadapter, duthosts, enum_rand_one_per_hws ) group = "L2" - do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test(group, pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) -def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 - pkt_fields, rif_port_down, ports_info, skip_traffic_test): # noqa F811 +def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 + pkt_fields, rif_port_down, ports_info): # noqa F811 """ @summary: Verify that packets on ingress port are not dropped when egress RIF link is down and check that drop counters not incremented @@ -384,12 +377,11 @@ def test_no_egress_drop_on_down_link(do_test, ptfadapter, setup, tx_dut_ports, tcp_dport=pkt_fields["tcp_dport"] ) - do_test("NO_DROPS", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("NO_DROPS", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_src_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - setup, tx_dut_ports, pkt_fields, ports_info, skip_traffic_test): # noqa F811 + setup, tx_dut_ports, pkt_fields, ports_info): # noqa F811 """ @summary: Verify that packet with link-local address "169.254.0.0/16" is dropped and L3 drop counter incremented """ @@ -412,12 +404,11 @@ def test_src_ip_link_local(do_test, ptfadapter, duthosts, enum_rand_one_per_hwsk pkt = testutils.simple_tcp_packet(**pkt_params) logger.info(pkt_params) - do_test("L3", pkt, ptfadapter, 
ports_info, setup["neighbor_sniff_ports"], - tx_dut_ports, skip_traffic_test=skip_traffic_test) + do_test("L3", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], tx_dut_ports) def test_ip_pkt_with_exceeded_mtu(do_test, ptfadapter, setup, tx_dut_ports, # noqa F811 - pkt_fields, mtu_config, ports_info, skip_traffic_test): # noqa F811 + pkt_fields, mtu_config, ports_info): # noqa F811 """ @summary: Verify that IP packet with exceeded MTU is dropped and L3 drop counter incremented """ @@ -447,7 +438,6 @@ def test_ip_pkt_with_exceeded_mtu(do_test, ptfadapter, setup, tx_dut_ports, ) L2_COL_KEY = RX_ERR try: - do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"], - skip_traffic_test=skip_traffic_test) + do_test("L2", pkt, ptfadapter, ports_info, setup["neighbor_sniff_ports"]) finally: L2_COL_KEY = RX_DRP From 1cc1b8157fc39f66452f6e781e4619e507ea7fdc Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:32:27 +0800 Subject: [PATCH 034/175] Remove skip_traffic_test fixture in decap tests (#15429) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in decap tests How did you verify/test it? 
--- tests/decap/test_decap.py | 7 +------ tests/decap/test_subnet_decap.py | 15 +++++---------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/tests/decap/test_decap.py b/tests/decap/test_decap.py index 9464ce15a5b..41264bb894a 100644 --- a/tests/decap/test_decap.py +++ b/tests/decap/test_decap.py @@ -21,7 +21,6 @@ from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active from tests.common.fixtures.fib_utils import fib_info_files # noqa F401 from tests.common.fixtures.fib_utils import single_fib_for_duts # noqa F401 @@ -193,8 +192,7 @@ def simulate_vxlan_teardown(duthosts, ptfhost, tbinfo): def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, mux_server_url, # noqa F811 toggle_all_simulator_ports_to_random_side, supported_ttl_dscp_params, ip_ver, loopback_ips, # noqa F811 - duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator, # noqa F811 - skip_traffic_test): # noqa F811 + duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator): # noqa F811 setup_info = setup_teardown asic_type = duthosts[0].facts["asic_type"] ecn_mode = "copy_from_outer" @@ -214,9 +212,6 @@ def test_decap(tbinfo, duthosts, ptfhost, setup_teardown, mux_server_url, else: apply_decap_cfg(duthosts, ip_ver, loopback_ips, ttl_mode, dscp_mode, ecn_mode, 'SET') - if skip_traffic_test: - return - if 'dualtor' in tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') diff --git a/tests/decap/test_subnet_decap.py b/tests/decap/test_subnet_decap.py index bed66fbe68a..7c2b48486b6 100644 --- a/tests/decap/test_subnet_decap.py +++ 
b/tests/decap/test_subnet_decap.py @@ -9,7 +9,6 @@ import ptf.testutils as testutils from ptf.mask import Mask from tests.common.dualtor.dual_tor_utils import rand_selected_interface # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 from tests.common.config_reload import config_reload @@ -192,23 +191,20 @@ def build_expected_vlan_subnet_packet(encapsulated_packet, ip_version, stage, de def verify_packet_with_expected(ptfadapter, stage, pkt, exp_pkt, send_port, - recv_ports=[], recv_port=None, timeout=10, skip_traffic_test=False): # noqa F811 - if skip_traffic_test is True: - logger.info("Skip traffic test") - return + recv_ports=[], recv_port=None, timeout=10): # noqa F811 ptfadapter.dataplane.flush() testutils.send(ptfadapter, send_port, pkt) if stage == "positive": - testutils.verify_packet_any_port(ptfadapter, exp_pkt, recv_ports, timeout=10) + testutils.verify_packet_any_port(ptfadapter, exp_pkt, recv_ports, timeout=timeout) elif stage == "negative": - testutils.verify_packet(ptfadapter, exp_pkt, recv_port, timeout=10) + testutils.verify_packet(ptfadapter, exp_pkt, recv_port, timeout=timeout) @pytest.mark.parametrize("ip_version", ["IPv4", "IPv6"]) @pytest.mark.parametrize("stage", ["positive", "negative"]) def test_vlan_subnet_decap(request, rand_selected_dut, tbinfo, ptfhost, ptfadapter, ip_version, stage, prepare_subnet_decap_config, prepare_vlan_subnet_test_port, - prepare_negative_ip_port_map, setup_arp_responder, skip_traffic_test): # noqa F811 + prepare_negative_ip_port_map, setup_arp_responder): # noqa F811 ptf_src_port, _, upstream_port_ids = prepare_vlan_subnet_test_port encapsulated_packet = build_encapsulated_vlan_subnet_packet(ptfadapter, rand_selected_dut, ip_version, stage) @@ -221,5 +217,4 @@ def test_vlan_subnet_decap(request, rand_selected_dut, tbinfo, ptfhost, ptfadapt ptf_target_port = None 
verify_packet_with_expected(ptfadapter, stage, encapsulated_packet, exp_pkt, - ptf_src_port, recv_ports=upstream_port_ids, recv_port=ptf_target_port, - skip_traffic_test=skip_traffic_test) + ptf_src_port, recv_ports=upstream_port_ids, recv_port=ptf_target_port) From 27f029656b5349ab299a2735c647548c8f597693 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:32:53 +0800 Subject: [PATCH 035/175] Remove skip_traffic_test fixture in copp tests (#15428) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in copp tests --- tests/copp/test_copp.py | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py index d96526e2c59..324a3e6679e 100644 --- a/tests/copp/test_copp.py +++ b/tests/copp/test_copp.py @@ -41,7 +41,6 @@ # Module-level fixtures from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology("t0", "t1", "t2", "m0", "mx") @@ -84,7 +83,7 @@ class TestCOPP(object): "LLDP", "UDLD"]) def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfhost, copp_testbed, dut_type, skip_traffic_test): # noqa F811 + ptfhost, copp_testbed, dut_type): """ Validates that rate-limited COPP groups work as expected. 
@@ -96,13 +95,11 @@ def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_host ptfhost, protocol, copp_testbed, - dut_type, - skip_traffic_test=skip_traffic_test) + dut_type) @pytest.mark.disable_loganalyzer def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfhost, check_image_version, copp_testbed, dut_type, backup_restore_config_db, - skip_traffic_test): # noqa F811 + ptfhost, check_image_version, copp_testbed, dut_type, backup_restore_config_db): """ Validates that one new trap(bgp) can be installed @@ -125,16 +122,14 @@ def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, self.trap_id.upper(), copp_testbed, dut_type, - has_trap=False, - skip_traffic_test=skip_traffic_test) + has_trap=False) logger.info("Set always_enabled of {} to true".format(self.trap_id)) copp_utils.configure_always_enabled_for_trap(duthost, self.trap_id, "true") logger.info("Verify {} trap status is installed by sending traffic".format(self.trap_id)) pytest_assert( - wait_until(60, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(60, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(self.trap_id)) @pytest.mark.disable_loganalyzer @@ -142,7 +137,7 @@ def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, "disable_feature_status"]) def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, check_image_version, copp_testbed, dut_type, - backup_restore_config_db, remove_trap_type, skip_traffic_test): # noqa F811 + backup_restore_config_db, remove_trap_type): """ Validates that The trap(bgp) can be uninstalled after deleting the corresponding entry from the feature table @@ -160,7 +155,7 @@ def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, copp_utils.uninstall_trap(duthost, 
"ip2me", "ip2me") logger.info("Pre condition: make trap {} is installed".format(self.feature_name)) - pre_condition_install_trap(ptfhost, duthost, copp_testbed, self.trap_id, self.feature_name, skip_traffic_test) + pre_condition_install_trap(ptfhost, duthost, copp_testbed, self.trap_id, self.feature_name) if remove_trap_type == "delete_feature_entry": logger.info("Remove feature entry: {}".format(self.feature_name)) @@ -172,13 +167,13 @@ def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, logger.info("Verify {} trap status is uninstalled by sending traffic".format(self.trap_id)) pytest_assert( wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), - copp_testbed, dut_type, has_trap=False, skip_traffic_test=skip_traffic_test), + copp_testbed, dut_type, has_trap=False), "uninstalling {} trap fail".format(self.trap_id)) @pytest.mark.disable_loganalyzer def test_trap_config_save_after_reboot(self, duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, check_image_version, copp_testbed, dut_type, - backup_restore_config_db, request, skip_traffic_test): # noqa F811 + backup_restore_config_db, request): # noqa F811 """ Validates that the trap configuration is saved or not after reboot(reboot, fast-reboot, warm-reboot) @@ -207,8 +202,7 @@ def test_trap_config_save_after_reboot(self, duthosts, localhost, enum_rand_one_ copp_utils.verify_always_enable_value(duthost, self.trap_id, "true") logger.info("Verify {} trap status is installed by sending traffic".format(self.trap_id)) pytest_assert( - wait_until(200, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(200, 20, 0, _copp_runner, duthost, ptfhost, self.trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(self.trap_id)) @@ -279,7 +273,7 @@ def ignore_expected_loganalyzer_exceptions(enum_rand_one_per_hwsku_frontend_host 
loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex) -def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True, skip_traffic_test=False): # noqa F811 +def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True): """ Configures and runs the PTF test cases. """ @@ -299,9 +293,6 @@ def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True, skip_ device_sockets = ["0-{}@tcp://127.0.0.1:10900".format(test_params.nn_target_port), "1-{}@tcp://{}:10900".format(test_params.nn_target_port, dut_ip)] - if skip_traffic_test is True: - logger.info("Skipping traffic test.") - return True # NOTE: debug_level can actually slow the PTF down enough to fail the test cases # that are not rate limited. Until this is addressed, do not use this flag as part of # nightly test runs. @@ -497,15 +488,14 @@ def backup_restore_config_db(duthosts, enum_rand_one_per_hwsku_frontend_hostname copp_utils.restore_config_db(duthost) -def pre_condition_install_trap(ptfhost, duthost, copp_testbed, trap_id, feature_name, skip_traffic_test): # noqa F811 +def pre_condition_install_trap(ptfhost, duthost, copp_testbed, trap_id, feature_name): # noqa F811 copp_utils.install_trap(duthost, feature_name) logger.info("Set always_enabled of {} to false".format(trap_id)) copp_utils.configure_always_enabled_for_trap(duthost, trap_id, "false") logger.info("Verify {} trap status is installed by sending traffic in pre_condition".format(trap_id)) pytest_assert( - wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap_id.upper(), copp_testbed, dut_type, - skip_traffic_test=skip_traffic_test), + wait_until(100, 20, 0, _copp_runner, duthost, ptfhost, trap_id.upper(), copp_testbed, dut_type), "Installing {} trap fail".format(trap_id)) From 1b583ae39483e2f17c0c478fd2ff517bc77b38d4 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:33:16 +0800 Subject: [PATCH 036/175] Remove 
skip_traffic_test fixture in arp tests (#15427) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in arp tests How did you verify/test it? --- tests/arp/test_stress_arp.py | 13 ++++++------- tests/arp/test_unknown_mac.py | 12 +++++------- tests/arp/test_wr_arp.py | 10 ++++------ tests/common/arp_utils.py | 5 ++--- tests/vxlan/test_vnet_vxlan.py | 9 ++------- 5 files changed, 19 insertions(+), 30 deletions(-) diff --git a/tests/arp/test_stress_arp.py b/tests/arp/test_stress_arp.py index 9461448a4ce..c6dcd250261 100644 --- a/tests/arp/test_stress_arp.py +++ b/tests/arp/test_stress_arp.py @@ -9,7 +9,6 @@ in6_getnsma, inet_pton, inet_ntop, socket from ipaddress import ip_address, ip_network from tests.common.utilities import wait_until, increment_ipv6_addr -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.errors import RunAnsibleModuleFail ARP_BASE_IP = "172.16.0.1/16" @@ -86,7 +85,7 @@ def genrate_ipv4_ip(): def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, - ptfadapter, get_function_completeness_level, skip_traffic_test): # noqa F811 + ptfadapter, get_function_completeness_level): """ Send gratuitous ARP (GARP) packet sfrom the PTF to the DUT @@ -95,7 +94,7 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, normalized_level = get_function_completeness_level if normalized_level is None: normalized_level = "debug" - + asic_type = duthost.facts['asic_type'] ipv4_avaliable = get_crm_resources(duthost, "ipv4_neighbor", "available") fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") pytest_assert(ipv4_avaliable > 0 and 
fdb_avaliable > 0, "Entries have been filled") @@ -113,7 +112,7 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, loop_times -= 1 try: add_arp(ptf_intf_ipv4_hosts, intf1_index, ptfadapter) - if not skip_traffic_test: + if asic_type != 'vs': # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" @@ -175,7 +174,7 @@ def add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable): def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, - ptfadapter, get_function_completeness_level, proxy_arp_enabled, skip_traffic_test): # noqa F811 + ptfadapter, get_function_completeness_level, proxy_arp_enabled): _, _, ptf_intf_ipv6_addr, _, ptf_intf_index = ip_and_intf_info ptf_intf_ipv6_addr = increment_ipv6_addr(ptf_intf_ipv6_addr) pytest_require(proxy_arp_enabled, 'Proxy ARP not enabled for all VLANs') @@ -184,7 +183,7 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, normalized_level = get_function_completeness_level if normalized_level is None: normalized_level = "debug" - + asic_type = duthost.facts['asic_type'] loop_times = LOOP_TIMES_LEVEL_MAP[normalized_level] ipv6_avaliable = get_crm_resources(duthost, "ipv6_neighbor", "available") fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") @@ -196,7 +195,7 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, loop_times -= 1 try: add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable) - if not skip_traffic_test: + if asic_type != 'vs': # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" diff --git a/tests/arp/test_unknown_mac.py b/tests/arp/test_unknown_mac.py 
index 109addaa32a..99d53249265 100644 --- a/tests/arp/test_unknown_mac.py +++ b/tests/arp/test_unknown_mac.py @@ -14,7 +14,6 @@ from tests.common import constants from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -260,7 +259,7 @@ class TrafficSendVerify(object): """ Send traffic and check interface counters and ptf ports """ @initClassVars def __init__(self, duthost, ptfadapter, dst_ip, ptf_dst_port, ptf_vlan_ports, - intfs, ptf_ports, arp_entry, dscp, skip_traffic_test): # noqa F811 + intfs, ptf_ports, arp_entry, dscp): # noqa F811 """ Args: duthost(AnsibleHost) : dut instance @@ -278,7 +277,6 @@ def __init__(self, duthost, ptfadapter, dst_ip, ptf_dst_port, ptf_vlan_ports, self.pkt_map = dict() self.pre_rx_drops = dict() self.dut_mac = duthost.facts['router_mac'] - self.skip_traffic_test = skip_traffic_test def _constructPacket(self): """ @@ -361,7 +359,8 @@ def runTest(self): self._constructPacket() logger.info("Clear all counters before test run") self.duthost.command("sonic-clear counters") - if not self.skip_traffic_test: + asic_type = self.duthost.facts["asic_type"] + if asic_type != "vs": time.sleep(1) logger.info("Collect drop counters before test run") self._verifyIntfCounters(pretest=True) @@ -378,7 +377,7 @@ def runTest(self): class TestUnknownMac(object): @pytest.mark.parametrize("dscp", ["dscp-3", "dscp-4", "dscp-8"]) - def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, rand_one_dut_hostname, ptfadapter, skip_traffic_test): # noqa F811 + def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, 
rand_one_dut_hostname, ptfadapter): """ Verify unknown mac behavior for lossless and lossy priority @@ -404,7 +403,6 @@ def test_unknown_mac(self, unknownMacSetup, dscp, duthosts, rand_one_dut_hostnam self.ptf_vlan_ports = setup['ptf_vlan_ports'] self.intfs = setup['intfs'] self.ptf_ports = setup['ptf_ports'] - self.skip_traffic_test = skip_traffic_test self.validateEntries() self.run() @@ -422,5 +420,5 @@ def run(self): thandle = TrafficSendVerify(self.duthost, self.ptfadapter, self.dst_ip, self.ptf_dst_port, self.ptf_vlan_ports, self.intfs, self.ptf_ports, - self.arp_entry, self.dscp, self.skip_traffic_test) + self.arp_entry, self.dscp) thandle.runTest() diff --git a/tests/arp/test_wr_arp.py b/tests/arp/test_wr_arp.py index 92dd027220d..edc163d8ad3 100644 --- a/tests/arp/test_wr_arp.py +++ b/tests/arp/test_wr_arp.py @@ -4,7 +4,6 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.storage_backend.backend_utils import skip_test_module_over_backend_topologies # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.utilities import wait_until @@ -68,7 +67,7 @@ def warmRebootSystemFlag(duthost): duthost.shell(cmd='sonic-db-cli STATE_DB hset "WARM_RESTART_ENABLE_TABLE|system" enable false') -def test_wr_arp(request, duthost, ptfhost, creds, skip_traffic_test): # noqa F811 +def test_wr_arp(request, duthost, ptfhost, creds): ''' Control Plane Assistant test for Warm-Reboot. 
@@ -85,10 +84,10 @@ def test_wr_arp(request, duthost, ptfhost, creds, skip_traffic_test): # noqa F Returns: None ''' - testWrArp(request, duthost, ptfhost, creds, skip_traffic_test) + testWrArp(request, duthost, ptfhost, creds) -def test_wr_arp_advance(request, duthost, ptfhost, creds, skip_traffic_test): # noqa F811 +def test_wr_arp_advance(request, duthost, ptfhost, creds): testDuration = request.config.getoption('--test_duration', default=DEFAULT_TEST_DURATION) ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] dutIp = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] @@ -96,8 +95,7 @@ def test_wr_arp_advance(request, duthost, ptfhost, creds, skip_traffic_test): logger.info('Warm-Reboot Control-Plane assist feature') sonicadmin_alt_password = duthost.host.options['variable_manager'].\ _hostvars[duthost.hostname]['sonic_default_passwords'] - if skip_traffic_test is True: - return + ptf_runner( ptfhost, 'ptftests', diff --git a/tests/common/arp_utils.py b/tests/common/arp_utils.py index 280819ae413..05186267f84 100644 --- a/tests/common/arp_utils.py +++ b/tests/common/arp_utils.py @@ -179,7 +179,7 @@ def tear_down(duthost, route, ptfIp, gwIp): teardownRouteToPtfhost(duthost, route, ptfIp, gwIp) -def testWrArp(request, duthost, ptfhost, creds, skip_traffic_test): +def testWrArp(request, duthost, ptfhost, creds): testDuration = request.config.getoption('--test_duration', default=DEFAULT_TEST_DURATION) ptfIp = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars['ansible_host'] dutIp = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'] @@ -187,8 +187,7 @@ def testWrArp(request, duthost, ptfhost, creds, skip_traffic_test): logger.info('Warm-Reboot Control-Plane assist feature') sonicadmin_alt_password = duthost.host.options['variable_manager']. 
\ _hostvars[duthost.hostname]['sonic_default_passwords'] - if skip_traffic_test is True: - return + ptf_runner( ptfhost, 'ptftests', diff --git a/tests/vxlan/test_vnet_vxlan.py b/tests/vxlan/test_vnet_vxlan.py index 41413003f9c..108b4283b70 100644 --- a/tests/vxlan/test_vnet_vxlan.py +++ b/tests/vxlan/test_vnet_vxlan.py @@ -15,8 +15,6 @@ from tests.common.flow_counter.flow_counter_utils import RouteFlowCounterTestContext, is_route_flow_counter_supported # noqa F401 from tests.common.arp_utils import set_up, tear_down, testWrArp -from tests.common.fixtures.ptfhost_utils import skip_traffic_test - from tests.common.config_reload import config_reload logger = logging.getLogger(__name__) @@ -159,7 +157,7 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname, elif request.param == "WR_ARP": route, ptfIp, gwIp = set_up(duthost, ptfhost, tbinfo) try: - testWrArp(request, duthost, ptfhost, creds, skip_traffic_test) + testWrArp(request, duthost, ptfhost, creds) finally: tear_down(duthost, route, ptfIp, gwIp) @@ -190,7 +188,7 @@ def is_neigh_reachable(duthost, vnet_config): def test_vnet_vxlan(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhost, - vnet_test_params, creds, is_route_flow_counter_supported, skip_traffic_test): # noqa F811 + vnet_test_params, creds, is_route_flow_counter_supported): # noqa F811 """ Test case for VNET VxLAN @@ -229,9 +227,6 @@ def test_vnet_vxlan(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhos logger.info("Skipping cleanup") pytest.skip("Skip cleanup specified") - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return logger.debug("Starting PTF runner") if scenario == 'Enabled' and vxlan_enabled: route_pattern = 'Vnet1|100.1.1.1/32' From 4467b33a479d4da6afebbb9b1ca0cdf4f8d6c72e Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:33:43 +0800 Subject: [PATCH 037/175] Remove skip_traffic_test fixture in dualtor tests (#15392) 
What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test in dualtor testcases How did you verify/test it? Any platform specific information? --- tests/common/dualtor/data_plane_utils.py | 44 ++++---- tests/common/dualtor/dual_tor_utils.py | 29 ++--- tests/common/dualtor/server_traffic_utils.py | 10 +- tests/common/dualtor/tunnel_traffic_utils.py | 6 +- tests/dualtor/test_ipinip.py | 16 +-- .../test_orchagent_active_tor_downstream.py | 23 ++-- tests/dualtor/test_orchagent_mac_move.py | 24 ++-- tests/dualtor/test_orchagent_slb.py | 23 ++-- .../test_orchagent_standby_tor_downstream.py | 24 ++-- .../test_standby_tor_upstream_mux_toggle.py | 13 +-- tests/dualtor/test_tor_ecn.py | 32 +++--- tests/dualtor/test_tunnel_memory_leak.py | 12 +- tests/dualtor_io/test_heartbeat_failure.py | 35 +++--- tests/dualtor_io/test_link_drop.py | 38 +++---- tests/dualtor_io/test_link_failure.py | 67 +++++------- tests/dualtor_io/test_normal_op.py | 103 ++++++------------ tests/dualtor_io/test_tor_bgp_failure.py | 29 ++--- tests/dualtor_mgmt/test_ingress_drop.py | 8 +- 18 files changed, 210 insertions(+), 326 deletions(-) diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py index cb15313a9e7..febaa97d841 100644 --- a/tests/common/dualtor/data_plane_utils.py +++ b/tests/common/dualtor/data_plane_utils.py @@ -241,7 +241,7 @@ def save_pcap(request, pytestconfig): @pytest.fixture def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 """ Starts IO test from T1 router to server. 
As part of IO test the background thread sends and sniffs packets. @@ -263,8 +263,7 @@ def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, def t1_to_server_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.1, - stop_after=None, allow_disruption_before_traffic=False, - skip_traffic_test=False): + stop_after=None, allow_disruption_before_traffic=False): """ Helper method for `send_t1_to_server_with_action`. Starts sender and sniffer before performing the action on the tor host. @@ -299,9 +298,6 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") - return return verify_and_report(tor_IO, verify, delay, allowed_disruption, allow_disruption_before_traffic) yield t1_to_server_io_test @@ -311,7 +307,7 @@ def t1_to_server_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 """ Starts IO test from server to T1 router. As part of IO test the background thread sends and sniffs packets. @@ -334,7 +330,7 @@ def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, def server_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): """ Helper method for `send_server_to_t1_with_action`. Starts sender and sniffer before performing the action on the tor host. 
@@ -368,8 +364,9 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -380,13 +377,13 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_soc_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def soc_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, tor_vlan_port, send_interval, @@ -396,8 +393,9 @@ def soc_to_t1_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -408,13 +406,13 @@ def soc_to_t1_io_test(activehost, tor_vlan_port=None, @pytest.fixture def send_t1_to_soc_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def t1_to_soc_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None, skip_traffic_test=False): + stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, 
tor_vlan_port, send_interval, @@ -426,8 +424,9 @@ def t1_to_soc_io_test(activehost, tor_vlan_port=None, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) @@ -454,13 +453,13 @@ def _select_test_mux_ports(cable_type, count): @pytest.fixture def send_server_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, - cable_type, vmhost, save_pcap, skip_traffic_test=False): # noqa F811 + cable_type, vmhost, save_pcap): # noqa F811 arp_setup(ptfhost) def server_to_server_io_test(activehost, test_mux_ports, delay=0, allowed_disruption=0, action=None, - verify=False, send_interval=0.01, stop_after=None, skip_traffic_test=False): + verify=False, send_interval=0.01, stop_after=None): tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, test_mux_ports, send_interval, traffic_direction="server_to_server", stop_after=stop_after, @@ -471,8 +470,9 @@ def server_to_server_io_test(activehost, test_mux_ports, delay=0, if delay and not allowed_disruption: allowed_disruption = 1 - if skip_traffic_test is True: - logging.info("Skipping traffic test") + asic_type = duthosts[0].facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping verify on VS platform") return return verify_and_report(tor_IO, verify, delay, allowed_disruption) diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index bb028a1144c..ee76923be5d 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -883,7 +883,7 @@ def mux_cable_server_ip(dut): def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, standby_tor_ip, selected_port, target_server_ip, target_server_ipv6, target_server_port, 
ptf_portchannel_indices, - completeness_level, check_ipv6=False, skip_traffic_test=False): + completeness_level, check_ipv6=False): """ Function for testing traffic distribution among all avtive T1. A test script will be running on ptf to generate traffic to standby interface, and the traffic will be forwarded to @@ -901,9 +901,6 @@ def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, Returns: None. """ - if skip_traffic_test is True: - logging.info("Skip checking tunnel balance due to traffic test was skipped") - return HASH_KEYS = ["src-port", "dst-port", "src-ip"] params = { "server_ip": target_server_ip, @@ -1158,7 +1155,7 @@ def check_nexthops_balance(rand_selected_dut, ptfadapter, dst_server_addr, pc)) -def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test=False): +def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num): for pc, intfs in portchannel_ports.items(): count = 0 # Collect the packets count within a single portchannel @@ -1167,16 +1164,14 @@ def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_pa count = count + port_packet_count.get(uplink_int, 0) logging.info("Packets received on portchannel {}: {}".format(pc, count)) - if skip_traffic_test is True: - logging.info("Skip checking single uplink balance due to traffic test was skipped") - continue if count > 0 and count != expect_packet_num: pytest.fail("Packets not sent up single standby port {}".format(pc)) # verify nexthops are only sent to single active or standby mux def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, downlink_ints, skip_traffic_test=False): + tbinfo, downlink_ints): + asic_type = rand_selected_dut.facts["asic_type"] HASH_KEYS = ["src-port", "dst-port", "src-ip"] expect_packet_num = 1000 expect_packet_num_high = expect_packet_num * (0.90) @@ -1191,9 +1186,7 @@ def 
check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add port_packet_count = dict() packets_to_send = generate_hashed_packet_to_server(ptfadapter, rand_selected_dut, HASH_KEYS, dst_server_addr, expect_packet_num) - if skip_traffic_test is True: - logging.info("Skip checking single downlink balance due to traffic test was skipped") - return + for send_packet, exp_pkt, exp_tunnel_pkt in packets_to_send: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), send_packet, count=1) # expect multi-mux nexthops to focus packets to one downlink @@ -1204,6 +1197,10 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add for ptf_idx, pkt_count in ptf_port_count.items(): port_packet_count[ptf_idx] = port_packet_count.get(ptf_idx, 0) + pkt_count + if asic_type == "vs": + logging.info("Skipping validation on VS platform") + return + logging.info("Received packets in ports: {}".format(str(port_packet_count))) for downlink_int in expected_downlink_ports: # packets should be either 0 or expect_packet_num: @@ -1215,11 +1212,11 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add if len(downlink_ints) == 0: # All nexthops are now connected to standby mux, and the packets will be sent towards a single portchanel int # Check if uplink distribution is towards a single portchannel - check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test) + check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num) def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, - pkt_num=100, drop=False, skip_traffic_test=False): + pkt_num=100, drop=False): """ @summary: Helper function for verifying upstream packets @param host: The dut host @@ -1272,9 +1269,7 @@ def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, logger.info("Verifying upstream traffic. 
packet number = {} interface = {} \ server_ip = {} expect_drop = {}".format(pkt_num, itfs, server_ip, drop)) - if skip_traffic_test is True: - logger.info("Skip verifying upstream traffic due to traffic test was skipped") - return + for i in range(0, pkt_num): ptfadapter.dataplane.flush() testutils.send(ptfadapter, tx_port, pkt, count=1) diff --git a/tests/common/dualtor/server_traffic_utils.py b/tests/common/dualtor/server_traffic_utils.py index 33e0293b3ce..a0be517354d 100644 --- a/tests/common/dualtor/server_traffic_utils.py +++ b/tests/common/dualtor/server_traffic_utils.py @@ -56,8 +56,7 @@ class ServerTrafficMonitor(object): VLAN_INTERFACE_TEMPLATE = "{external_port}.{vlan_id}" def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, - conn_graph_facts, exp_pkt, existing=True, is_mocked=False, - skip_traffic_test=False): + conn_graph_facts, exp_pkt, existing=True, is_mocked=False): """ @summary: Initialize the monitor. @@ -82,7 +81,7 @@ def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, self.conn_graph_facts = conn_graph_facts self.captured_packets = [] self.matched_packets = [] - self.skip_traffic_test = skip_traffic_test + if is_mocked: mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo) ptf_iface = "eth%s" % mg_facts['minigraph_ptf_indices'][self.dut_iface] @@ -128,8 +127,9 @@ def __exit__(self, exc_type, exc_value, traceback): logging.info("the expected packet:\n%s", str(self.exp_pkt)) self.matched_packets = [p for p in self.captured_packets if match_exp_pkt(self.exp_pkt, p)] logging.info("received %d matched packets", len(self.matched_packets)) - if self.skip_traffic_test is True: - logging.info("Skip matched_packets verify due to traffic test was skipped.") + asic_type = self.duthost.facts["asic_type"] + if asic_type == "vs": + logging.info("Skipping matched_packets verify on VS platform.") return if self.matched_packets: logging.info( diff --git a/tests/common/dualtor/tunnel_traffic_utils.py 
b/tests/common/dualtor/tunnel_traffic_utils.py index 14b4f7786e4..059ca8ad703 100644 --- a/tests/common/dualtor/tunnel_traffic_utils.py +++ b/tests/common/dualtor/tunnel_traffic_utils.py @@ -250,7 +250,7 @@ def _disassemble_ip_tos(tos): return " ,".join(check_res) def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=None, - check_items=("ttl", "tos", "queue"), packet_count=10, skip_traffic_test=False): + check_items=("ttl", "tos", "queue"), packet_count=10): """ Init the tunnel traffic monitor. @@ -262,7 +262,6 @@ def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=Non self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo)) self.ptfadapter = ptfadapter self.packet_count = packet_count - self.skip_traffic_test = skip_traffic_test standby_tor_cfg_facts = self.standby_tor.config_facts( host=self.standby_tor.hostname, source="running" @@ -293,9 +292,6 @@ def __enter__(self): def __exit__(self, *exc_info): if exc_info[0]: return - if self.skip_traffic_test is True: - logging.info("Skip tunnel traffic verify due to traffic test was skipped.") - return try: result = testutils.verify_packet_any_port( ptfadapter, diff --git a/tests/dualtor/test_ipinip.py b/tests/dualtor/test_ipinip.py index d5e0b15476b..33d62946d75 100644 --- a/tests/dualtor/test_ipinip.py +++ b/tests/dualtor/test_ipinip.py @@ -28,7 +28,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 from tests.common.dualtor.dual_tor_utils import validate_active_active_dualtor_setup # noqa F401 @@ -105,7 +104,7 @@ def 
build_expected_packet_to_server(encapsulated_packet, decrease_ttl=False): def test_decap_active_tor( build_encapsulated_packet, request, ptfhost, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tbinfo, rand_selected_dut, tunnel_traffic_monitor): # noqa F811 @contextlib.contextmanager def stop_garp(ptfhost): @@ -129,9 +128,6 @@ def stop_garp(ptfhost): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with stop_garp(ptfhost): ptfadapter.dataplane.flush() testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet) @@ -141,7 +137,7 @@ def stop_garp(ptfhost): def test_decap_standby_tor( build_encapsulated_packet, request, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test # noqa F401 + tbinfo, rand_selected_dut, tunnel_traffic_monitor # noqa F401 ): def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): @@ -170,9 +166,6 @@ def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with tunnel_traffic_monitor(tor, existing=False): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet, count=10) time.sleep(2) @@ -302,7 +295,7 @@ def setup_active_active_ports(active_active_ports, rand_selected_dut, rand_unsel def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, # noqa F811 ptfadapter, tbinfo, setup_mirror_session, toggle_all_simulator_ports_to_rand_unselected_tor, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test, # noqa F811 + 
tunnel_traffic_monitor, # noqa F811 setup_standby_ports_on_rand_selected_tor): # noqa F811 """ A test case to verify the bounced back packet from Standby ToR to T1 doesn't have an unexpected vlan id (4095) @@ -321,8 +314,5 @@ def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, logging.info("Sending packet from ptf t1 interface {}".format(src_port_id)) inner_packet = pkt_to_server[scapy.all.IP].copy() inner_packet[IP].ttl -= 1 - if skip_traffic_test is True: - logging.info("Skip following traffic test") - return with tunnel_traffic_monitor(rand_selected_dut, inner_packet=inner_packet, check_items=()): testutils.send(ptfadapter, src_port_id, pkt_to_server) diff --git a/tests/dualtor/test_orchagent_active_tor_downstream.py b/tests/dualtor/test_orchagent_active_tor_downstream.py index 45d3506eaf8..ddb6854a34a 100644 --- a/tests/dualtor/test_orchagent_active_tor_downstream.py +++ b/tests/dualtor/test_orchagent_active_tor_downstream.py @@ -5,7 +5,6 @@ from ipaddress import ip_address from ptf import testutils -from tests.common.dualtor.dual_tor_mock import * # noqa F403 from tests.common.dualtor.dual_tor_utils import dualtor_info from tests.common.dualtor.dual_tor_utils import flush_neighbor from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports @@ -15,13 +14,13 @@ from tests.common.dualtor.dual_tor_utils import check_nexthops_single_downlink from tests.common.dualtor.dual_tor_utils import add_nexthop_routes, remove_static_routes from tests.common.dualtor.dual_tor_mock import set_mux_state +from tests.common.dualtor.dual_tor_mock import is_mocked_dualtor from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports # noqa F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import 
run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -68,7 +67,7 @@ def neighbor_reachable(duthost, neighbor_ip): def test_active_tor_remove_neighbor_downstream_active( conn_graph_facts, ptfadapter, ptfhost, testbed_setup, rand_selected_dut, tbinfo, set_crm_polling_interval, - tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tunnel_traffic_monitor, vmhost # noqa F811 ): """ @Verify those two scenarios: @@ -103,9 +102,9 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s from ptf t1 interface %s", server_ip, ptf_t1_intf) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) @@ -113,7 +112,7 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s after removing neighbor entry", server_ip) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) remove_neighbor_ct = remove_neighbor(ptfhost, tor, 
server_ip, ip_version, removed_neighbor) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ @@ -126,7 +125,7 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s after neighbor entry is restored", server_ip) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: @@ -146,10 +145,10 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): def test_downstream_ecmp_nexthops( ptfadapter, rand_selected_dut, tbinfo, - toggle_all_simulator_ports, tor_mux_intfs, ip_version, skip_traffic_test # noqa F811 + toggle_all_simulator_ports, tor_mux_intfs, ip_version # noqa F811 ): nexthops_count = 4 - set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) # noqa F405 + set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) iface_server_map = get_interface_server_map(rand_selected_dut, nexthops_count) if ip_version == "ipv4": @@ -172,7 +171,7 @@ def test_downstream_ecmp_nexthops( try: logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces, skip_traffic_test) + tbinfo, nexthop_interfaces) nexthop_interfaces_copy = nexthop_interfaces.copy() @@ -183,7 +182,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.remove(interface) logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - 
tbinfo, nexthop_interfaces_copy, skip_traffic_test) + tbinfo, nexthop_interfaces_copy) # Revert two mux states to active for index, interface in reversed(list(enumerate(nexthop_interfaces))): @@ -192,7 +191,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.append(interface) logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces_copy, skip_traffic_test) + tbinfo, nexthop_interfaces_copy) finally: # Remove the nexthop route remove_static_routes(rand_selected_dut, dst_server_addr) diff --git a/tests/dualtor/test_orchagent_mac_move.py b/tests/dualtor/test_orchagent_mac_move.py index 7aa0a25e39d..93c18c4d14b 100644 --- a/tests/dualtor/test_orchagent_mac_move.py +++ b/tests/dualtor/test_orchagent_mac_move.py @@ -3,7 +3,8 @@ import random from ptf import testutils -from tests.common.dualtor.dual_tor_mock import * # noqa F403 +from tests.common.dualtor.dual_tor_mock import is_mocked_dualtor +from tests.common.dualtor.dual_tor_mock import set_dual_tor_state_to_orchagent from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports from tests.common.dualtor.dual_tor_utils import crm_neighbor_checker from tests.common.dualtor.dual_tor_utils import build_packet_to_server @@ -13,7 +14,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output @@ -85,7 +85,7 @@ def test_mac_move( announce_new_neighbor, apply_active_state_to_orchagent, conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, set_crm_polling_interval, - tbinfo, tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tbinfo, 
tunnel_traffic_monitor, vmhost # noqa F811 ): tor = rand_selected_dut ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -96,23 +96,23 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("let new neighbor learnt on active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) # mac move to a standby port test_port = next(announce_new_neighbor) - announce_new_neighbor.send(lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface])) # noqa F405 + announce_new_neighbor.send(lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface])) logging.info("mac move to a standby port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -121,7 +121,7 @@ def test_mac_move( tor.shell("fdbclear") server_traffic_monitor = 
ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -131,10 +131,10 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("mac move to another active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=True, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -145,7 +145,7 @@ def test_mac_move( tor.shell("fdbclear") server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) diff --git a/tests/dualtor/test_orchagent_slb.py b/tests/dualtor/test_orchagent_slb.py index 4b0aeb89627..19e88996746 100644 --- a/tests/dualtor/test_orchagent_slb.py +++ b/tests/dualtor/test_orchagent_slb.py @@ -2,7 +2,6 @@ import pytest import random import time -import logging import scapy.all as scapyall from ptf import testutils @@ -20,7 +19,6 @@ from 
tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers import bgp from tests.common.utilities import is_ipv4_address @@ -217,7 +215,7 @@ def test_orchagent_slb( force_active_tor, upper_tor_host, lower_tor_host, # noqa F811 ptfadapter, ptfhost, setup_interfaces, toggle_all_simulator_ports_to_upper_tor, tbinfo, # noqa F811 - tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 + tunnel_traffic_monitor, vmhost # noqa F811 ): def verify_bgp_session(duthost, bgp_neighbor): @@ -235,11 +233,8 @@ def verify_route(duthost, route, existing=True): else: assert len(existing_route["nexthops"]) == 0 - def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True, - skip_traffic_test=skip_traffic_test): - if skip_traffic_test is True: - logging.info("Skip traffic test.") - return + def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True): + prefix = ipaddress.ip_network(route["prefix"]) dst_host = str(next(prefix.hosts())) pkt, exp_pkt = build_packet_to_server(duthost, ptfadapter, dst_host) @@ -295,11 +290,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 3: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=True ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=True ) # STEP 4: withdraw the announced route to both ToRs @@ 
-314,11 +309,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 5: verify the route is removed by verifying that downstream traffic is dropped verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=False, skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=False ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=False, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=False ) # STEP 6: toggle mux state change @@ -341,11 +336,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 8: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=False, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=False, is_route_existed=True ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test + is_duthost_active=True, is_route_existed=True ) # STEP 9: verify teardown diff --git a/tests/dualtor/test_orchagent_standby_tor_downstream.py b/tests/dualtor/test_orchagent_standby_tor_downstream.py index 1d26d74187b..b59e6f4cc1b 100644 --- a/tests/dualtor/test_orchagent_standby_tor_downstream.py +++ b/tests/dualtor/test_orchagent_standby_tor_downstream.py @@ -19,7 +19,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert as pt_assert from tests.common.dualtor.tunnel_traffic_utils 
import tunnel_traffic_monitor # noqa F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor @@ -62,13 +61,12 @@ def get_function_completeness_level(pytestconfig): @pytest.fixture def get_testbed_params(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - ip_version, setup_testbed_ipv6, get_function_completeness_level, skip_traffic_test): # noqa F811 + ip_version, setup_testbed_ipv6, get_function_completeness_level): # noqa F811 """Return a function to get testbed params.""" def _get_testbed_params(): params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, get_function_completeness_level) params["check_ipv6"] = (ip_version == "ipv6") - params["skip_traffic_test"] = skip_traffic_test return params return _get_testbed_params @@ -275,8 +273,7 @@ def test_standby_tor_remove_neighbor_downstream_standby( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, set_crm_polling_interval, tunnel_traffic_monitor, # noqa: F811 - vmhost, get_testbed_params, - ip_version, skip_traffic_test # noqa: F811 + vmhost, get_testbed_params, ip_version ): """ @summary: Verify that after removing neighbor entry for a server over standby @@ -307,7 +304,7 @@ def stop_neighbor_advertiser(ptfhost, ip_version): pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, target_server) ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send traffic to server %s from ptf t1 interface %s", target_server, ptf_t1_intf) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) with tunnel_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) @@ -315,7 +312,7 @@ def stop_neighbor_advertiser(ptfhost, ip_version): tunnel_monitor.existing = False server_traffic_monitor = ServerTrafficMonitor( tor, ptfhost, vmhost, tbinfo, test_params["selected_port"], conn_graph_facts, 
exp_pkt, - existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test + existing=False, is_mocked=is_mocked_dualtor(tbinfo) ) # for real dualtor testbed, leave the neighbor restoration to garp service flush_neighbor_ct = flush_neighbor(tor, target_server, restore=is_t0_mocked_dualtor) @@ -334,7 +331,7 @@ def test_downstream_standby_mux_toggle_active( rand_selected_dut, rand_unselected_dut, tbinfo, tunnel_traffic_monitor, vmhost, # noqa: F811 toggle_all_simulator_ports, tor_mux_intfs, # noqa: F811 - ip_version, get_testbed_params, skip_traffic_test # noqa: F811 + ip_version, get_testbed_params ): # set rand_selected_dut as standby and rand_unselected_dut to active tor test_params = get_testbed_params() @@ -349,9 +346,8 @@ def test_downstream_standby_mux_toggle_active( ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo)) def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, - expect_server_traffic=True, skip_traffic_test=False): - if skip_traffic_test is True: - return + expect_server_traffic=True): + tunnel_monitor = tunnel_traffic_monitor(rand_selected_dut, existing=True) server_traffic_monitor = ServerTrafficMonitor( torhost, ptfhost, vmhost, tbinfo, test_params["selected_port"], @@ -370,7 +366,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, logger.info("Step 1.2: Verify traffic to this route dst is forwarded to Active ToR and equally distributed") check_tunnel_balance(**test_params) monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, - expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=True) logger.info("Stage 2: Verify Active Forwarding") logger.info("Step 2.1: Simulate Mux state change to active") @@ -378,7 +374,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, time.sleep(30) logger.info("Step 2.2: Verify traffic to this route dst is forwarded directly to server") 
monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=True, - expect_tunnel_traffic=False, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=False) logger.info("Stage 3: Verify Standby Forwarding Again") logger.info("Step 3.1: Simulate Mux state change to standby") @@ -387,7 +383,7 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, logger.info("Step 3.2: Verify traffic to this route dst \ is now redirected back to Active ToR and equally distributed") monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, - expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) + expect_tunnel_traffic=True) check_tunnel_balance(**test_params) remove_static_routes(rand_selected_dut, random_dst_ip) diff --git a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py index e7765412292..99ff89f88b6 100644 --- a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py +++ b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py @@ -10,7 +10,7 @@ from tests.common.config_reload import config_reload from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, \ - run_icmp_responder, skip_traffic_test # noqa F401 + run_icmp_responder # noqa F401 logger = logging.getLogger(__file__) @@ -34,7 +34,7 @@ def test_cleanup(rand_selected_dut): def test_standby_tor_upstream_mux_toggle( rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, # noqa F811 - toggle_all_simulator_ports, set_crm_polling_interval, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports, set_crm_polling_interval): # noqa F811 itfs, ip = rand_selected_interface PKT_NUM = 100 # Step 1. 
Set mux state to standby and verify traffic is dropped by ACL rule and drop counters incremented @@ -49,8 +49,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True, - skip_traffic_test=skip_traffic_test) + drop=True) time.sleep(5) # Step 2. Toggle mux state to active, and verify traffic is not dropped by ACL and fwd-ed to uplinks; @@ -65,8 +64,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=False, - skip_traffic_test=skip_traffic_test) + drop=False) # Step 3. Toggle mux state to standby, and verify traffic is dropped by ACL; # verify CRM show and no nexthop objects are stale @@ -80,8 +78,7 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True, - skip_traffic_test=skip_traffic_test) + drop=True) crm_facts1 = rand_selected_dut.get_crm_facts() unmatched_crm_facts = compare_crm_facts(crm_facts0, crm_facts1) pt_assert(len(unmatched_crm_facts) == 0, 'Unmatched CRM facts: {}' diff --git a/tests/dualtor/test_tor_ecn.py b/tests/dualtor/test_tor_ecn.py index 5e965dde253..07e5aafc710 100644 --- a/tests/dualtor/test_tor_ecn.py +++ b/tests/dualtor/test_tor_ecn.py @@ -28,7 +28,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.tunnel_traffic_utils import derive_queue_id_from_dscp, derive_out_dscp_from_inner_dscp from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 @@ -254,7 +253,12 @@ def verify_ecn_on_received_packet( """ Verify ECN value on the 
received packet w.r.t expected packet """ - _, rec_pkt = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + result = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + if isinstance(result, tuple): + _, rec_pkt = result + elif isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following verify steps.") + return rec_pkt = Ether(rec_pkt) logging.info("received packet:\n%s", dump_scapy_packet_show_output(rec_pkt)) @@ -276,7 +280,7 @@ def test_dscp_to_queue_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 - duthosts, rand_one_dut_hostname, skip_traffic_test # noqa F811 + duthosts, rand_one_dut_hostname ): """ Test if DSCP to Q mapping for inner header is matching with outer header during decap on active @@ -296,9 +300,6 @@ def test_dscp_to_queue_during_decap_on_active( duthost.shell('sonic-clear queuecounters') logging.info("Clearing queue counters before starting traffic") - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with stop_garp(ptfhost): ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -309,7 +310,12 @@ def test_dscp_to_queue_during_decap_on_active( exp_dscp = exp_tos >> 2 exp_queue = derive_queue_id_from_dscp(duthost, exp_dscp, False) - _, rec_pkt = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + result = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index], timeout=10) + if isinstance(result, tuple): + _, rec_pkt = result + elif isinstance(result, bool): + logging.info("Using dummy testutils to skip traffic test, skip following verify steps.") + return rec_pkt = Ether(rec_pkt) logging.info("received decap packet:\n%s", 
dump_scapy_packet_show_output(rec_pkt)) @@ -351,7 +357,6 @@ def test_dscp_to_queue_during_encap_on_standby( rand_one_dut_hostname, write_standby, setup_standby_ports_on_rand_selected_tor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if DSCP to Q mapping for outer header is matching with inner header during encap on standby @@ -372,9 +377,6 @@ def test_dscp_to_queue_during_encap_on_standby( ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) @@ -384,7 +386,6 @@ def test_ecn_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on inner header is matching with outer during decap on active @@ -405,9 +406,6 @@ def test_ecn_during_decap_on_active( exp_tos = encapsulated_packet[IP].payload[IP].tos exp_ecn = exp_tos & 3 - if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with stop_garp(ptfhost): tor.shell("portstat -c") tor.shell("show arp") @@ -425,7 +423,6 @@ def test_ecn_during_encap_on_standby( tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 write_standby, setup_standby_ports_on_rand_selected_tor, # noqa F811 - skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on outer header is matching with inner during encap on standby @@ -440,8 +437,5 @@ def test_ecn_during_encap_on_standby( ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) 
- if skip_traffic_test is True: - logging.info("Skip following test due traffic test skipped") - return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) diff --git a/tests/dualtor/test_tunnel_memory_leak.py b/tests/dualtor/test_tunnel_memory_leak.py index dbb46638433..357554cc00a 100644 --- a/tests/dualtor/test_tunnel_memory_leak.py +++ b/tests/dualtor/test_tunnel_memory_leak.py @@ -22,7 +22,6 @@ from tests.common.dualtor.dual_tor_utils import delete_neighbor from tests.common.helpers.dut_utils import get_program_info from tests.common.fixtures.ptfhost_utils import run_garp_service, run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until @@ -118,8 +117,7 @@ def _check_memory(duthost): def test_tunnel_memory_leak(toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder, # noqa F811 - skip_traffic_test): # noqa F811 + ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder): # noqa F811 """ Test if there is memory leak for service tunnel_packet_handler. 
Send ip packets from standby TOR T1 to Server, standby TOR will @@ -155,6 +153,7 @@ def prepare_services(ptfhost): all_servers_ips = mux_cable_server_ip(upper_tor_host) unexpected_count = 0 expected_count = 0 + asic_type = upper_tor_host.facts["asic_type"] with prepare_services(ptfhost): # Delete the neighbors @@ -173,9 +172,10 @@ def prepare_services(ptfhost): pkt, exp_pkt = build_packet_to_server(lower_tor_host, ptfadapter, server_ipv4) - if skip_traffic_test is True: - logging.info("Skip traffic test.") - continue + if asic_type == "vs": + logging.info("ServerTrafficMonitor do not support on KVM dualtor, skip following steps.") + return + server_traffic_monitor = ServerTrafficMonitor( upper_tor_host, ptfhost, vmhost, tbinfo, iface, conn_graph_facts, exp_pkt, existing=True, is_mocked=False diff --git a/tests/dualtor_io/test_heartbeat_failure.py b/tests/dualtor_io/test_heartbeat_failure.py index 49afb7994ad..eddcf71e51b 100644 --- a/tests/dualtor_io/test_heartbeat_failure.py +++ b/tests/dualtor_io/test_heartbeat_failure.py @@ -1,5 +1,4 @@ import pytest -import logging from tests.common.dualtor.control_plane_utils import verify_tor_states from tests.common.dualtor.data_plane_utils import send_t1_to_server_with_action, \ @@ -10,7 +9,6 @@ from tests.common.dualtor.tor_failure_utils import shutdown_tor_heartbeat # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 from tests.common.dualtor.dual_tor_common import CableType @@ -36,18 +34,16 @@ def ignore_expected_loganalyzer_exception(loganalyzer, duthosts): def test_active_tor_heartbeat_failure_upstream( - toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - 
send_server_to_t1_with_action, shutdown_tor_heartbeat, cable_type, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 + send_server_to_t1_with_action, shutdown_tor_heartbeat, cable_type # noqa F811 ): """ Send upstream traffic and stop the LinkProber module on the active ToR. Confirm switchover and disruption lasts < 1 second. """ - logging.info("skip_traffic_test: {}".format(skip_traffic_test)) send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) if cable_type == CableType.active_standby: @@ -68,7 +64,7 @@ def test_active_tor_heartbeat_failure_upstream( @pytest.mark.enable_active_active def test_active_tor_heartbeat_failure_downstream_active( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, cable_type, skip_traffic_test # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat, cable_type # noqa F811 ): """ Send downstream traffic from T1 to the active ToR and stop the LinkProber module on the active ToR. 
@@ -76,8 +72,7 @@ def test_active_tor_heartbeat_failure_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) if cable_type == CableType.active_standby: @@ -97,15 +92,14 @@ def test_active_tor_heartbeat_failure_downstream_active( def test_active_tor_heartbeat_failure_downstream_standby( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the standby ToR and stop the LinkProber module on the active ToR. Confirm switchover and disruption lasts < 1 second. """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_tor_heartbeat(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -115,15 +109,14 @@ def test_active_tor_heartbeat_failure_downstream_standby( def test_standby_tor_heartbeat_failure_upstream( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_server_to_t1_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_server_to_t1_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send upstream traffic and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. 
""" send_server_to_t1_with_action( upper_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -133,15 +126,14 @@ def test_standby_tor_heartbeat_failure_upstream( def test_standby_tor_heartbeat_failure_downstream_active( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the active ToR and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. """ send_t1_to_server_with_action( upper_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -151,15 +143,14 @@ def test_standby_tor_heartbeat_failure_downstream_active( def test_standby_tor_heartbeat_failure_downstream_standby( toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 - send_t1_to_server_with_action, shutdown_tor_heartbeat, skip_traffic_test): # noqa F811 + send_t1_to_server_with_action, shutdown_tor_heartbeat): # noqa F811 """ Send downstream traffic from T1 to the standby ToR and stop the LinkProber module on the standby ToR. Confirm no switchover and no disruption. 
""" send_t1_to_server_with_action( lower_tor_host, verify=True, - action=lambda: shutdown_tor_heartbeat(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_tor_heartbeat(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_link_drop.py b/tests/dualtor_io/test_link_drop.py index 27909f13026..5330af7d6a8 100644 --- a/tests/dualtor_io/test_link_drop.py +++ b/tests/dualtor_io/test_link_drop.py @@ -18,7 +18,6 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import ActiveActivePortID from tests.common.dualtor.dual_tor_common import active_active_ports # noqa F401 @@ -97,7 +96,7 @@ def _drop_flow_upper_tor_active_active(): def test_active_link_drop_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - drop_flow_upper_tor_active_active, cable_type, skip_traffic_test # noqa F811 + drop_flow_upper_tor_active_active, cable_type # noqa F811 ): """ Send traffic from servers to T1 and remove the flow between the servers and the active ToR. 
@@ -109,8 +108,7 @@ def test_active_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -125,8 +123,7 @@ def test_active_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=1, - action=drop_flow_upper_tor_active_active, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_active_active ) verify_tor_states( expected_active_host=lower_tor_host, @@ -141,7 +138,7 @@ def test_active_link_drop_upstream( def test_active_link_drop_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - drop_flow_upper_tor_active_active, cable_type, skip_traffic_test # noqa F811 + drop_flow_upper_tor_active_active, cable_type # noqa F811 ): """ Send traffic from the T1s to the servers via the active Tor and remove the flow between the @@ -154,8 +151,7 @@ def test_active_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -170,8 +166,7 @@ def test_active_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=1, - action=drop_flow_upper_tor_active_active, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_active_active ) verify_tor_states( expected_active_host=lower_tor_host, @@ -184,8 +179,7 @@ def test_active_link_drop_downstream_active( def test_active_link_drop_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all, # noqa F811 - 
skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor, drop_flow_upper_tor_all # noqa F811 ): """ Send traffic from the T1s to the servers via the standby Tor and remove the flow between the @@ -197,8 +191,7 @@ def test_active_link_drop_downstream_standby( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=3, - action=drop_flow_upper_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_upper_tor_all ) verify_tor_states( expected_active_host=lower_tor_host, @@ -210,7 +203,7 @@ def test_active_link_drop_downstream_standby( def test_standby_link_drop_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from servers to T1 and remove the flow between the servers and the standby ToR. @@ -221,8 +214,7 @@ def test_standby_link_drop_upstream( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) verify_tor_states( expected_active_host=upper_tor_host, @@ -235,7 +227,7 @@ def test_standby_link_drop_upstream( def test_standby_link_drop_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from the T1s to the servers via the active Tor and remove the flow between the @@ -247,8 +239,7 @@ def test_standby_link_drop_downstream_active( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) 
verify_tor_states( expected_active_host=upper_tor_host, @@ -261,7 +252,7 @@ def test_standby_link_drop_downstream_active( def test_standby_link_drop_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 check_simulator_flap_counter, drop_flow_lower_tor_all, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, skip_traffic_test # noqa F811 + toggle_all_simulator_ports_to_upper_tor # noqa F811 ): """ Send traffic from the T1s to the servers via the standby Tor and remove the flow between the @@ -273,8 +264,7 @@ def test_standby_link_drop_downstream_standby( verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, allowed_disruption=2, - action=drop_flow_lower_tor_all, - skip_traffic_test=skip_traffic_test + action=drop_flow_lower_tor_all ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_link_failure.py b/tests/dualtor_io/test_link_failure.py index 580f73d805d..54aada394b5 100644 --- a/tests/dualtor_io/test_link_failure.py +++ b/tests/dualtor_io/test_link_failure.py @@ -11,7 +11,6 @@ from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_upper_tor # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import active_active_ports # noqa F401 from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 @@ -28,7 +27,7 @@ def test_active_link_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, cable_type, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs, cable_type # noqa F811 ): """ Send traffic from server to T1 and 
shutdown the active ToR link. @@ -37,8 +36,7 @@ def test_active_link_down_upstream( if cable_type == CableType.active_active: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -51,8 +49,7 @@ def test_active_link_down_upstream( if cable_type == CableType.active_standby: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( @@ -67,7 +64,7 @@ def test_active_link_down_upstream( def test_active_link_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, cable_type, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs, cable_type # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the active ToR link. 
@@ -76,8 +73,7 @@ def test_active_link_down_downstream_active( if cable_type == CableType.active_standby: send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -88,8 +84,7 @@ def test_active_link_down_downstream_active( if cable_type == CableType.active_active: send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -103,7 +98,7 @@ def test_active_link_down_downstream_active( def test_active_link_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_upper_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_upper_tor_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdown the active ToR link. 
@@ -111,8 +106,7 @@ def test_active_link_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=3, action=shutdown_fanout_upper_tor_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -124,7 +118,7 @@ def test_active_link_down_downstream_standby( def test_standby_link_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the standby ToR link. @@ -132,8 +126,7 @@ def test_standby_link_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -145,7 +138,7 @@ def test_standby_link_down_upstream( def test_standby_link_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the standby ToR link. 
@@ -153,8 +146,7 @@ def test_standby_link_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -166,7 +158,7 @@ def test_standby_link_down_downstream_active( def test_standby_link_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_fanout_lower_tor_intfs, skip_traffic_test # noqa F811 + shutdown_fanout_lower_tor_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdwon the standby ToR link. @@ -174,8 +166,7 @@ def test_standby_link_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=2, action=shutdown_fanout_lower_tor_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -187,7 +178,7 @@ def test_standby_link_down_downstream_standby( def test_active_tor_downlink_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the active ToR downlink on DUT. 
@@ -195,8 +186,7 @@ def test_active_tor_downlink_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -208,7 +198,7 @@ def test_active_tor_downlink_down_upstream( def test_active_tor_downlink_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the active ToR downlink on DUT. @@ -216,8 +206,7 @@ def test_active_tor_downlink_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -229,7 +218,7 @@ def test_active_tor_downlink_down_downstream_active( def test_active_tor_downlink_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_upper_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_upper_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdown the active ToR downlink on DUT. 
@@ -237,8 +226,7 @@ def test_active_tor_downlink_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_upper_tor_downlink_intfs ) verify_tor_states( expected_active_host=lower_tor_host, @@ -250,7 +238,7 @@ def test_active_tor_downlink_down_downstream_standby( def test_standby_tor_downlink_down_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from server to T1 and shutdown the standby ToR downlink on DUT. @@ -258,8 +246,7 @@ def test_standby_tor_downlink_down_upstream( """ send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -271,7 +258,7 @@ def test_standby_tor_downlink_down_upstream( def test_standby_tor_downlink_down_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to active ToR and shutdown the standby ToR downlink on DUT. 
@@ -279,8 +266,7 @@ def test_standby_tor_downlink_down_downstream_active( """ send_t1_to_server_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, @@ -292,7 +278,7 @@ def test_standby_tor_downlink_down_downstream_active( def test_standby_tor_downlink_down_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_lower_tor_downlink_intfs, skip_traffic_test # noqa F811 + shutdown_lower_tor_downlink_intfs # noqa F811 ): """ Send traffic from T1 to standby ToR and shutdwon the standby ToR downlink on DUT. @@ -300,8 +286,7 @@ def test_standby_tor_downlink_down_downstream_standby( """ send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs, - skip_traffic_test=skip_traffic_test + allowed_disruption=1, action=shutdown_lower_tor_downlink_intfs ) verify_tor_states( expected_active_host=upper_tor_host, diff --git a/tests/dualtor_io/test_normal_op.py b/tests/dualtor_io/test_normal_op.py index 20781585e6e..e71df5097e6 100644 --- a/tests/dualtor_io/test_normal_op.py +++ b/tests/dualtor_io/test_normal_op.py @@ -14,7 +14,6 @@ from tests.common.dualtor.dual_tor_utils import check_simulator_flap_counter # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC, CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC from tests.common.utilities import wait_until from tests.common.helpers.assertions 
import pytest_assert @@ -29,19 +28,16 @@ def test_normal_op_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """Send upstream traffic and confirm no disruption or switchover occurs""" if cable_type == CableType.active_standby: - send_server_to_t1_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_t1_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host, skip_tunnel_route=False) if cable_type == CableType.active_active: - send_server_to_t1_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_t1_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type, @@ -52,21 +48,18 @@ def test_normal_op_upstream(upper_tor_host, lower_tor_host, # noqa F def test_normal_op_downstream_upper_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and confirm no disruption or switchover occurs """ if cable_type == CableType.active_standby: - send_t1_to_server_with_action(upper_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: - send_t1_to_server_with_action(upper_tor_host, verify=True, - stop_after=60, 
skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(upper_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -76,21 +69,18 @@ def test_normal_op_downstream_upper_tor(upper_tor_host, lower_tor_host, def test_normal_op_downstream_lower_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the lower ToR and confirm no disruption or switchover occurs """ if cable_type == CableType.active_standby: - send_t1_to_server_with_action(lower_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(lower_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: - send_t1_to_server_with_action(lower_tor_host, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_t1_to_server_with_action(lower_tor_host, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -101,8 +91,7 @@ def test_normal_op_active_server_to_active_server(upper_tor_host, lower_tor_host send_server_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 cable_type, # noqa F811 - select_test_mux_ports, # noqa F811 - skip_traffic_test): # noqa F811 + select_test_mux_ports): # noqa F811 """ Send server to server traffic in active-active setup and confirm no disruption or switchover occurs. 
""" @@ -110,15 +99,13 @@ def test_normal_op_active_server_to_active_server(upper_tor_host, lower_tor_host test_mux_ports = select_test_mux_ports(cable_type, 2) if cable_type == CableType.active_standby: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host, skip_tunnel_route=False) if cable_type == CableType.active_active: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type, @@ -130,8 +117,7 @@ def test_normal_op_active_server_to_standby_server(upper_tor_host, lower_tor_hos send_server_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 cable_type, force_standby_tor, # noqa F811 - select_test_mux_ports, # noqa F811 - skip_traffic_test): # noqa F811 + select_test_mux_ports): # noqa F811 """ Send server to server traffic in active-standby setup and confirm no disruption or switchover occurs. 
""" @@ -147,12 +133,10 @@ def _is_mux_port_standby(duthost, mux_port): "failed to toggle mux port %s to standby on DUT %s" % (tx_mux_port, upper_tor_host.hostname)) if cable_type == CableType.active_standby: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) if cable_type == CableType.active_active: - send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, - stop_after=60, skip_traffic_test=skip_traffic_test) + send_server_to_server_with_action(upper_tor_host, test_mux_ports, verify=True, stop_after=60) # TODO: Add per-port db check @@ -162,8 +146,7 @@ def _is_mux_port_standby(duthost, mux_port): def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and `config reload` the active ToR. Confirm switchover occurs and disruption lasted < 1 second for active-standby ports. 
@@ -171,15 +154,13 @@ def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -189,16 +170,14 @@ def test_upper_tor_config_reload_upstream(upper_tor_host, lower_tor_host, def test_lower_tor_config_reload_upstream(upper_tor_host, lower_tor_host, # noqa F811 send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and `config reload` the lower ToR. Confirm no switchover occurs and no disruption. 
""" if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) @@ -208,23 +187,20 @@ def test_lower_tor_config_reload_upstream(upper_tor_host, lower_tor_host, def test_lower_tor_config_reload_downstream_upper_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and `config reload` the lower ToR. Confirm no switchover occurs and no disruption """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=upper_tor_host, expected_standby_host=lower_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(upper_tor_host, verify=True, - action=lambda: config_reload(lower_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(lower_tor_host, wait=0)) verify_tor_states(expected_active_host=[upper_tor_host, lower_tor_host], expected_standby_host=None, cable_type=cable_type) @@ -234,8 +210,7 @@ def test_lower_tor_config_reload_downstream_upper_tor(upper_tor_host, lower_tor_ def test_upper_tor_config_reload_downstream_lower_tor(upper_tor_host, lower_tor_host, # noqa F811 send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the 
lower ToR and `config reload` the upper ToR. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. @@ -243,8 +218,7 @@ def test_upper_tor_config_reload_downstream_lower_tor(upper_tor_host, lower_tor_ """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(lower_tor_host, verify=True, delay=CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC, - action=lambda: config_reload(upper_tor_host, wait=0), - skip_traffic_test=skip_traffic_test) + action=lambda: config_reload(upper_tor_host, wait=0)) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) @@ -254,8 +228,7 @@ def test_tor_switch_upstream(upper_tor_host, lower_tor_host, # no send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send upstream traffic and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -263,15 +236,13 @@ def test_tor_switch_upstream(upper_tor_host, lower_tor_host, # no """ if cable_type == CableType.active_standby: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_server_to_t1_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", @@ -283,8 +254,7 @@ def test_tor_switch_downstream_active(upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the upper ToR and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -292,15 +262,13 @@ def test_tor_switch_downstream_active(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", @@ -312,8 +280,7 @@ def test_tor_switch_downstream_standby(upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 force_active_tor, force_standby_tor, # noqa F811 - cable_type, # noqa F811 - skip_traffic_test): # noqa F811 + cable_type): # noqa F811 """ Send downstream traffic to the lower ToR and perform switchover via CLI. Confirm switchover occurs and disruption lasts < 1 second for active-standby ports. 
@@ -321,15 +288,13 @@ def test_tor_switch_downstream_standby(upper_tor_host, lower_tor_host, """ if cable_type == CableType.active_standby: send_t1_to_server_with_action(lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: force_active_tor(lower_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_active_tor(lower_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host) if cable_type == CableType.active_active: send_t1_to_server_with_action(lower_tor_host, verify=True, - action=lambda: force_standby_tor(upper_tor_host, 'all'), - skip_traffic_test=skip_traffic_test) + action=lambda: force_standby_tor(upper_tor_host, 'all')) verify_tor_states(expected_active_host=lower_tor_host, expected_standby_host=upper_tor_host, expected_standby_health="healthy", diff --git a/tests/dualtor_io/test_tor_bgp_failure.py b/tests/dualtor_io/test_tor_bgp_failure.py index 91783bd14fe..c6643a08134 100644 --- a/tests/dualtor_io/test_tor_bgp_failure.py +++ b/tests/dualtor_io/test_tor_bgp_failure.py @@ -11,7 +11,6 @@ from tests.common.dualtor.tor_failure_utils import shutdown_bgp_sessions_on_duthost from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, \ copy_ptftests_directory, change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa F401 from tests.common.dualtor.constants import MUX_SIM_ALLOWED_DISRUPTION_SEC from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 @@ -80,7 +79,7 @@ def ignore_expected_loganalyzer_exception(loganalyzer, duthosts): def test_active_tor_kill_bgpd_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, kill_bgpd, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor, 
kill_bgpd): # noqa F811 ''' Case: Server -> ToR -> T1 (Active ToR BGP Down) Action: Shutdown all BGP sessions on the active ToR @@ -92,8 +91,7 @@ def test_active_tor_kill_bgpd_upstream( ''' send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: kill_bgpd(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -103,7 +101,7 @@ def test_active_tor_kill_bgpd_upstream( def test_standby_tor_kill_bgpd_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 - toggle_all_simulator_ports_to_upper_tor, kill_bgpd, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_upper_tor, kill_bgpd): # noqa F811 ''' Case: Server -> ToR -> T1 (Standby ToR BGP Down) Action: Shutdown all BGP sessions on the standby ToR @@ -114,8 +112,7 @@ def test_standby_tor_kill_bgpd_upstream( ''' send_server_to_t1_with_action( upper_tor_host, verify=True, - action=lambda: kill_bgpd(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(lower_tor_host) ) verify_tor_states( expected_active_host=upper_tor_host, @@ -126,7 +123,7 @@ def test_standby_tor_kill_bgpd_upstream( def test_standby_tor_kill_bgpd_downstream_active( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, kill_bgpd, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tunnel_traffic_monitor): # noqa F811 ''' Case: T1 -> Active ToR -> Server (Standby ToR BGP Down) Action: Shutdown all BGP sessions on the standby ToR @@ -137,8 +134,7 @@ def test_standby_tor_kill_bgpd_downstream_active( with tunnel_traffic_monitor(lower_tor_host, existing=False): send_t1_to_server_with_action( upper_tor_host, verify=True, - action=lambda: kill_bgpd(lower_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(lower_tor_host) ) 
verify_tor_states( expected_active_host=upper_tor_host, @@ -149,7 +145,7 @@ def test_standby_tor_kill_bgpd_downstream_active( def test_active_tor_kill_bgpd_downstream_standby( upper_tor_host, lower_tor_host, send_t1_to_server_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, kill_bgpd, # noqa F811 - tunnel_traffic_monitor, skip_traffic_test): # noqa F811 + tunnel_traffic_monitor): # noqa F811 ''' Case: T1 -> Standby ToR -> Server (Active ToR BGP Down) Action: Shutdown all BGP sessions on the active ToR @@ -160,8 +156,7 @@ def test_active_tor_kill_bgpd_downstream_standby( ''' send_t1_to_server_with_action( lower_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: kill_bgpd(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: kill_bgpd(upper_tor_host) ) verify_tor_states( expected_active_host=lower_tor_host, @@ -173,7 +168,7 @@ def test_active_tor_kill_bgpd_downstream_standby( def test_active_tor_shutdown_bgp_sessions_upstream( upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811 toggle_all_simulator_ports_to_upper_tor, # noqa F811 - shutdown_bgp_sessions, cable_type, skip_traffic_test # noqa F811 + shutdown_bgp_sessions, cable_type # noqa F811 ): """ Case: Server -> ToR -> T1 (Active ToR BGP Down) @@ -187,15 +182,13 @@ def test_active_tor_shutdown_bgp_sessions_upstream( if cable_type == CableType.active_standby: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_bgp_sessions(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_bgp_sessions(upper_tor_host) ) if cable_type == CableType.active_active: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_bgp_sessions(upper_tor_host), - skip_traffic_test=skip_traffic_test + action=lambda: shutdown_bgp_sessions(upper_tor_host) ) if cable_type == CableType.active_active: 
diff --git a/tests/dualtor_mgmt/test_ingress_drop.py b/tests/dualtor_mgmt/test_ingress_drop.py index c98be9db041..75169847743 100644 --- a/tests/dualtor_mgmt/test_ingress_drop.py +++ b/tests/dualtor_mgmt/test_ingress_drop.py @@ -16,8 +16,6 @@ from tests.common.dualtor.nic_simulator_control import mux_status_from_nic_simulator # noqa F401 from tests.common.dualtor.nic_simulator_control import stop_nic_simulator # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 - from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -104,7 +102,7 @@ def selected_mux_port(cable_type, active_active_ports, active_standby_ports): @pytest.mark.enable_active_active -def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_port, upper_tor_host, skip_traffic_test): # noqa F811 +def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_port, upper_tor_host): # noqa F811 """ Aims to verify if orchagent installs ingress drop ACL when the port comes to standby. 
@@ -131,7 +129,7 @@ def test_ingress_drop(cable_type, ptfadapter, setup_mux, tbinfo, selected_mux_po if cable_type == CableType.active_active: verify_upstream_traffic(upper_tor_host, ptfadapter, tbinfo, selected_mux_port, - server_ip, pkt_num=10, drop=False, skip_traffic_test=skip_traffic_test) + server_ip, pkt_num=10, drop=False) elif cable_type == CableType.active_standby: verify_upstream_traffic(upper_tor_host, ptfadapter, tbinfo, selected_mux_port, - server_ip, pkt_num=10, drop=True, skip_traffic_test=skip_traffic_test) + server_ip, pkt_num=10, drop=True) From 1a10ecdf5a6529a354366366d305ff9616ea99fe Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 08:34:06 +0800 Subject: [PATCH 038/175] Remove skip_traffic_test fixture in ecmp tests (#15431) What is the motivation for this PR? Currently we are using a conditional mark to add a marker, then using a pytest hook to redirect the testutils.verify function to a function that always returns True in order to skip traffic tests. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove the skip_traffic_test fixture in ecmp tests How did you verify/test it? 
--- .../ecmp/inner_hashing/test_inner_hashing.py | 40 ++++---- .../inner_hashing/test_inner_hashing_lag.py | 5 +- .../inner_hashing/test_wr_inner_hashing.py | 93 +++++++++---------- .../test_wr_inner_hashing_lag.py | 50 +++++----- 4 files changed, 88 insertions(+), 100 deletions(-) diff --git a/tests/ecmp/inner_hashing/test_inner_hashing.py b/tests/ecmp/inner_hashing/test_inner_hashing.py index fe45fe66169..896520b26a5 100644 --- a/tests/ecmp/inner_hashing/test_inner_hashing.py +++ b/tests/ecmp/inner_hashing/test_inner_hashing.py @@ -13,7 +13,6 @@ from tests.ptf_runner import ptf_runner from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST,\ VXLAN_PORT, PTF_QLEN, check_pbh_counters, OUTER_ENCAP_FORMATS, NVGRE_TNI, IP_VERSIONS_LIST, config_pbh -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -35,7 +34,7 @@ def setup_dynamic_pbh(self, duthost, vlan_ptf_ports, tbinfo): def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, duthost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing dynamic inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest'): @@ -73,22 +72,22 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv "symmetric_hashing": symmetric_hashing} duthost.shell("sonic-clear pbh statistics") - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params=ptf_params, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) - retry_call(check_pbh_counters, - fargs=[duthost, outer_ipver, inner_ipver, balancing_test_times, - symmetric_hashing, 
hash_keys, lag_mem_ptf_ports_groups], - tries=5, - delay=5) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params=ptf_params, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) + + retry_call(check_pbh_counters, + fargs=[duthost, outer_ipver, inner_ipver, balancing_test_times, + symmetric_hashing, hash_keys, lag_mem_ptf_ports_groups], + tries=5, + delay=5) if update_outer_ipver == outer_ipver and update_inner_ipver == inner_ipver: logging.info("Validate dynamic inner hash Edit Flow for outer {} and inner {} ip versions with" @@ -105,8 +104,7 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv with allure.step('Run again the ptf test InnerHashTest after updating the rules'): logging.info('Run again the ptf test InnerHashTest after updating the rules') duthost.shell("sonic-clear pbh statistics") - if skip_traffic_test is True: - return + ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", @@ -128,7 +126,7 @@ def test_inner_hashing(self, request, hash_keys, ptfhost, outer_ipver, inner_ipv class TestStaticInnerHashing(): def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, - vlan_ptf_ports, symmetric_hashing, lag_mem_ptf_ports_groups, skip_traffic_test): # noqa F811 + vlan_ptf_ports, symmetric_hashing, lag_mem_ptf_ports_groups): logging.info("Executing static inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -138,8 +136,6 @@ def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, route outer_src_ip_range, outer_dst_ip_range = get_src_dst_ip_range(outer_ipver) inner_src_ip_range, inner_dst_ip_range = get_src_dst_ip_range(inner_ipver) - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", diff --git 
a/tests/ecmp/inner_hashing/test_inner_hashing_lag.py b/tests/ecmp/inner_hashing/test_inner_hashing_lag.py index ed616569865..7c1ccc2f00f 100644 --- a/tests/ecmp/inner_hashing/test_inner_hashing_lag.py +++ b/tests/ecmp/inner_hashing/test_inner_hashing_lag.py @@ -12,7 +12,6 @@ from tests.ptf_runner import ptf_runner from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST,\ VXLAN_PORT, PTF_QLEN, check_pbh_counters, OUTER_ENCAP_FORMATS, NVGRE_TNI, setup_lag_config, config_pbh_lag -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -33,7 +32,7 @@ def setup_dynamic_pbh(self, duthost, lag_port_map, lag_ip_map): def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, duthost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing dynamic inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest'): @@ -54,8 +53,6 @@ def test_inner_hashing(self, hash_keys, ptfhost, outer_ipver, inner_ipver, route balancing_test_times = 20 balancing_range = 0.5 - if skip_traffic_test is True: - return ptf_runner(ptfhost, "ptftests", "inner_hash_test.InnerHashTest", diff --git a/tests/ecmp/inner_hashing/test_wr_inner_hashing.py b/tests/ecmp/inner_hashing/test_wr_inner_hashing.py index 02d697cea33..28325423dc3 100644 --- a/tests/ecmp/inner_hashing/test_wr_inner_hashing.py +++ b/tests/ecmp/inner_hashing/test_wr_inner_hashing.py @@ -9,7 +9,6 @@ from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST, VXLAN_PORT,\ PTF_QLEN, OUTER_ENCAP_FORMATS, NVGRE_TNI, config_pbh from tests.ptf_runner import ptf_runner -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = 
logging.getLogger(__name__) @@ -29,7 +28,7 @@ def setup_dynamic_pbh(self, duthost, vlan_ptf_ports, tbinfo): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing warm boot dynamic inner hash test for outer {} and inner {} with symmetric_hashing" " set to {}".format(outer_ipver, inner_ipver, str(symmetric_hashing))) with allure.step('Run ptf test InnerHashTest and warm-reboot in parallel'): @@ -57,30 +56,29 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - "router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "balancing_test_times": balancing_test_times, - "balancing_range": balancing_range, - "outer_encap_formats": outer_encap_format, - "nvgre_tni": NVGRE_TNI, - "symmetric_hashing": symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + 
"inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "balancing_test_times": balancing_test_times, + "balancing_range": balancing_range, + "outer_encap_formats": outer_encap_format, + "nvgre_tni": NVGRE_TNI, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() @@ -88,7 +86,7 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv class TestWRStaticInnerHashing(): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, - vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, skip_traffic_test): # noqa F811 + vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups): logging.info("Executing static inner hash test for outer {} and inner {} with symmetric_hashing set to {}" .format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -102,25 +100,24 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - "router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "outer_encap_formats": OUTER_ENCAP_FORMATS, - "symmetric_hashing": 
symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + "inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "outer_encap_formats": OUTER_ENCAP_FORMATS, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() diff --git a/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py b/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py index 6ce69b57d71..e6371e9a06d 100644 --- a/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py +++ b/tests/ecmp/inner_hashing/test_wr_inner_hashing_lag.py @@ -9,7 +9,6 @@ from tests.ecmp.inner_hashing.conftest import get_src_dst_ip_range, FIB_INFO_FILE_DST, VXLAN_PORT,\ PTF_QLEN, OUTER_ENCAP_FORMATS, NVGRE_TNI, setup_lag_config, config_pbh_lag from tests.ptf_runner import ptf_runner -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -31,7 +30,7 @@ def setup_dynamic_pbh(self, duthost, lag_port_map, lag_ip_map): def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipver, router_mac, vlan_ptf_ports, symmetric_hashing, localhost, lag_mem_ptf_ports_groups, - get_function_completeness_level, skip_traffic_test): # noqa F811 + get_function_completeness_level): logging.info("Executing warm boot dynamic inner hash test for outer {} and inner {} with symmetric_hashing" " set to {}".format(outer_ipver, inner_ipver, str(symmetric_hashing))) timestamp = 
datetime.now().strftime('%Y-%m-%d-%H:%M:%S') @@ -58,28 +57,27 @@ def test_inner_hashing(self, duthost, hash_keys, ptfhost, outer_ipver, inner_ipv reboot_thr = threading.Thread(target=reboot, args=(duthost, localhost, 'warm', 10, 0, 0, True, True,)) reboot_thr.start() - if not skip_traffic_test: - ptf_runner(ptfhost, - "ptftests", - "inner_hash_test.InnerHashTest", - platform_dir="ptftests", - params={"fib_info": FIB_INFO_FILE_DST, - "router_mac": router_mac, - "src_ports": vlan_ptf_ports, - "exp_port_groups": lag_mem_ptf_ports_groups, - "hash_keys": hash_keys, - "vxlan_port": VXLAN_PORT, - "inner_src_ip_range": ",".join(inner_src_ip_range), - "inner_dst_ip_range": ",".join(inner_dst_ip_range), - "outer_src_ip_range": ",".join(outer_src_ip_range), - "outer_dst_ip_range": ",".join(outer_dst_ip_range), - "balancing_test_times": balancing_test_times, - "balancing_range": balancing_range, - "outer_encap_formats": outer_encap_format, - "nvgre_tni": NVGRE_TNI, - "symmetric_hashing": symmetric_hashing}, - log_file=log_file, - qlen=PTF_QLEN, - socket_recv_size=16384, - is_python3=True) + ptf_runner(ptfhost, + "ptftests", + "inner_hash_test.InnerHashTest", + platform_dir="ptftests", + params={"fib_info": FIB_INFO_FILE_DST, + "router_mac": router_mac, + "src_ports": vlan_ptf_ports, + "exp_port_groups": lag_mem_ptf_ports_groups, + "hash_keys": hash_keys, + "vxlan_port": VXLAN_PORT, + "inner_src_ip_range": ",".join(inner_src_ip_range), + "inner_dst_ip_range": ",".join(inner_dst_ip_range), + "outer_src_ip_range": ",".join(outer_src_ip_range), + "outer_dst_ip_range": ",".join(outer_dst_ip_range), + "balancing_test_times": balancing_test_times, + "balancing_range": balancing_range, + "outer_encap_formats": outer_encap_format, + "nvgre_tni": NVGRE_TNI, + "symmetric_hashing": symmetric_hashing}, + log_file=log_file, + qlen=PTF_QLEN, + socket_recv_size=16384, + is_python3=True) reboot_thr.join() From 7a193b7ada25b20b8d8610f90d164759999492a1 Mon Sep 17 00:00:00 2001 From: Riff Date: 
Thu, 7 Nov 2024 16:48:37 -0800 Subject: [PATCH 039/175] Add topo generator and t1-isolated-d128 topo. (#15402) * Add topo generator and isolated d128 topo. * minor update. * Minor fix and regenerate the topo. * Fix downlink asn to make them rotate from the base number. --- ansible/generate_topo.py | 194 ++ ansible/templates/topo_t1-isolated.j2 | 47 + ansible/vars/topo_t1-isolated-d128.yml | 2964 ++++++++++++++++++++++++ 3 files changed, 3205 insertions(+) create mode 100755 ansible/generate_topo.py create mode 100644 ansible/templates/topo_t1-isolated.j2 create mode 100644 ansible/vars/topo_t1-isolated-d128.yml diff --git a/ansible/generate_topo.py b/ansible/generate_topo.py new file mode 100755 index 00000000000..b78b15bf724 --- /dev/null +++ b/ansible/generate_topo.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 + +from typing import Any, Dict, List +import ipaddress +import click +import jinja2 + +# Define the roles for the devices in the topology +roles_cfg = { + "t0": { + "asn": 65100, + "downlink": None, + "uplink": {"role": "t1", "asn": 64600}, + "peer": {"role": "pt0", "asn": 65100}, + }, + "t1": { + "asn": 65100, + "downlink": {"role": "t0", "asn": 64000}, + "uplink": {"role": "t2", "asn": 65200}, + "peer": None, + }, +} + + +# Utility functions to calculate IP addresses +def calc_ipv4_pair(subnet_str, port_id): + subnet = ipaddress.IPv4Network(subnet_str) + return (str(subnet.network_address + 2*port_id), str(subnet.network_address + 2*port_id + 1)) + + +def calc_ipv6_pair(subnet_str, port_id): + subnet = ipaddress.IPv6Network(subnet_str) + return (str(subnet.network_address + 4*port_id+1), str(subnet.network_address + 4*port_id + 2)) + + +def calc_ipv4(subnet_str, port_id): + subnet = ipaddress.IPv4Network(subnet_str) + return str(subnet.network_address + port_id) + + +def calc_ipv6(subnet_str, port_id): + subnet = ipaddress.IPv6Network(subnet_str) + return str(subnet.network_address + port_id) + + +class VM: + """ Class to represent a VM in the topology 
""" + def __init__(self, + port_id: int, + vm_id: int, + name_id: int, + dut_asn: int, + role_cfg: Dict[str, Any], + ip_offset: int = None): + + self.role = role_cfg["role"] + + # IDs of the VM + self.port_id = port_id + self.vm_offset = vm_id + self.ip_offset = vm_id if ip_offset is None else ip_offset + self.name = f"ARISTA{name_id:02d}{self.role.upper()}" + + # VLAN configuration + self.vlans = [port_id] + + # BGP configuration + self.asn = role_cfg["asn"] + self.peer_asn = dut_asn + + # IP addresses + self.dut_intf_ipv4, self.pc_intf_ipv4 = calc_ipv4_pair("10.0.0.0", self.ip_offset) + self.dut_intf_ipv6, self.pc_intf_ipv6 = calc_ipv6_pair("FC00::", self.ip_offset) + self.loopback_ipv4 = calc_ipv4("100.1.0.0", self.ip_offset+1) + self.loopback_ipv6 = calc_ipv6("2064:100::", self.ip_offset+1) + + # Backplane IPs will go with the VM ID + self.bp_ipv4 = calc_ipv4("10.10.246.1", self.vm_offset+1) + self.bp_ipv6 = calc_ipv6("fc0a::1", (self.vm_offset+1)) + + +class HostInterface: + """ Class to represent a host interface in the topology """ + def __init__(self, port_id: int): + self.port_id = port_id + + +def generate_topo(role: str, port_count: int, uplink_ports: List[int], peer_ports: List[int]): + dut_role_cfg = roles_cfg[role] + + vm_list = [] + hostif_list = [] + per_role_vm_count = {} + for port_id in range(0, port_count): + vm = None + hostif = None + + # Get the VM configuration based on the port ID + vm_role_cfg = None + if port_id in uplink_ports: + if dut_role_cfg["uplink"] is None: + raise ValueError("Uplink port specified for a role that doesn't have an uplink") + + vm_role_cfg = dut_role_cfg["uplink"] + + elif port_id in peer_ports: + if dut_role_cfg["peer"] is None: + raise ValueError("Peer port specified for a role that doesn't have a peer") + + vm_role_cfg = dut_role_cfg["peer"] + + else: + # If downlink is not specified, we consider it is host interface + if dut_role_cfg["downlink"] is not None: + vm_role_cfg = dut_role_cfg["downlink"] + 
vm_role_cfg["asn"] += 1 + + # Create the VM or host interface based on the configuration + if vm_role_cfg is not None: + if vm_role_cfg["role"] not in per_role_vm_count: + per_role_vm_count[vm_role_cfg["role"]] = 0 + per_role_vm_count[vm_role_cfg["role"]] += 1 + + vm = VM(port_id, len(vm_list), per_role_vm_count[vm_role_cfg["role"]], dut_role_cfg["asn"], vm_role_cfg) + vm_list.append(vm) + + else: + hostif = HostInterface(port_id) + hostif_list.append(hostif) + + return vm_list, hostif_list + + +def generate_topo_file_content(role: str, + template_file: str, + vm_list: List[VM], + hostif_list: List[HostInterface]): + + with open(template_file) as f: + template = jinja2.Template(f.read()) + + output = template.render(role=role, + dut=roles_cfg[role], + vm_list=vm_list, + hostif_list=hostif_list) + + return output + + +def output_topo_file(role: str, + keyword: str, + downlink_port_count: int, + uplink_port_count: int, + peer_port_count: int, + file_content: str): + downlink_keyword = f"d{downlink_port_count}" if downlink_port_count > 0 else "" + uplink_keyword = f"u{uplink_port_count}" if uplink_port_count > 0 else "" + peer_keyword = f"s{peer_port_count}" if peer_port_count > 0 else "" + + file_path = f"vars/topo_{role}-{keyword}-{downlink_keyword}{uplink_keyword}{peer_keyword}.yml" + + with open(file_path, "w") as f: + f.write(file_content) + + print(f"Generated topology file: {file_path}") + + +@click.command() +@click.option("--role", "-r", required=True, type=click.Choice(['t1']), help="Role of the device") +@click.option("--keyword", "-k", required=True, type=str, help="Keyword for the topology file") +@click.option("--template", "-t", required=True, type=str, help="Path to the Jinja template file") +@click.option("--port-count", "-c", required=True, type=int, help="Number of ports on the device") +@click.option("--uplinks", "-u", required=False, type=str, default="", help="Comma-separated list of uplink ports") +@click.option("--peers", "-p", required=False, 
type=str, default="", help="Comma-separated list of peer ports") +def main(role: str, keyword: str, template: str, port_count: int, uplinks: str, peers: str): + """ + Generate a topology file for a device: + + \b + Examples (in the ansible directory): + - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 128 + - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 232 -u 48,49,58,59,164,165,174,175 + """ + uplink_ports = [int(port) for port in uplinks.split(",")] if uplinks != "" else [] + peer_ports = [int(port) for port in peers.split(",")] if peers != "" else [] + + vm_list, hostif_list = generate_topo(role, port_count, uplink_ports, peer_ports) + file_content = generate_topo_file_content(role, f"templates/topo_{template}.j2", vm_list, hostif_list) + output_topo_file(role, keyword, port_count - len(uplink_ports) - len(peer_ports), len(uplink_ports), + len(peer_ports), file_content) + + +if __name__ == "__main__": + main() diff --git a/ansible/templates/topo_t1-isolated.j2 b/ansible/templates/topo_t1-isolated.j2 new file mode 100644 index 00000000000..0c58680063d --- /dev/null +++ b/ansible/templates/topo_t1-isolated.j2 @@ -0,0 +1,47 @@ +topology: + VMs: +{%- for vm in vm_list %} + {{ vm.name }}: + vlans: + - {{ vm.vlans[0] }} + vm_offset: {{ vm.vm_offset }} +{%- endfor %} + +configuration_properties: + common: + dut_asn: {{ dut.asn }} + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: +{%- for vm in vm_list %} + {{vm.name}}: + properties: + - common + bgp: + asn: {{vm.asn}} + peers: + {{vm.peer_asn}}: + - {{vm.dut_intf_ipv4}} + - {{vm.dut_intf_ipv6}} + interfaces: + Loopback0: + ipv4: {{vm.loopback_ipv4}}/32 + ipv6: {{vm.loopback_ipv6}}/128 + Ethernet1: + ipv4: {{vm.pc_intf_ipv4}}/31 + ipv6: {{vm.pc_intf_ipv6}}/126 + bp_interfaces: + ipv4: {{vm.bp_ipv4}}/24 + ipv6: 
{{vm.bp_ipv6}}/64 +{%- endfor %} diff --git a/ansible/vars/topo_t1-isolated-d128.yml b/ansible/vars/topo_t1-isolated-d128.yml new file mode 100644 index 00000000000..1873728bd2e --- /dev/null +++ b/ansible/vars/topo_t1-isolated-d128.yml @@ -0,0 +1,2964 @@ +topology: + VMs: + ARISTA01T0: + vlans: + - 0 + vm_offset: 0 + ARISTA02T0: + vlans: + - 1 + vm_offset: 1 + ARISTA03T0: + vlans: + - 2 + vm_offset: 2 + ARISTA04T0: + vlans: + - 3 + vm_offset: 3 + ARISTA05T0: + vlans: + - 4 + vm_offset: 4 + ARISTA06T0: + vlans: + - 5 + vm_offset: 5 + ARISTA07T0: + vlans: + - 6 + vm_offset: 6 + ARISTA08T0: + vlans: + - 7 + vm_offset: 7 + ARISTA09T0: + vlans: + - 8 + vm_offset: 8 + ARISTA10T0: + vlans: + - 9 + vm_offset: 9 + ARISTA11T0: + vlans: + - 10 + vm_offset: 10 + ARISTA12T0: + vlans: + - 11 + vm_offset: 11 + ARISTA13T0: + vlans: + - 12 + vm_offset: 12 + ARISTA14T0: + vlans: + - 13 + vm_offset: 13 + ARISTA15T0: + vlans: + - 14 + vm_offset: 14 + ARISTA16T0: + vlans: + - 15 + vm_offset: 15 + ARISTA17T0: + vlans: + - 16 + vm_offset: 16 + ARISTA18T0: + vlans: + - 17 + vm_offset: 17 + ARISTA19T0: + vlans: + - 18 + vm_offset: 18 + ARISTA20T0: + vlans: + - 19 + vm_offset: 19 + ARISTA21T0: + vlans: + - 20 + vm_offset: 20 + ARISTA22T0: + vlans: + - 21 + vm_offset: 21 + ARISTA23T0: + vlans: + - 22 + vm_offset: 22 + ARISTA24T0: + vlans: + - 23 + vm_offset: 23 + ARISTA25T0: + vlans: + - 24 + vm_offset: 24 + ARISTA26T0: + vlans: + - 25 + vm_offset: 25 + ARISTA27T0: + vlans: + - 26 + vm_offset: 26 + ARISTA28T0: + vlans: + - 27 + vm_offset: 27 + ARISTA29T0: + vlans: + - 28 + vm_offset: 28 + ARISTA30T0: + vlans: + - 29 + vm_offset: 29 + ARISTA31T0: + vlans: + - 30 + vm_offset: 30 + ARISTA32T0: + vlans: + - 31 + vm_offset: 31 + ARISTA33T0: + vlans: + - 32 + vm_offset: 32 + ARISTA34T0: + vlans: + - 33 + vm_offset: 33 + ARISTA35T0: + vlans: + - 34 + vm_offset: 34 + ARISTA36T0: + vlans: + - 35 + vm_offset: 35 + ARISTA37T0: + vlans: + - 36 + vm_offset: 36 + ARISTA38T0: + vlans: + - 37 + vm_offset: 
37 + ARISTA39T0: + vlans: + - 38 + vm_offset: 38 + ARISTA40T0: + vlans: + - 39 + vm_offset: 39 + ARISTA41T0: + vlans: + - 40 + vm_offset: 40 + ARISTA42T0: + vlans: + - 41 + vm_offset: 41 + ARISTA43T0: + vlans: + - 42 + vm_offset: 42 + ARISTA44T0: + vlans: + - 43 + vm_offset: 43 + ARISTA45T0: + vlans: + - 44 + vm_offset: 44 + ARISTA46T0: + vlans: + - 45 + vm_offset: 45 + ARISTA47T0: + vlans: + - 46 + vm_offset: 46 + ARISTA48T0: + vlans: + - 47 + vm_offset: 47 + ARISTA49T0: + vlans: + - 48 + vm_offset: 48 + ARISTA50T0: + vlans: + - 49 + vm_offset: 49 + ARISTA51T0: + vlans: + - 50 + vm_offset: 50 + ARISTA52T0: + vlans: + - 51 + vm_offset: 51 + ARISTA53T0: + vlans: + - 52 + vm_offset: 52 + ARISTA54T0: + vlans: + - 53 + vm_offset: 53 + ARISTA55T0: + vlans: + - 54 + vm_offset: 54 + ARISTA56T0: + vlans: + - 55 + vm_offset: 55 + ARISTA57T0: + vlans: + - 56 + vm_offset: 56 + ARISTA58T0: + vlans: + - 57 + vm_offset: 57 + ARISTA59T0: + vlans: + - 58 + vm_offset: 58 + ARISTA60T0: + vlans: + - 59 + vm_offset: 59 + ARISTA61T0: + vlans: + - 60 + vm_offset: 60 + ARISTA62T0: + vlans: + - 61 + vm_offset: 61 + ARISTA63T0: + vlans: + - 62 + vm_offset: 62 + ARISTA64T0: + vlans: + - 63 + vm_offset: 63 + ARISTA65T0: + vlans: + - 64 + vm_offset: 64 + ARISTA66T0: + vlans: + - 65 + vm_offset: 65 + ARISTA67T0: + vlans: + - 66 + vm_offset: 66 + ARISTA68T0: + vlans: + - 67 + vm_offset: 67 + ARISTA69T0: + vlans: + - 68 + vm_offset: 68 + ARISTA70T0: + vlans: + - 69 + vm_offset: 69 + ARISTA71T0: + vlans: + - 70 + vm_offset: 70 + ARISTA72T0: + vlans: + - 71 + vm_offset: 71 + ARISTA73T0: + vlans: + - 72 + vm_offset: 72 + ARISTA74T0: + vlans: + - 73 + vm_offset: 73 + ARISTA75T0: + vlans: + - 74 + vm_offset: 74 + ARISTA76T0: + vlans: + - 75 + vm_offset: 75 + ARISTA77T0: + vlans: + - 76 + vm_offset: 76 + ARISTA78T0: + vlans: + - 77 + vm_offset: 77 + ARISTA79T0: + vlans: + - 78 + vm_offset: 78 + ARISTA80T0: + vlans: + - 79 + vm_offset: 79 + ARISTA81T0: + vlans: + - 80 + vm_offset: 80 + ARISTA82T0: + 
vlans: + - 81 + vm_offset: 81 + ARISTA83T0: + vlans: + - 82 + vm_offset: 82 + ARISTA84T0: + vlans: + - 83 + vm_offset: 83 + ARISTA85T0: + vlans: + - 84 + vm_offset: 84 + ARISTA86T0: + vlans: + - 85 + vm_offset: 85 + ARISTA87T0: + vlans: + - 86 + vm_offset: 86 + ARISTA88T0: + vlans: + - 87 + vm_offset: 87 + ARISTA89T0: + vlans: + - 88 + vm_offset: 88 + ARISTA90T0: + vlans: + - 89 + vm_offset: 89 + ARISTA91T0: + vlans: + - 90 + vm_offset: 90 + ARISTA92T0: + vlans: + - 91 + vm_offset: 91 + ARISTA93T0: + vlans: + - 92 + vm_offset: 92 + ARISTA94T0: + vlans: + - 93 + vm_offset: 93 + ARISTA95T0: + vlans: + - 94 + vm_offset: 94 + ARISTA96T0: + vlans: + - 95 + vm_offset: 95 + ARISTA97T0: + vlans: + - 96 + vm_offset: 96 + ARISTA98T0: + vlans: + - 97 + vm_offset: 97 + ARISTA99T0: + vlans: + - 98 + vm_offset: 98 + ARISTA100T0: + vlans: + - 99 + vm_offset: 99 + ARISTA101T0: + vlans: + - 100 + vm_offset: 100 + ARISTA102T0: + vlans: + - 101 + vm_offset: 101 + ARISTA103T0: + vlans: + - 102 + vm_offset: 102 + ARISTA104T0: + vlans: + - 103 + vm_offset: 103 + ARISTA105T0: + vlans: + - 104 + vm_offset: 104 + ARISTA106T0: + vlans: + - 105 + vm_offset: 105 + ARISTA107T0: + vlans: + - 106 + vm_offset: 106 + ARISTA108T0: + vlans: + - 107 + vm_offset: 107 + ARISTA109T0: + vlans: + - 108 + vm_offset: 108 + ARISTA110T0: + vlans: + - 109 + vm_offset: 109 + ARISTA111T0: + vlans: + - 110 + vm_offset: 110 + ARISTA112T0: + vlans: + - 111 + vm_offset: 111 + ARISTA113T0: + vlans: + - 112 + vm_offset: 112 + ARISTA114T0: + vlans: + - 113 + vm_offset: 113 + ARISTA115T0: + vlans: + - 114 + vm_offset: 114 + ARISTA116T0: + vlans: + - 115 + vm_offset: 115 + ARISTA117T0: + vlans: + - 116 + vm_offset: 116 + ARISTA118T0: + vlans: + - 117 + vm_offset: 117 + ARISTA119T0: + vlans: + - 118 + vm_offset: 118 + ARISTA120T0: + vlans: + - 119 + vm_offset: 119 + ARISTA121T0: + vlans: + - 120 + vm_offset: 120 + ARISTA122T0: + vlans: + - 121 + vm_offset: 121 + ARISTA123T0: + vlans: + - 122 + vm_offset: 122 + 
ARISTA124T0: + vlans: + - 123 + vm_offset: 123 + ARISTA125T0: + vlans: + - 124 + vm_offset: 124 + ARISTA126T0: + vlans: + - 125 + vm_offset: 125 + ARISTA127T0: + vlans: + - 126 + vm_offset: 126 + ARISTA128T0: + vlans: + - 127 + vm_offset: 127 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T0: + properties: + - common + bgp: + asn: 64001 + peers: + 65100: + - 10.0.0.0 + - fc00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100::1/128 + Ethernet1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interfaces: + ipv4: 10.10.246.2/24 + ipv6: fc0a::2/64 + ARISTA02T0: + properties: + - common + bgp: + asn: 64002 + peers: + 65100: + - 10.0.0.2 + - fc00::5 + interfaces: + Loopback0: + ipv4: 100.1.0.2/32 + ipv6: 2064:100::2/128 + Ethernet1: + ipv4: 10.0.0.3/31 + ipv6: fc00::6/126 + bp_interfaces: + ipv4: 10.10.246.3/24 + ipv6: fc0a::3/64 + ARISTA03T0: + properties: + - common + bgp: + asn: 64003 + peers: + 65100: + - 10.0.0.4 + - fc00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100::3/128 + Ethernet1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interfaces: + ipv4: 10.10.246.4/24 + ipv6: fc0a::4/64 + ARISTA04T0: + properties: + - common + bgp: + asn: 64004 + peers: + 65100: + - 10.0.0.6 + - fc00::d + interfaces: + Loopback0: + ipv4: 100.1.0.4/32 + ipv6: 2064:100::4/128 + Ethernet1: + ipv4: 10.0.0.7/31 + ipv6: fc00::e/126 + bp_interfaces: + ipv4: 10.10.246.5/24 + ipv6: fc0a::5/64 + ARISTA05T0: + properties: + - common + bgp: + asn: 64005 + peers: + 65100: + - 10.0.0.8 + - fc00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100::5/128 + Ethernet1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interfaces: + ipv4: 10.10.246.6/24 + ipv6: fc0a::6/64 + ARISTA06T0: + 
properties: + - common + bgp: + asn: 64006 + peers: + 65100: + - 10.0.0.10 + - fc00::15 + interfaces: + Loopback0: + ipv4: 100.1.0.6/32 + ipv6: 2064:100::6/128 + Ethernet1: + ipv4: 10.0.0.11/31 + ipv6: fc00::16/126 + bp_interfaces: + ipv4: 10.10.246.7/24 + ipv6: fc0a::7/64 + ARISTA07T0: + properties: + - common + bgp: + asn: 64007 + peers: + 65100: + - 10.0.0.12 + - fc00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100::7/128 + Ethernet1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interfaces: + ipv4: 10.10.246.8/24 + ipv6: fc0a::8/64 + ARISTA08T0: + properties: + - common + bgp: + asn: 64008 + peers: + 65100: + - 10.0.0.14 + - fc00::1d + interfaces: + Loopback0: + ipv4: 100.1.0.8/32 + ipv6: 2064:100::8/128 + Ethernet1: + ipv4: 10.0.0.15/31 + ipv6: fc00::1e/126 + bp_interfaces: + ipv4: 10.10.246.9/24 + ipv6: fc0a::9/64 + ARISTA09T0: + properties: + - common + bgp: + asn: 64009 + peers: + 65100: + - 10.0.0.16 + - fc00::21 + interfaces: + Loopback0: + ipv4: 100.1.0.9/32 + ipv6: 2064:100::9/128 + Ethernet1: + ipv4: 10.0.0.17/31 + ipv6: fc00::22/126 + bp_interfaces: + ipv4: 10.10.246.10/24 + ipv6: fc0a::a/64 + ARISTA10T0: + properties: + - common + bgp: + asn: 64010 + peers: + 65100: + - 10.0.0.18 + - fc00::25 + interfaces: + Loopback0: + ipv4: 100.1.0.10/32 + ipv6: 2064:100::a/128 + Ethernet1: + ipv4: 10.0.0.19/31 + ipv6: fc00::26/126 + bp_interfaces: + ipv4: 10.10.246.11/24 + ipv6: fc0a::b/64 + ARISTA11T0: + properties: + - common + bgp: + asn: 64011 + peers: + 65100: + - 10.0.0.20 + - fc00::29 + interfaces: + Loopback0: + ipv4: 100.1.0.11/32 + ipv6: 2064:100::b/128 + Ethernet1: + ipv4: 10.0.0.21/31 + ipv6: fc00::2a/126 + bp_interfaces: + ipv4: 10.10.246.12/24 + ipv6: fc0a::c/64 + ARISTA12T0: + properties: + - common + bgp: + asn: 64012 + peers: + 65100: + - 10.0.0.22 + - fc00::2d + interfaces: + Loopback0: + ipv4: 100.1.0.12/32 + ipv6: 2064:100::c/128 + Ethernet1: + ipv4: 10.0.0.23/31 + ipv6: fc00::2e/126 + bp_interfaces: + ipv4: 
10.10.246.13/24 + ipv6: fc0a::d/64 + ARISTA13T0: + properties: + - common + bgp: + asn: 64013 + peers: + 65100: + - 10.0.0.24 + - fc00::31 + interfaces: + Loopback0: + ipv4: 100.1.0.13/32 + ipv6: 2064:100::d/128 + Ethernet1: + ipv4: 10.0.0.25/31 + ipv6: fc00::32/126 + bp_interfaces: + ipv4: 10.10.246.14/24 + ipv6: fc0a::e/64 + ARISTA14T0: + properties: + - common + bgp: + asn: 64014 + peers: + 65100: + - 10.0.0.26 + - fc00::35 + interfaces: + Loopback0: + ipv4: 100.1.0.14/32 + ipv6: 2064:100::e/128 + Ethernet1: + ipv4: 10.0.0.27/31 + ipv6: fc00::36/126 + bp_interfaces: + ipv4: 10.10.246.15/24 + ipv6: fc0a::f/64 + ARISTA15T0: + properties: + - common + bgp: + asn: 64015 + peers: + 65100: + - 10.0.0.28 + - fc00::39 + interfaces: + Loopback0: + ipv4: 100.1.0.15/32 + ipv6: 2064:100::f/128 + Ethernet1: + ipv4: 10.0.0.29/31 + ipv6: fc00::3a/126 + bp_interfaces: + ipv4: 10.10.246.16/24 + ipv6: fc0a::10/64 + ARISTA16T0: + properties: + - common + bgp: + asn: 64016 + peers: + 65100: + - 10.0.0.30 + - fc00::3d + interfaces: + Loopback0: + ipv4: 100.1.0.16/32 + ipv6: 2064:100::10/128 + Ethernet1: + ipv4: 10.0.0.31/31 + ipv6: fc00::3e/126 + bp_interfaces: + ipv4: 10.10.246.17/24 + ipv6: fc0a::11/64 + ARISTA17T0: + properties: + - common + bgp: + asn: 64017 + peers: + 65100: + - 10.0.0.32 + - fc00::41 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100::11/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interfaces: + ipv4: 10.10.246.18/24 + ipv6: fc0a::12/64 + ARISTA18T0: + properties: + - common + bgp: + asn: 64018 + peers: + 65100: + - 10.0.0.34 + - fc00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100::12/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: fc00::46/126 + bp_interfaces: + ipv4: 10.10.246.19/24 + ipv6: fc0a::13/64 + ARISTA19T0: + properties: + - common + bgp: + asn: 64019 + peers: + 65100: + - 10.0.0.36 + - fc00::49 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100::13/128 + Ethernet1: + ipv4: 
10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interfaces: + ipv4: 10.10.246.20/24 + ipv6: fc0a::14/64 + ARISTA20T0: + properties: + - common + bgp: + asn: 64020 + peers: + 65100: + - 10.0.0.38 + - fc00::4d + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100::14/128 + Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interfaces: + ipv4: 10.10.246.21/24 + ipv6: fc0a::15/64 + ARISTA21T0: + properties: + - common + bgp: + asn: 64021 + peers: + 65100: + - 10.0.0.40 + - fc00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100::15/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interfaces: + ipv4: 10.10.246.22/24 + ipv6: fc0a::16/64 + ARISTA22T0: + properties: + - common + bgp: + asn: 64022 + peers: + 65100: + - 10.0.0.42 + - fc00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100::16/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interfaces: + ipv4: 10.10.246.23/24 + ipv6: fc0a::17/64 + ARISTA23T0: + properties: + - common + bgp: + asn: 64023 + peers: + 65100: + - 10.0.0.44 + - fc00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100::17/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interfaces: + ipv4: 10.10.246.24/24 + ipv6: fc0a::18/64 + ARISTA24T0: + properties: + - common + bgp: + asn: 64024 + peers: + 65100: + - 10.0.0.46 + - fc00::5d + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100::18/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interfaces: + ipv4: 10.10.246.25/24 + ipv6: fc0a::19/64 + ARISTA25T0: + properties: + - common + bgp: + asn: 64025 + peers: + 65100: + - 10.0.0.48 + - fc00::61 + interfaces: + Loopback0: + ipv4: 100.1.0.25/32 + ipv6: 2064:100::19/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interfaces: + ipv4: 10.10.246.26/24 + ipv6: fc0a::1a/64 + ARISTA26T0: + properties: + - common + bgp: + asn: 64026 + peers: + 65100: + - 10.0.0.50 + - fc00::65 + interfaces: + Loopback0: + ipv4: 
100.1.0.26/32 + ipv6: 2064:100::1a/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interfaces: + ipv4: 10.10.246.27/24 + ipv6: fc0a::1b/64 + ARISTA27T0: + properties: + - common + bgp: + asn: 64027 + peers: + 65100: + - 10.0.0.52 + - fc00::69 + interfaces: + Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100::1b/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interfaces: + ipv4: 10.10.246.28/24 + ipv6: fc0a::1c/64 + ARISTA28T0: + properties: + - common + bgp: + asn: 64028 + peers: + 65100: + - 10.0.0.54 + - fc00::6d + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100::1c/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interfaces: + ipv4: 10.10.246.29/24 + ipv6: fc0a::1d/64 + ARISTA29T0: + properties: + - common + bgp: + asn: 64029 + peers: + 65100: + - 10.0.0.56 + - fc00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interfaces: + ipv4: 10.10.246.30/24 + ipv6: fc0a::1e/64 + ARISTA30T0: + properties: + - common + bgp: + asn: 64030 + peers: + 65100: + - 10.0.0.58 + - fc00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interfaces: + ipv4: 10.10.246.31/24 + ipv6: fc0a::1f/64 + ARISTA31T0: + properties: + - common + bgp: + asn: 64031 + peers: + 65100: + - 10.0.0.60 + - fc00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interfaces: + ipv4: 10.10.246.32/24 + ipv6: fc0a::20/64 + ARISTA32T0: + properties: + - common + bgp: + asn: 64032 + peers: + 65100: + - 10.0.0.62 + - fc00::7d + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interfaces: + ipv4: 10.10.246.33/24 + ipv6: fc0a::21/64 + ARISTA33T0: + properties: + - common + bgp: + asn: 64033 + peers: + 65100: + - 
10.0.0.64 + - fc00::81 + interfaces: + Loopback0: + ipv4: 100.1.0.33/32 + ipv6: 2064:100::21/128 + Ethernet1: + ipv4: 10.0.0.65/31 + ipv6: fc00::82/126 + bp_interfaces: + ipv4: 10.10.246.34/24 + ipv6: fc0a::22/64 + ARISTA34T0: + properties: + - common + bgp: + asn: 64034 + peers: + 65100: + - 10.0.0.66 + - fc00::85 + interfaces: + Loopback0: + ipv4: 100.1.0.34/32 + ipv6: 2064:100::22/128 + Ethernet1: + ipv4: 10.0.0.67/31 + ipv6: fc00::86/126 + bp_interfaces: + ipv4: 10.10.246.35/24 + ipv6: fc0a::23/64 + ARISTA35T0: + properties: + - common + bgp: + asn: 64035 + peers: + 65100: + - 10.0.0.68 + - fc00::89 + interfaces: + Loopback0: + ipv4: 100.1.0.35/32 + ipv6: 2064:100::23/128 + Ethernet1: + ipv4: 10.0.0.69/31 + ipv6: fc00::8a/126 + bp_interfaces: + ipv4: 10.10.246.36/24 + ipv6: fc0a::24/64 + ARISTA36T0: + properties: + - common + bgp: + asn: 64036 + peers: + 65100: + - 10.0.0.70 + - fc00::8d + interfaces: + Loopback0: + ipv4: 100.1.0.36/32 + ipv6: 2064:100::24/128 + Ethernet1: + ipv4: 10.0.0.71/31 + ipv6: fc00::8e/126 + bp_interfaces: + ipv4: 10.10.246.37/24 + ipv6: fc0a::25/64 + ARISTA37T0: + properties: + - common + bgp: + asn: 64037 + peers: + 65100: + - 10.0.0.72 + - fc00::91 + interfaces: + Loopback0: + ipv4: 100.1.0.37/32 + ipv6: 2064:100::25/128 + Ethernet1: + ipv4: 10.0.0.73/31 + ipv6: fc00::92/126 + bp_interfaces: + ipv4: 10.10.246.38/24 + ipv6: fc0a::26/64 + ARISTA38T0: + properties: + - common + bgp: + asn: 64038 + peers: + 65100: + - 10.0.0.74 + - fc00::95 + interfaces: + Loopback0: + ipv4: 100.1.0.38/32 + ipv6: 2064:100::26/128 + Ethernet1: + ipv4: 10.0.0.75/31 + ipv6: fc00::96/126 + bp_interfaces: + ipv4: 10.10.246.39/24 + ipv6: fc0a::27/64 + ARISTA39T0: + properties: + - common + bgp: + asn: 64039 + peers: + 65100: + - 10.0.0.76 + - fc00::99 + interfaces: + Loopback0: + ipv4: 100.1.0.39/32 + ipv6: 2064:100::27/128 + Ethernet1: + ipv4: 10.0.0.77/31 + ipv6: fc00::9a/126 + bp_interfaces: + ipv4: 10.10.246.40/24 + ipv6: fc0a::28/64 + ARISTA40T0: + 
properties: + - common + bgp: + asn: 64040 + peers: + 65100: + - 10.0.0.78 + - fc00::9d + interfaces: + Loopback0: + ipv4: 100.1.0.40/32 + ipv6: 2064:100::28/128 + Ethernet1: + ipv4: 10.0.0.79/31 + ipv6: fc00::9e/126 + bp_interfaces: + ipv4: 10.10.246.41/24 + ipv6: fc0a::29/64 + ARISTA41T0: + properties: + - common + bgp: + asn: 64041 + peers: + 65100: + - 10.0.0.80 + - fc00::a1 + interfaces: + Loopback0: + ipv4: 100.1.0.41/32 + ipv6: 2064:100::29/128 + Ethernet1: + ipv4: 10.0.0.81/31 + ipv6: fc00::a2/126 + bp_interfaces: + ipv4: 10.10.246.42/24 + ipv6: fc0a::2a/64 + ARISTA42T0: + properties: + - common + bgp: + asn: 64042 + peers: + 65100: + - 10.0.0.82 + - fc00::a5 + interfaces: + Loopback0: + ipv4: 100.1.0.42/32 + ipv6: 2064:100::2a/128 + Ethernet1: + ipv4: 10.0.0.83/31 + ipv6: fc00::a6/126 + bp_interfaces: + ipv4: 10.10.246.43/24 + ipv6: fc0a::2b/64 + ARISTA43T0: + properties: + - common + bgp: + asn: 64043 + peers: + 65100: + - 10.0.0.84 + - fc00::a9 + interfaces: + Loopback0: + ipv4: 100.1.0.43/32 + ipv6: 2064:100::2b/128 + Ethernet1: + ipv4: 10.0.0.85/31 + ipv6: fc00::aa/126 + bp_interfaces: + ipv4: 10.10.246.44/24 + ipv6: fc0a::2c/64 + ARISTA44T0: + properties: + - common + bgp: + asn: 64044 + peers: + 65100: + - 10.0.0.86 + - fc00::ad + interfaces: + Loopback0: + ipv4: 100.1.0.44/32 + ipv6: 2064:100::2c/128 + Ethernet1: + ipv4: 10.0.0.87/31 + ipv6: fc00::ae/126 + bp_interfaces: + ipv4: 10.10.246.45/24 + ipv6: fc0a::2d/64 + ARISTA45T0: + properties: + - common + bgp: + asn: 64045 + peers: + 65100: + - 10.0.0.88 + - fc00::b1 + interfaces: + Loopback0: + ipv4: 100.1.0.45/32 + ipv6: 2064:100::2d/128 + Ethernet1: + ipv4: 10.0.0.89/31 + ipv6: fc00::b2/126 + bp_interfaces: + ipv4: 10.10.246.46/24 + ipv6: fc0a::2e/64 + ARISTA46T0: + properties: + - common + bgp: + asn: 64046 + peers: + 65100: + - 10.0.0.90 + - fc00::b5 + interfaces: + Loopback0: + ipv4: 100.1.0.46/32 + ipv6: 2064:100::2e/128 + Ethernet1: + ipv4: 10.0.0.91/31 + ipv6: fc00::b6/126 + bp_interfaces: + 
ipv4: 10.10.246.47/24 + ipv6: fc0a::2f/64 + ARISTA47T0: + properties: + - common + bgp: + asn: 64047 + peers: + 65100: + - 10.0.0.92 + - fc00::b9 + interfaces: + Loopback0: + ipv4: 100.1.0.47/32 + ipv6: 2064:100::2f/128 + Ethernet1: + ipv4: 10.0.0.93/31 + ipv6: fc00::ba/126 + bp_interfaces: + ipv4: 10.10.246.48/24 + ipv6: fc0a::30/64 + ARISTA48T0: + properties: + - common + bgp: + asn: 64048 + peers: + 65100: + - 10.0.0.94 + - fc00::bd + interfaces: + Loopback0: + ipv4: 100.1.0.48/32 + ipv6: 2064:100::30/128 + Ethernet1: + ipv4: 10.0.0.95/31 + ipv6: fc00::be/126 + bp_interfaces: + ipv4: 10.10.246.49/24 + ipv6: fc0a::31/64 + ARISTA49T0: + properties: + - common + bgp: + asn: 64049 + peers: + 65100: + - 10.0.0.96 + - fc00::c1 + interfaces: + Loopback0: + ipv4: 100.1.0.49/32 + ipv6: 2064:100::31/128 + Ethernet1: + ipv4: 10.0.0.97/31 + ipv6: fc00::c2/126 + bp_interfaces: + ipv4: 10.10.246.50/24 + ipv6: fc0a::32/64 + ARISTA50T0: + properties: + - common + bgp: + asn: 64050 + peers: + 65100: + - 10.0.0.98 + - fc00::c5 + interfaces: + Loopback0: + ipv4: 100.1.0.50/32 + ipv6: 2064:100::32/128 + Ethernet1: + ipv4: 10.0.0.99/31 + ipv6: fc00::c6/126 + bp_interfaces: + ipv4: 10.10.246.51/24 + ipv6: fc0a::33/64 + ARISTA51T0: + properties: + - common + bgp: + asn: 64051 + peers: + 65100: + - 10.0.0.100 + - fc00::c9 + interfaces: + Loopback0: + ipv4: 100.1.0.51/32 + ipv6: 2064:100::33/128 + Ethernet1: + ipv4: 10.0.0.101/31 + ipv6: fc00::ca/126 + bp_interfaces: + ipv4: 10.10.246.52/24 + ipv6: fc0a::34/64 + ARISTA52T0: + properties: + - common + bgp: + asn: 64052 + peers: + 65100: + - 10.0.0.102 + - fc00::cd + interfaces: + Loopback0: + ipv4: 100.1.0.52/32 + ipv6: 2064:100::34/128 + Ethernet1: + ipv4: 10.0.0.103/31 + ipv6: fc00::ce/126 + bp_interfaces: + ipv4: 10.10.246.53/24 + ipv6: fc0a::35/64 + ARISTA53T0: + properties: + - common + bgp: + asn: 64053 + peers: + 65100: + - 10.0.0.104 + - fc00::d1 + interfaces: + Loopback0: + ipv4: 100.1.0.53/32 + ipv6: 2064:100::35/128 + 
Ethernet1: + ipv4: 10.0.0.105/31 + ipv6: fc00::d2/126 + bp_interfaces: + ipv4: 10.10.246.54/24 + ipv6: fc0a::36/64 + ARISTA54T0: + properties: + - common + bgp: + asn: 64054 + peers: + 65100: + - 10.0.0.106 + - fc00::d5 + interfaces: + Loopback0: + ipv4: 100.1.0.54/32 + ipv6: 2064:100::36/128 + Ethernet1: + ipv4: 10.0.0.107/31 + ipv6: fc00::d6/126 + bp_interfaces: + ipv4: 10.10.246.55/24 + ipv6: fc0a::37/64 + ARISTA55T0: + properties: + - common + bgp: + asn: 64055 + peers: + 65100: + - 10.0.0.108 + - fc00::d9 + interfaces: + Loopback0: + ipv4: 100.1.0.55/32 + ipv6: 2064:100::37/128 + Ethernet1: + ipv4: 10.0.0.109/31 + ipv6: fc00::da/126 + bp_interfaces: + ipv4: 10.10.246.56/24 + ipv6: fc0a::38/64 + ARISTA56T0: + properties: + - common + bgp: + asn: 64056 + peers: + 65100: + - 10.0.0.110 + - fc00::dd + interfaces: + Loopback0: + ipv4: 100.1.0.56/32 + ipv6: 2064:100::38/128 + Ethernet1: + ipv4: 10.0.0.111/31 + ipv6: fc00::de/126 + bp_interfaces: + ipv4: 10.10.246.57/24 + ipv6: fc0a::39/64 + ARISTA57T0: + properties: + - common + bgp: + asn: 64057 + peers: + 65100: + - 10.0.0.112 + - fc00::e1 + interfaces: + Loopback0: + ipv4: 100.1.0.57/32 + ipv6: 2064:100::39/128 + Ethernet1: + ipv4: 10.0.0.113/31 + ipv6: fc00::e2/126 + bp_interfaces: + ipv4: 10.10.246.58/24 + ipv6: fc0a::3a/64 + ARISTA58T0: + properties: + - common + bgp: + asn: 64058 + peers: + 65100: + - 10.0.0.114 + - fc00::e5 + interfaces: + Loopback0: + ipv4: 100.1.0.58/32 + ipv6: 2064:100::3a/128 + Ethernet1: + ipv4: 10.0.0.115/31 + ipv6: fc00::e6/126 + bp_interfaces: + ipv4: 10.10.246.59/24 + ipv6: fc0a::3b/64 + ARISTA59T0: + properties: + - common + bgp: + asn: 64059 + peers: + 65100: + - 10.0.0.116 + - fc00::e9 + interfaces: + Loopback0: + ipv4: 100.1.0.59/32 + ipv6: 2064:100::3b/128 + Ethernet1: + ipv4: 10.0.0.117/31 + ipv6: fc00::ea/126 + bp_interfaces: + ipv4: 10.10.246.60/24 + ipv6: fc0a::3c/64 + ARISTA60T0: + properties: + - common + bgp: + asn: 64060 + peers: + 65100: + - 10.0.0.118 + - fc00::ed + 
interfaces: + Loopback0: + ipv4: 100.1.0.60/32 + ipv6: 2064:100::3c/128 + Ethernet1: + ipv4: 10.0.0.119/31 + ipv6: fc00::ee/126 + bp_interfaces: + ipv4: 10.10.246.61/24 + ipv6: fc0a::3d/64 + ARISTA61T0: + properties: + - common + bgp: + asn: 64061 + peers: + 65100: + - 10.0.0.120 + - fc00::f1 + interfaces: + Loopback0: + ipv4: 100.1.0.61/32 + ipv6: 2064:100::3d/128 + Ethernet1: + ipv4: 10.0.0.121/31 + ipv6: fc00::f2/126 + bp_interfaces: + ipv4: 10.10.246.62/24 + ipv6: fc0a::3e/64 + ARISTA62T0: + properties: + - common + bgp: + asn: 64062 + peers: + 65100: + - 10.0.0.122 + - fc00::f5 + interfaces: + Loopback0: + ipv4: 100.1.0.62/32 + ipv6: 2064:100::3e/128 + Ethernet1: + ipv4: 10.0.0.123/31 + ipv6: fc00::f6/126 + bp_interfaces: + ipv4: 10.10.246.63/24 + ipv6: fc0a::3f/64 + ARISTA63T0: + properties: + - common + bgp: + asn: 64063 + peers: + 65100: + - 10.0.0.124 + - fc00::f9 + interfaces: + Loopback0: + ipv4: 100.1.0.63/32 + ipv6: 2064:100::3f/128 + Ethernet1: + ipv4: 10.0.0.125/31 + ipv6: fc00::fa/126 + bp_interfaces: + ipv4: 10.10.246.64/24 + ipv6: fc0a::40/64 + ARISTA64T0: + properties: + - common + bgp: + asn: 64064 + peers: + 65100: + - 10.0.0.126 + - fc00::fd + interfaces: + Loopback0: + ipv4: 100.1.0.64/32 + ipv6: 2064:100::40/128 + Ethernet1: + ipv4: 10.0.0.127/31 + ipv6: fc00::fe/126 + bp_interfaces: + ipv4: 10.10.246.65/24 + ipv6: fc0a::41/64 + ARISTA65T0: + properties: + - common + bgp: + asn: 64065 + peers: + 65100: + - 10.0.0.128 + - fc00::101 + interfaces: + Loopback0: + ipv4: 100.1.0.65/32 + ipv6: 2064:100::41/128 + Ethernet1: + ipv4: 10.0.0.129/31 + ipv6: fc00::102/126 + bp_interfaces: + ipv4: 10.10.246.66/24 + ipv6: fc0a::42/64 + ARISTA66T0: + properties: + - common + bgp: + asn: 64066 + peers: + 65100: + - 10.0.0.130 + - fc00::105 + interfaces: + Loopback0: + ipv4: 100.1.0.66/32 + ipv6: 2064:100::42/128 + Ethernet1: + ipv4: 10.0.0.131/31 + ipv6: fc00::106/126 + bp_interfaces: + ipv4: 10.10.246.67/24 + ipv6: fc0a::43/64 + ARISTA67T0: + properties: + 
- common + bgp: + asn: 64067 + peers: + 65100: + - 10.0.0.132 + - fc00::109 + interfaces: + Loopback0: + ipv4: 100.1.0.67/32 + ipv6: 2064:100::43/128 + Ethernet1: + ipv4: 10.0.0.133/31 + ipv6: fc00::10a/126 + bp_interfaces: + ipv4: 10.10.246.68/24 + ipv6: fc0a::44/64 + ARISTA68T0: + properties: + - common + bgp: + asn: 64068 + peers: + 65100: + - 10.0.0.134 + - fc00::10d + interfaces: + Loopback0: + ipv4: 100.1.0.68/32 + ipv6: 2064:100::44/128 + Ethernet1: + ipv4: 10.0.0.135/31 + ipv6: fc00::10e/126 + bp_interfaces: + ipv4: 10.10.246.69/24 + ipv6: fc0a::45/64 + ARISTA69T0: + properties: + - common + bgp: + asn: 64069 + peers: + 65100: + - 10.0.0.136 + - fc00::111 + interfaces: + Loopback0: + ipv4: 100.1.0.69/32 + ipv6: 2064:100::45/128 + Ethernet1: + ipv4: 10.0.0.137/31 + ipv6: fc00::112/126 + bp_interfaces: + ipv4: 10.10.246.70/24 + ipv6: fc0a::46/64 + ARISTA70T0: + properties: + - common + bgp: + asn: 64070 + peers: + 65100: + - 10.0.0.138 + - fc00::115 + interfaces: + Loopback0: + ipv4: 100.1.0.70/32 + ipv6: 2064:100::46/128 + Ethernet1: + ipv4: 10.0.0.139/31 + ipv6: fc00::116/126 + bp_interfaces: + ipv4: 10.10.246.71/24 + ipv6: fc0a::47/64 + ARISTA71T0: + properties: + - common + bgp: + asn: 64071 + peers: + 65100: + - 10.0.0.140 + - fc00::119 + interfaces: + Loopback0: + ipv4: 100.1.0.71/32 + ipv6: 2064:100::47/128 + Ethernet1: + ipv4: 10.0.0.141/31 + ipv6: fc00::11a/126 + bp_interfaces: + ipv4: 10.10.246.72/24 + ipv6: fc0a::48/64 + ARISTA72T0: + properties: + - common + bgp: + asn: 64072 + peers: + 65100: + - 10.0.0.142 + - fc00::11d + interfaces: + Loopback0: + ipv4: 100.1.0.72/32 + ipv6: 2064:100::48/128 + Ethernet1: + ipv4: 10.0.0.143/31 + ipv6: fc00::11e/126 + bp_interfaces: + ipv4: 10.10.246.73/24 + ipv6: fc0a::49/64 + ARISTA73T0: + properties: + - common + bgp: + asn: 64073 + peers: + 65100: + - 10.0.0.144 + - fc00::121 + interfaces: + Loopback0: + ipv4: 100.1.0.73/32 + ipv6: 2064:100::49/128 + Ethernet1: + ipv4: 10.0.0.145/31 + ipv6: fc00::122/126 + 
bp_interfaces: + ipv4: 10.10.246.74/24 + ipv6: fc0a::4a/64 + ARISTA74T0: + properties: + - common + bgp: + asn: 64074 + peers: + 65100: + - 10.0.0.146 + - fc00::125 + interfaces: + Loopback0: + ipv4: 100.1.0.74/32 + ipv6: 2064:100::4a/128 + Ethernet1: + ipv4: 10.0.0.147/31 + ipv6: fc00::126/126 + bp_interfaces: + ipv4: 10.10.246.75/24 + ipv6: fc0a::4b/64 + ARISTA75T0: + properties: + - common + bgp: + asn: 64075 + peers: + 65100: + - 10.0.0.148 + - fc00::129 + interfaces: + Loopback0: + ipv4: 100.1.0.75/32 + ipv6: 2064:100::4b/128 + Ethernet1: + ipv4: 10.0.0.149/31 + ipv6: fc00::12a/126 + bp_interfaces: + ipv4: 10.10.246.76/24 + ipv6: fc0a::4c/64 + ARISTA76T0: + properties: + - common + bgp: + asn: 64076 + peers: + 65100: + - 10.0.0.150 + - fc00::12d + interfaces: + Loopback0: + ipv4: 100.1.0.76/32 + ipv6: 2064:100::4c/128 + Ethernet1: + ipv4: 10.0.0.151/31 + ipv6: fc00::12e/126 + bp_interfaces: + ipv4: 10.10.246.77/24 + ipv6: fc0a::4d/64 + ARISTA77T0: + properties: + - common + bgp: + asn: 64077 + peers: + 65100: + - 10.0.0.152 + - fc00::131 + interfaces: + Loopback0: + ipv4: 100.1.0.77/32 + ipv6: 2064:100::4d/128 + Ethernet1: + ipv4: 10.0.0.153/31 + ipv6: fc00::132/126 + bp_interfaces: + ipv4: 10.10.246.78/24 + ipv6: fc0a::4e/64 + ARISTA78T0: + properties: + - common + bgp: + asn: 64078 + peers: + 65100: + - 10.0.0.154 + - fc00::135 + interfaces: + Loopback0: + ipv4: 100.1.0.78/32 + ipv6: 2064:100::4e/128 + Ethernet1: + ipv4: 10.0.0.155/31 + ipv6: fc00::136/126 + bp_interfaces: + ipv4: 10.10.246.79/24 + ipv6: fc0a::4f/64 + ARISTA79T0: + properties: + - common + bgp: + asn: 64079 + peers: + 65100: + - 10.0.0.156 + - fc00::139 + interfaces: + Loopback0: + ipv4: 100.1.0.79/32 + ipv6: 2064:100::4f/128 + Ethernet1: + ipv4: 10.0.0.157/31 + ipv6: fc00::13a/126 + bp_interfaces: + ipv4: 10.10.246.80/24 + ipv6: fc0a::50/64 + ARISTA80T0: + properties: + - common + bgp: + asn: 64080 + peers: + 65100: + - 10.0.0.158 + - fc00::13d + interfaces: + Loopback0: + ipv4: 
100.1.0.80/32 + ipv6: 2064:100::50/128 + Ethernet1: + ipv4: 10.0.0.159/31 + ipv6: fc00::13e/126 + bp_interfaces: + ipv4: 10.10.246.81/24 + ipv6: fc0a::51/64 + ARISTA81T0: + properties: + - common + bgp: + asn: 64081 + peers: + 65100: + - 10.0.0.160 + - fc00::141 + interfaces: + Loopback0: + ipv4: 100.1.0.81/32 + ipv6: 2064:100::51/128 + Ethernet1: + ipv4: 10.0.0.161/31 + ipv6: fc00::142/126 + bp_interfaces: + ipv4: 10.10.246.82/24 + ipv6: fc0a::52/64 + ARISTA82T0: + properties: + - common + bgp: + asn: 64082 + peers: + 65100: + - 10.0.0.162 + - fc00::145 + interfaces: + Loopback0: + ipv4: 100.1.0.82/32 + ipv6: 2064:100::52/128 + Ethernet1: + ipv4: 10.0.0.163/31 + ipv6: fc00::146/126 + bp_interfaces: + ipv4: 10.10.246.83/24 + ipv6: fc0a::53/64 + ARISTA83T0: + properties: + - common + bgp: + asn: 64083 + peers: + 65100: + - 10.0.0.164 + - fc00::149 + interfaces: + Loopback0: + ipv4: 100.1.0.83/32 + ipv6: 2064:100::53/128 + Ethernet1: + ipv4: 10.0.0.165/31 + ipv6: fc00::14a/126 + bp_interfaces: + ipv4: 10.10.246.84/24 + ipv6: fc0a::54/64 + ARISTA84T0: + properties: + - common + bgp: + asn: 64084 + peers: + 65100: + - 10.0.0.166 + - fc00::14d + interfaces: + Loopback0: + ipv4: 100.1.0.84/32 + ipv6: 2064:100::54/128 + Ethernet1: + ipv4: 10.0.0.167/31 + ipv6: fc00::14e/126 + bp_interfaces: + ipv4: 10.10.246.85/24 + ipv6: fc0a::55/64 + ARISTA85T0: + properties: + - common + bgp: + asn: 64085 + peers: + 65100: + - 10.0.0.168 + - fc00::151 + interfaces: + Loopback0: + ipv4: 100.1.0.85/32 + ipv6: 2064:100::55/128 + Ethernet1: + ipv4: 10.0.0.169/31 + ipv6: fc00::152/126 + bp_interfaces: + ipv4: 10.10.246.86/24 + ipv6: fc0a::56/64 + ARISTA86T0: + properties: + - common + bgp: + asn: 64086 + peers: + 65100: + - 10.0.0.170 + - fc00::155 + interfaces: + Loopback0: + ipv4: 100.1.0.86/32 + ipv6: 2064:100::56/128 + Ethernet1: + ipv4: 10.0.0.171/31 + ipv6: fc00::156/126 + bp_interfaces: + ipv4: 10.10.246.87/24 + ipv6: fc0a::57/64 + ARISTA87T0: + properties: + - common + bgp: + asn: 
64087 + peers: + 65100: + - 10.0.0.172 + - fc00::159 + interfaces: + Loopback0: + ipv4: 100.1.0.87/32 + ipv6: 2064:100::57/128 + Ethernet1: + ipv4: 10.0.0.173/31 + ipv6: fc00::15a/126 + bp_interfaces: + ipv4: 10.10.246.88/24 + ipv6: fc0a::58/64 + ARISTA88T0: + properties: + - common + bgp: + asn: 64088 + peers: + 65100: + - 10.0.0.174 + - fc00::15d + interfaces: + Loopback0: + ipv4: 100.1.0.88/32 + ipv6: 2064:100::58/128 + Ethernet1: + ipv4: 10.0.0.175/31 + ipv6: fc00::15e/126 + bp_interfaces: + ipv4: 10.10.246.89/24 + ipv6: fc0a::59/64 + ARISTA89T0: + properties: + - common + bgp: + asn: 64089 + peers: + 65100: + - 10.0.0.176 + - fc00::161 + interfaces: + Loopback0: + ipv4: 100.1.0.89/32 + ipv6: 2064:100::59/128 + Ethernet1: + ipv4: 10.0.0.177/31 + ipv6: fc00::162/126 + bp_interfaces: + ipv4: 10.10.246.90/24 + ipv6: fc0a::5a/64 + ARISTA90T0: + properties: + - common + bgp: + asn: 64090 + peers: + 65100: + - 10.0.0.178 + - fc00::165 + interfaces: + Loopback0: + ipv4: 100.1.0.90/32 + ipv6: 2064:100::5a/128 + Ethernet1: + ipv4: 10.0.0.179/31 + ipv6: fc00::166/126 + bp_interfaces: + ipv4: 10.10.246.91/24 + ipv6: fc0a::5b/64 + ARISTA91T0: + properties: + - common + bgp: + asn: 64091 + peers: + 65100: + - 10.0.0.180 + - fc00::169 + interfaces: + Loopback0: + ipv4: 100.1.0.91/32 + ipv6: 2064:100::5b/128 + Ethernet1: + ipv4: 10.0.0.181/31 + ipv6: fc00::16a/126 + bp_interfaces: + ipv4: 10.10.246.92/24 + ipv6: fc0a::5c/64 + ARISTA92T0: + properties: + - common + bgp: + asn: 64092 + peers: + 65100: + - 10.0.0.182 + - fc00::16d + interfaces: + Loopback0: + ipv4: 100.1.0.92/32 + ipv6: 2064:100::5c/128 + Ethernet1: + ipv4: 10.0.0.183/31 + ipv6: fc00::16e/126 + bp_interfaces: + ipv4: 10.10.246.93/24 + ipv6: fc0a::5d/64 + ARISTA93T0: + properties: + - common + bgp: + asn: 64093 + peers: + 65100: + - 10.0.0.184 + - fc00::171 + interfaces: + Loopback0: + ipv4: 100.1.0.93/32 + ipv6: 2064:100::5d/128 + Ethernet1: + ipv4: 10.0.0.185/31 + ipv6: fc00::172/126 + bp_interfaces: + ipv4: 
10.10.246.94/24 + ipv6: fc0a::5e/64 + ARISTA94T0: + properties: + - common + bgp: + asn: 64094 + peers: + 65100: + - 10.0.0.186 + - fc00::175 + interfaces: + Loopback0: + ipv4: 100.1.0.94/32 + ipv6: 2064:100::5e/128 + Ethernet1: + ipv4: 10.0.0.187/31 + ipv6: fc00::176/126 + bp_interfaces: + ipv4: 10.10.246.95/24 + ipv6: fc0a::5f/64 + ARISTA95T0: + properties: + - common + bgp: + asn: 64095 + peers: + 65100: + - 10.0.0.188 + - fc00::179 + interfaces: + Loopback0: + ipv4: 100.1.0.95/32 + ipv6: 2064:100::5f/128 + Ethernet1: + ipv4: 10.0.0.189/31 + ipv6: fc00::17a/126 + bp_interfaces: + ipv4: 10.10.246.96/24 + ipv6: fc0a::60/64 + ARISTA96T0: + properties: + - common + bgp: + asn: 64096 + peers: + 65100: + - 10.0.0.190 + - fc00::17d + interfaces: + Loopback0: + ipv4: 100.1.0.96/32 + ipv6: 2064:100::60/128 + Ethernet1: + ipv4: 10.0.0.191/31 + ipv6: fc00::17e/126 + bp_interfaces: + ipv4: 10.10.246.97/24 + ipv6: fc0a::61/64 + ARISTA97T0: + properties: + - common + bgp: + asn: 64097 + peers: + 65100: + - 10.0.0.192 + - fc00::181 + interfaces: + Loopback0: + ipv4: 100.1.0.97/32 + ipv6: 2064:100::61/128 + Ethernet1: + ipv4: 10.0.0.193/31 + ipv6: fc00::182/126 + bp_interfaces: + ipv4: 10.10.246.98/24 + ipv6: fc0a::62/64 + ARISTA98T0: + properties: + - common + bgp: + asn: 64098 + peers: + 65100: + - 10.0.0.194 + - fc00::185 + interfaces: + Loopback0: + ipv4: 100.1.0.98/32 + ipv6: 2064:100::62/128 + Ethernet1: + ipv4: 10.0.0.195/31 + ipv6: fc00::186/126 + bp_interfaces: + ipv4: 10.10.246.99/24 + ipv6: fc0a::63/64 + ARISTA99T0: + properties: + - common + bgp: + asn: 64099 + peers: + 65100: + - 10.0.0.196 + - fc00::189 + interfaces: + Loopback0: + ipv4: 100.1.0.99/32 + ipv6: 2064:100::63/128 + Ethernet1: + ipv4: 10.0.0.197/31 + ipv6: fc00::18a/126 + bp_interfaces: + ipv4: 10.10.246.100/24 + ipv6: fc0a::64/64 + ARISTA100T0: + properties: + - common + bgp: + asn: 64100 + peers: + 65100: + - 10.0.0.198 + - fc00::18d + interfaces: + Loopback0: + ipv4: 100.1.0.100/32 + ipv6: 
2064:100::64/128 + Ethernet1: + ipv4: 10.0.0.199/31 + ipv6: fc00::18e/126 + bp_interfaces: + ipv4: 10.10.246.101/24 + ipv6: fc0a::65/64 + ARISTA101T0: + properties: + - common + bgp: + asn: 64101 + peers: + 65100: + - 10.0.0.200 + - fc00::191 + interfaces: + Loopback0: + ipv4: 100.1.0.101/32 + ipv6: 2064:100::65/128 + Ethernet1: + ipv4: 10.0.0.201/31 + ipv6: fc00::192/126 + bp_interfaces: + ipv4: 10.10.246.102/24 + ipv6: fc0a::66/64 + ARISTA102T0: + properties: + - common + bgp: + asn: 64102 + peers: + 65100: + - 10.0.0.202 + - fc00::195 + interfaces: + Loopback0: + ipv4: 100.1.0.102/32 + ipv6: 2064:100::66/128 + Ethernet1: + ipv4: 10.0.0.203/31 + ipv6: fc00::196/126 + bp_interfaces: + ipv4: 10.10.246.103/24 + ipv6: fc0a::67/64 + ARISTA103T0: + properties: + - common + bgp: + asn: 64103 + peers: + 65100: + - 10.0.0.204 + - fc00::199 + interfaces: + Loopback0: + ipv4: 100.1.0.103/32 + ipv6: 2064:100::67/128 + Ethernet1: + ipv4: 10.0.0.205/31 + ipv6: fc00::19a/126 + bp_interfaces: + ipv4: 10.10.246.104/24 + ipv6: fc0a::68/64 + ARISTA104T0: + properties: + - common + bgp: + asn: 64104 + peers: + 65100: + - 10.0.0.206 + - fc00::19d + interfaces: + Loopback0: + ipv4: 100.1.0.104/32 + ipv6: 2064:100::68/128 + Ethernet1: + ipv4: 10.0.0.207/31 + ipv6: fc00::19e/126 + bp_interfaces: + ipv4: 10.10.246.105/24 + ipv6: fc0a::69/64 + ARISTA105T0: + properties: + - common + bgp: + asn: 64105 + peers: + 65100: + - 10.0.0.208 + - fc00::1a1 + interfaces: + Loopback0: + ipv4: 100.1.0.105/32 + ipv6: 2064:100::69/128 + Ethernet1: + ipv4: 10.0.0.209/31 + ipv6: fc00::1a2/126 + bp_interfaces: + ipv4: 10.10.246.106/24 + ipv6: fc0a::6a/64 + ARISTA106T0: + properties: + - common + bgp: + asn: 64106 + peers: + 65100: + - 10.0.0.210 + - fc00::1a5 + interfaces: + Loopback0: + ipv4: 100.1.0.106/32 + ipv6: 2064:100::6a/128 + Ethernet1: + ipv4: 10.0.0.211/31 + ipv6: fc00::1a6/126 + bp_interfaces: + ipv4: 10.10.246.107/24 + ipv6: fc0a::6b/64 + ARISTA107T0: + properties: + - common + bgp: + asn: 
64107 + peers: + 65100: + - 10.0.0.212 + - fc00::1a9 + interfaces: + Loopback0: + ipv4: 100.1.0.107/32 + ipv6: 2064:100::6b/128 + Ethernet1: + ipv4: 10.0.0.213/31 + ipv6: fc00::1aa/126 + bp_interfaces: + ipv4: 10.10.246.108/24 + ipv6: fc0a::6c/64 + ARISTA108T0: + properties: + - common + bgp: + asn: 64108 + peers: + 65100: + - 10.0.0.214 + - fc00::1ad + interfaces: + Loopback0: + ipv4: 100.1.0.108/32 + ipv6: 2064:100::6c/128 + Ethernet1: + ipv4: 10.0.0.215/31 + ipv6: fc00::1ae/126 + bp_interfaces: + ipv4: 10.10.246.109/24 + ipv6: fc0a::6d/64 + ARISTA109T0: + properties: + - common + bgp: + asn: 64109 + peers: + 65100: + - 10.0.0.216 + - fc00::1b1 + interfaces: + Loopback0: + ipv4: 100.1.0.109/32 + ipv6: 2064:100::6d/128 + Ethernet1: + ipv4: 10.0.0.217/31 + ipv6: fc00::1b2/126 + bp_interfaces: + ipv4: 10.10.246.110/24 + ipv6: fc0a::6e/64 + ARISTA110T0: + properties: + - common + bgp: + asn: 64110 + peers: + 65100: + - 10.0.0.218 + - fc00::1b5 + interfaces: + Loopback0: + ipv4: 100.1.0.110/32 + ipv6: 2064:100::6e/128 + Ethernet1: + ipv4: 10.0.0.219/31 + ipv6: fc00::1b6/126 + bp_interfaces: + ipv4: 10.10.246.111/24 + ipv6: fc0a::6f/64 + ARISTA111T0: + properties: + - common + bgp: + asn: 64111 + peers: + 65100: + - 10.0.0.220 + - fc00::1b9 + interfaces: + Loopback0: + ipv4: 100.1.0.111/32 + ipv6: 2064:100::6f/128 + Ethernet1: + ipv4: 10.0.0.221/31 + ipv6: fc00::1ba/126 + bp_interfaces: + ipv4: 10.10.246.112/24 + ipv6: fc0a::70/64 + ARISTA112T0: + properties: + - common + bgp: + asn: 64112 + peers: + 65100: + - 10.0.0.222 + - fc00::1bd + interfaces: + Loopback0: + ipv4: 100.1.0.112/32 + ipv6: 2064:100::70/128 + Ethernet1: + ipv4: 10.0.0.223/31 + ipv6: fc00::1be/126 + bp_interfaces: + ipv4: 10.10.246.113/24 + ipv6: fc0a::71/64 + ARISTA113T0: + properties: + - common + bgp: + asn: 64113 + peers: + 65100: + - 10.0.0.224 + - fc00::1c1 + interfaces: + Loopback0: + ipv4: 100.1.0.113/32 + ipv6: 2064:100::71/128 + Ethernet1: + ipv4: 10.0.0.225/31 + ipv6: fc00::1c2/126 + 
bp_interfaces: + ipv4: 10.10.246.114/24 + ipv6: fc0a::72/64 + ARISTA114T0: + properties: + - common + bgp: + asn: 64114 + peers: + 65100: + - 10.0.0.226 + - fc00::1c5 + interfaces: + Loopback0: + ipv4: 100.1.0.114/32 + ipv6: 2064:100::72/128 + Ethernet1: + ipv4: 10.0.0.227/31 + ipv6: fc00::1c6/126 + bp_interfaces: + ipv4: 10.10.246.115/24 + ipv6: fc0a::73/64 + ARISTA115T0: + properties: + - common + bgp: + asn: 64115 + peers: + 65100: + - 10.0.0.228 + - fc00::1c9 + interfaces: + Loopback0: + ipv4: 100.1.0.115/32 + ipv6: 2064:100::73/128 + Ethernet1: + ipv4: 10.0.0.229/31 + ipv6: fc00::1ca/126 + bp_interfaces: + ipv4: 10.10.246.116/24 + ipv6: fc0a::74/64 + ARISTA116T0: + properties: + - common + bgp: + asn: 64116 + peers: + 65100: + - 10.0.0.230 + - fc00::1cd + interfaces: + Loopback0: + ipv4: 100.1.0.116/32 + ipv6: 2064:100::74/128 + Ethernet1: + ipv4: 10.0.0.231/31 + ipv6: fc00::1ce/126 + bp_interfaces: + ipv4: 10.10.246.117/24 + ipv6: fc0a::75/64 + ARISTA117T0: + properties: + - common + bgp: + asn: 64117 + peers: + 65100: + - 10.0.0.232 + - fc00::1d1 + interfaces: + Loopback0: + ipv4: 100.1.0.117/32 + ipv6: 2064:100::75/128 + Ethernet1: + ipv4: 10.0.0.233/31 + ipv6: fc00::1d2/126 + bp_interfaces: + ipv4: 10.10.246.118/24 + ipv6: fc0a::76/64 + ARISTA118T0: + properties: + - common + bgp: + asn: 64118 + peers: + 65100: + - 10.0.0.234 + - fc00::1d5 + interfaces: + Loopback0: + ipv4: 100.1.0.118/32 + ipv6: 2064:100::76/128 + Ethernet1: + ipv4: 10.0.0.235/31 + ipv6: fc00::1d6/126 + bp_interfaces: + ipv4: 10.10.246.119/24 + ipv6: fc0a::77/64 + ARISTA119T0: + properties: + - common + bgp: + asn: 64119 + peers: + 65100: + - 10.0.0.236 + - fc00::1d9 + interfaces: + Loopback0: + ipv4: 100.1.0.119/32 + ipv6: 2064:100::77/128 + Ethernet1: + ipv4: 10.0.0.237/31 + ipv6: fc00::1da/126 + bp_interfaces: + ipv4: 10.10.246.120/24 + ipv6: fc0a::78/64 + ARISTA120T0: + properties: + - common + bgp: + asn: 64120 + peers: + 65100: + - 10.0.0.238 + - fc00::1dd + interfaces: + Loopback0: 
+ ipv4: 100.1.0.120/32 + ipv6: 2064:100::78/128 + Ethernet1: + ipv4: 10.0.0.239/31 + ipv6: fc00::1de/126 + bp_interfaces: + ipv4: 10.10.246.121/24 + ipv6: fc0a::79/64 + ARISTA121T0: + properties: + - common + bgp: + asn: 64121 + peers: + 65100: + - 10.0.0.240 + - fc00::1e1 + interfaces: + Loopback0: + ipv4: 100.1.0.121/32 + ipv6: 2064:100::79/128 + Ethernet1: + ipv4: 10.0.0.241/31 + ipv6: fc00::1e2/126 + bp_interfaces: + ipv4: 10.10.246.122/24 + ipv6: fc0a::7a/64 + ARISTA122T0: + properties: + - common + bgp: + asn: 64122 + peers: + 65100: + - 10.0.0.242 + - fc00::1e5 + interfaces: + Loopback0: + ipv4: 100.1.0.122/32 + ipv6: 2064:100::7a/128 + Ethernet1: + ipv4: 10.0.0.243/31 + ipv6: fc00::1e6/126 + bp_interfaces: + ipv4: 10.10.246.123/24 + ipv6: fc0a::7b/64 + ARISTA123T0: + properties: + - common + bgp: + asn: 64123 + peers: + 65100: + - 10.0.0.244 + - fc00::1e9 + interfaces: + Loopback0: + ipv4: 100.1.0.123/32 + ipv6: 2064:100::7b/128 + Ethernet1: + ipv4: 10.0.0.245/31 + ipv6: fc00::1ea/126 + bp_interfaces: + ipv4: 10.10.246.124/24 + ipv6: fc0a::7c/64 + ARISTA124T0: + properties: + - common + bgp: + asn: 64124 + peers: + 65100: + - 10.0.0.246 + - fc00::1ed + interfaces: + Loopback0: + ipv4: 100.1.0.124/32 + ipv6: 2064:100::7c/128 + Ethernet1: + ipv4: 10.0.0.247/31 + ipv6: fc00::1ee/126 + bp_interfaces: + ipv4: 10.10.246.125/24 + ipv6: fc0a::7d/64 + ARISTA125T0: + properties: + - common + bgp: + asn: 64125 + peers: + 65100: + - 10.0.0.248 + - fc00::1f1 + interfaces: + Loopback0: + ipv4: 100.1.0.125/32 + ipv6: 2064:100::7d/128 + Ethernet1: + ipv4: 10.0.0.249/31 + ipv6: fc00::1f2/126 + bp_interfaces: + ipv4: 10.10.246.126/24 + ipv6: fc0a::7e/64 + ARISTA126T0: + properties: + - common + bgp: + asn: 64126 + peers: + 65100: + - 10.0.0.250 + - fc00::1f5 + interfaces: + Loopback0: + ipv4: 100.1.0.126/32 + ipv6: 2064:100::7e/128 + Ethernet1: + ipv4: 10.0.0.251/31 + ipv6: fc00::1f6/126 + bp_interfaces: + ipv4: 10.10.246.127/24 + ipv6: fc0a::7f/64 + ARISTA127T0: + 
properties: + - common + bgp: + asn: 64127 + peers: + 65100: + - 10.0.0.252 + - fc00::1f9 + interfaces: + Loopback0: + ipv4: 100.1.0.127/32 + ipv6: 2064:100::7f/128 + Ethernet1: + ipv4: 10.0.0.253/31 + ipv6: fc00::1fa/126 + bp_interfaces: + ipv4: 10.10.246.128/24 + ipv6: fc0a::80/64 + ARISTA128T0: + properties: + - common + bgp: + asn: 64128 + peers: + 65100: + - 10.0.0.254 + - fc00::1fd + interfaces: + Loopback0: + ipv4: 100.1.0.128/32 + ipv6: 2064:100::80/128 + Ethernet1: + ipv4: 10.0.0.255/31 + ipv6: fc00::1fe/126 + bp_interfaces: + ipv4: 10.10.246.129/24 + ipv6: fc0a::81/64 From 4a494feee9b75040df05e2209ab6e82532c58d70 Mon Sep 17 00:00:00 2001 From: Riff Date: Thu, 7 Nov 2024 16:50:52 -0800 Subject: [PATCH 040/175] Fix VM name and downlink ASN numbers in t1-isolated-d224u8 topology (#15447) * Rename. * Fix the VM names, downlink ASN and VLAN offset. --- ...224u8.yaml => topo_t1-isolated-d224u8.yml} | 3738 ++++++++--------- 1 file changed, 1869 insertions(+), 1869 deletions(-) rename ansible/vars/{topo_t1-isolated-d224u8.yaml => topo_t1-isolated-d224u8.yml} (91%) diff --git a/ansible/vars/topo_t1-isolated-d224u8.yaml b/ansible/vars/topo_t1-isolated-d224u8.yml similarity index 91% rename from ansible/vars/topo_t1-isolated-d224u8.yaml rename to ansible/vars/topo_t1-isolated-d224u8.yml index 5f97f7fd713..f2f809366d5 100644 --- a/ansible/vars/topo_t1-isolated-d224u8.yaml +++ b/ansible/vars/topo_t1-isolated-d224u8.yml @@ -192,741 +192,741 @@ topology: vlans: - 47 vm_offset: 47 - ARISTA49T2: + ARISTA01T2: vlans: - 48 vm_offset: 48 - ARISTA50T2: + ARISTA02T2: vlans: - 49 vm_offset: 49 - ARISTA51T0: + ARISTA49T0: vlans: - - 56 + - 50 vm_offset: 50 - ARISTA52T0: + ARISTA50T0: vlans: - - 57 + - 51 vm_offset: 51 - ARISTA53T0: + ARISTA51T0: vlans: - - 58 + - 52 vm_offset: 52 - ARISTA54T0: + ARISTA52T0: vlans: - - 59 + - 53 vm_offset: 53 - ARISTA55T0: + ARISTA53T0: vlans: - - 60 + - 54 vm_offset: 54 - ARISTA56T0: + ARISTA54T0: vlans: - - 61 + - 55 vm_offset: 55 - ARISTA57T0: 
+ ARISTA55T0: vlans: - - 62 + - 56 vm_offset: 56 - ARISTA58T0: + ARISTA56T0: vlans: - - 63 + - 57 vm_offset: 57 - ARISTA59T2: + ARISTA03T2: vlans: - - 64 + - 58 vm_offset: 58 - ARISTA60T2: + ARISTA04T2: vlans: - - 65 + - 59 vm_offset: 59 - ARISTA61T0: + ARISTA57T0: vlans: - - 72 + - 60 vm_offset: 60 - ARISTA62T0: + ARISTA58T0: vlans: - - 73 + - 61 vm_offset: 61 - ARISTA63T0: + ARISTA59T0: vlans: - - 74 + - 62 vm_offset: 62 - ARISTA64T0: + ARISTA60T0: vlans: - - 75 + - 63 vm_offset: 63 - ARISTA65T0: + ARISTA61T0: vlans: - - 76 + - 64 vm_offset: 64 - ARISTA66T0: + ARISTA62T0: vlans: - - 77 + - 65 vm_offset: 65 - ARISTA67T0: + ARISTA63T0: vlans: - - 78 + - 66 vm_offset: 66 - ARISTA68T0: + ARISTA64T0: vlans: - - 79 + - 67 vm_offset: 67 - ARISTA69T0: + ARISTA65T0: vlans: - - 80 + - 68 vm_offset: 68 - ARISTA70T0: + ARISTA66T0: vlans: - - 81 + - 69 vm_offset: 69 - ARISTA71T0: + ARISTA67T0: vlans: - - 82 + - 70 vm_offset: 70 - ARISTA72T0: + ARISTA68T0: vlans: - - 83 + - 71 vm_offset: 71 - ARISTA73T0: + ARISTA69T0: vlans: - - 84 + - 72 vm_offset: 72 - ARISTA74T0: + ARISTA70T0: vlans: - - 85 + - 73 vm_offset: 73 - ARISTA75T0: + ARISTA71T0: vlans: - - 86 + - 74 vm_offset: 74 - ARISTA76T0: + ARISTA72T0: vlans: - - 87 + - 75 vm_offset: 75 - ARISTA77T0: + ARISTA73T0: vlans: - - 88 + - 76 vm_offset: 76 - ARISTA78T0: + ARISTA74T0: vlans: - - 89 + - 77 vm_offset: 77 - ARISTA79T0: + ARISTA75T0: vlans: - - 90 + - 78 vm_offset: 78 - ARISTA80T0: + ARISTA76T0: vlans: - - 91 + - 79 vm_offset: 79 - ARISTA81T0: + ARISTA77T0: vlans: - - 92 + - 80 vm_offset: 80 - ARISTA82T0: + ARISTA78T0: vlans: - - 93 + - 81 vm_offset: 81 - ARISTA83T0: + ARISTA79T0: vlans: - - 94 + - 82 vm_offset: 82 - ARISTA84T0: + ARISTA80T0: vlans: - - 95 + - 83 vm_offset: 83 - ARISTA85T0: + ARISTA81T0: vlans: - - 96 + - 84 vm_offset: 84 - ARISTA86T0: + ARISTA82T0: vlans: - - 97 + - 85 vm_offset: 85 - ARISTA87T0: + ARISTA83T0: vlans: - - 98 + - 86 vm_offset: 86 - ARISTA88T0: + ARISTA84T0: vlans: - - 99 + - 87 vm_offset: 
87 - ARISTA89T0: + ARISTA85T0: vlans: - - 100 + - 88 vm_offset: 88 - ARISTA90T0: + ARISTA86T0: vlans: - - 101 + - 89 vm_offset: 89 - ARISTA91T0: + ARISTA87T0: vlans: - - 102 + - 90 vm_offset: 90 - ARISTA92T0: + ARISTA88T0: vlans: - - 103 + - 91 vm_offset: 91 - ARISTA93T0: + ARISTA89T0: vlans: - - 104 + - 92 vm_offset: 92 - ARISTA94T0: + ARISTA90T0: vlans: - - 105 + - 93 vm_offset: 93 - ARISTA95T0: + ARISTA91T0: vlans: - - 106 + - 94 vm_offset: 94 - ARISTA96T0: + ARISTA92T0: vlans: - - 107 + - 95 vm_offset: 95 - ARISTA97T0: + ARISTA93T0: vlans: - - 108 + - 96 vm_offset: 96 - ARISTA98T0: + ARISTA94T0: vlans: - - 109 + - 97 vm_offset: 97 - ARISTA99T0: + ARISTA95T0: vlans: - - 110 + - 98 vm_offset: 98 - ARISTA100T0: + ARISTA96T0: vlans: - - 111 + - 99 vm_offset: 99 - ARISTA101T0: + ARISTA97T0: vlans: - - 112 + - 100 vm_offset: 100 - ARISTA102T0: + ARISTA98T0: vlans: - - 113 + - 101 vm_offset: 101 - ARISTA103T0: + ARISTA99T0: vlans: - - 114 + - 102 vm_offset: 102 - ARISTA104T0: + ARISTA100T0: vlans: - - 115 + - 103 vm_offset: 103 - ARISTA105T0: + ARISTA101T0: vlans: - - 116 + - 104 vm_offset: 104 - ARISTA106T0: + ARISTA102T0: vlans: - - 117 + - 105 vm_offset: 105 - ARISTA107T0: + ARISTA103T0: vlans: - - 118 + - 106 vm_offset: 106 - ARISTA108T0: + ARISTA104T0: vlans: - - 119 + - 107 vm_offset: 107 - ARISTA109T0: + ARISTA105T0: vlans: - - 120 + - 108 vm_offset: 108 - ARISTA110T0: + ARISTA106T0: vlans: - - 121 + - 109 vm_offset: 109 - ARISTA111T0: + ARISTA107T0: vlans: - - 122 + - 110 vm_offset: 110 - ARISTA112T0: + ARISTA108T0: vlans: - - 123 + - 111 vm_offset: 111 - ARISTA113T0: + ARISTA109T0: vlans: - - 124 + - 112 vm_offset: 112 - ARISTA114T0: + ARISTA110T0: vlans: - - 125 + - 113 vm_offset: 113 - ARISTA115T0: + ARISTA111T0: vlans: - - 126 + - 114 vm_offset: 114 - ARISTA116T0: + ARISTA112T0: vlans: - - 127 + - 115 vm_offset: 115 - ARISTA117T0: + ARISTA113T0: vlans: - - 128 + - 116 vm_offset: 116 - ARISTA118T0: + ARISTA114T0: vlans: - - 129 + - 117 vm_offset: 117 - 
ARISTA119T0: + ARISTA115T0: vlans: - - 130 + - 118 vm_offset: 118 - ARISTA120T0: + ARISTA116T0: vlans: - - 131 + - 119 vm_offset: 119 - ARISTA121T0: + ARISTA117T0: vlans: - - 132 + - 120 vm_offset: 120 - ARISTA122T0: + ARISTA118T0: vlans: - - 133 + - 121 vm_offset: 121 - ARISTA123T0: + ARISTA119T0: vlans: - - 134 + - 122 vm_offset: 122 - ARISTA124T0: + ARISTA120T0: vlans: - - 135 + - 123 vm_offset: 123 - ARISTA125T0: + ARISTA121T0: vlans: - - 136 + - 124 vm_offset: 124 - ARISTA126T0: + ARISTA122T0: vlans: - - 137 + - 125 vm_offset: 125 - ARISTA127T0: + ARISTA123T0: vlans: - - 138 + - 126 vm_offset: 126 - ARISTA128T0: + ARISTA124T0: vlans: - - 139 + - 127 vm_offset: 127 - ARISTA129T0: + ARISTA125T0: vlans: - - 140 + - 128 vm_offset: 128 - ARISTA130T0: + ARISTA126T0: vlans: - - 141 + - 129 vm_offset: 129 - ARISTA131T0: + ARISTA127T0: vlans: - - 142 + - 130 vm_offset: 130 - ARISTA132T0: + ARISTA128T0: vlans: - - 143 + - 131 vm_offset: 131 - ARISTA133T0: + ARISTA129T0: vlans: - - 144 + - 132 vm_offset: 132 - ARISTA134T0: + ARISTA130T0: vlans: - - 145 + - 133 vm_offset: 133 - ARISTA135T0: + ARISTA131T0: vlans: - - 146 + - 134 vm_offset: 134 - ARISTA136T0: + ARISTA132T0: vlans: - - 147 + - 135 vm_offset: 135 - ARISTA137T0: + ARISTA133T0: vlans: - - 148 + - 136 vm_offset: 136 - ARISTA138T0: + ARISTA134T0: vlans: - - 149 + - 137 vm_offset: 137 - ARISTA139T0: + ARISTA135T0: vlans: - - 150 + - 138 vm_offset: 138 - ARISTA140T0: + ARISTA136T0: vlans: - - 151 + - 139 vm_offset: 139 - ARISTA141T0: + ARISTA137T0: vlans: - - 152 + - 140 vm_offset: 140 - ARISTA142T0: + ARISTA138T0: vlans: - - 153 + - 141 vm_offset: 141 - ARISTA143T0: + ARISTA139T0: vlans: - - 154 + - 142 vm_offset: 142 - ARISTA144T0: + ARISTA140T0: vlans: - - 155 + - 143 vm_offset: 143 - ARISTA145T0: + ARISTA141T0: vlans: - - 156 + - 144 vm_offset: 144 - ARISTA146T0: + ARISTA142T0: vlans: - - 157 + - 145 vm_offset: 145 - ARISTA147T0: + ARISTA143T0: vlans: - - 158 + - 146 vm_offset: 146 - ARISTA148T0: + ARISTA144T0: 
vlans: - - 159 + - 147 vm_offset: 147 - ARISTA149T0: + ARISTA145T0: vlans: - - 160 + - 148 vm_offset: 148 - ARISTA150T0: + ARISTA146T0: vlans: - - 161 + - 149 vm_offset: 149 - ARISTA151T0: + ARISTA147T0: vlans: - - 162 + - 150 vm_offset: 150 - ARISTA152T0: + ARISTA148T0: vlans: - - 163 + - 151 vm_offset: 151 - ARISTA153T0: + ARISTA149T0: vlans: - - 164 + - 152 vm_offset: 152 - ARISTA154T0: + ARISTA150T0: vlans: - - 165 + - 153 vm_offset: 153 - ARISTA155T0: + ARISTA151T0: vlans: - - 166 + - 154 vm_offset: 154 - ARISTA156T0: + ARISTA152T0: vlans: - - 167 + - 155 vm_offset: 155 - ARISTA157T0: + ARISTA153T0: vlans: - - 168 + - 156 vm_offset: 156 - ARISTA158T0: + ARISTA154T0: vlans: - - 169 + - 157 vm_offset: 157 - ARISTA159T0: + ARISTA155T0: vlans: - - 170 + - 158 vm_offset: 158 - ARISTA160T0: + ARISTA156T0: vlans: - - 171 + - 159 vm_offset: 159 - ARISTA161T0: + ARISTA157T0: vlans: - - 172 + - 160 vm_offset: 160 - ARISTA162T0: + ARISTA158T0: vlans: - - 173 + - 161 vm_offset: 161 - ARISTA163T0: + ARISTA159T0: vlans: - - 174 + - 162 vm_offset: 162 - ARISTA164T0: + ARISTA160T0: vlans: - - 175 + - 163 vm_offset: 163 - ARISTA165T2: + ARISTA05T2: vlans: - - 176 + - 164 vm_offset: 164 - ARISTA166T2: + ARISTA06T2: vlans: - - 177 + - 165 vm_offset: 165 - ARISTA167T0: + ARISTA161T0: vlans: - - 184 + - 166 vm_offset: 166 - ARISTA168T0: + ARISTA162T0: vlans: - - 185 + - 167 vm_offset: 167 - ARISTA169T0: + ARISTA163T0: vlans: - - 186 + - 168 vm_offset: 168 - ARISTA170T0: + ARISTA164T0: vlans: - - 187 + - 169 vm_offset: 169 - ARISTA171T0: + ARISTA165T0: vlans: - - 188 + - 170 vm_offset: 170 - ARISTA172T0: + ARISTA166T0: vlans: - - 189 + - 171 vm_offset: 171 - ARISTA173T0: + ARISTA167T0: vlans: - - 190 + - 172 vm_offset: 172 - ARISTA174T0: + ARISTA168T0: vlans: - - 191 + - 173 vm_offset: 173 - ARISTA175T2: + ARISTA07T2: vlans: - - 192 + - 174 vm_offset: 174 - ARISTA176T2: + ARISTA08T2: vlans: - - 193 + - 175 vm_offset: 175 - ARISTA177T0: + ARISTA169T0: vlans: - - 200 + - 176 
vm_offset: 176 - ARISTA178T0: + ARISTA170T0: vlans: - - 201 + - 177 vm_offset: 177 - ARISTA179T0: + ARISTA171T0: vlans: - - 202 + - 178 vm_offset: 178 - ARISTA180T0: + ARISTA172T0: vlans: - - 203 + - 179 vm_offset: 179 - ARISTA181T0: + ARISTA173T0: vlans: - - 204 + - 180 vm_offset: 180 - ARISTA182T0: + ARISTA174T0: vlans: - - 205 + - 181 vm_offset: 181 - ARISTA183T0: + ARISTA175T0: vlans: - - 206 + - 182 vm_offset: 182 - ARISTA184T0: + ARISTA176T0: vlans: - - 207 + - 183 vm_offset: 183 - ARISTA185T0: + ARISTA177T0: vlans: - - 208 + - 184 vm_offset: 184 - ARISTA186T0: + ARISTA178T0: vlans: - - 209 + - 185 vm_offset: 185 - ARISTA187T0: + ARISTA179T0: vlans: - - 210 + - 186 vm_offset: 186 - ARISTA188T0: + ARISTA180T0: vlans: - - 211 + - 187 vm_offset: 187 - ARISTA189T0: + ARISTA181T0: vlans: - - 212 + - 188 vm_offset: 188 - ARISTA190T0: + ARISTA182T0: vlans: - - 213 + - 189 vm_offset: 189 - ARISTA191T0: + ARISTA183T0: vlans: - - 214 + - 190 vm_offset: 190 - ARISTA192T0: + ARISTA184T0: vlans: - - 215 + - 191 vm_offset: 191 - ARISTA193T0: + ARISTA185T0: vlans: - - 216 + - 192 vm_offset: 192 - ARISTA194T0: + ARISTA186T0: vlans: - - 217 + - 193 vm_offset: 193 - ARISTA195T0: + ARISTA187T0: vlans: - - 218 + - 194 vm_offset: 194 - ARISTA196T0: + ARISTA188T0: vlans: - - 219 + - 195 vm_offset: 195 - ARISTA197T0: + ARISTA189T0: vlans: - - 220 + - 196 vm_offset: 196 - ARISTA198T0: + ARISTA190T0: vlans: - - 221 + - 197 vm_offset: 197 - ARISTA199T0: + ARISTA191T0: vlans: - - 222 + - 198 vm_offset: 198 - ARISTA200T0: + ARISTA192T0: vlans: - - 223 + - 199 vm_offset: 199 - ARISTA201T0: + ARISTA193T0: vlans: - - 224 + - 200 vm_offset: 200 - ARISTA202T0: + ARISTA194T0: vlans: - - 225 + - 201 vm_offset: 201 - ARISTA203T0: + ARISTA195T0: vlans: - - 226 + - 202 vm_offset: 202 - ARISTA204T0: + ARISTA196T0: vlans: - - 227 + - 203 vm_offset: 203 - ARISTA205T0: + ARISTA197T0: vlans: - - 228 + - 204 vm_offset: 204 - ARISTA206T0: + ARISTA198T0: vlans: - - 229 + - 205 vm_offset: 205 - 
ARISTA207T0: + ARISTA199T0: vlans: - - 230 + - 206 vm_offset: 206 - ARISTA208T0: + ARISTA200T0: vlans: - - 231 + - 207 vm_offset: 207 - ARISTA209T0: + ARISTA201T0: vlans: - - 232 + - 208 vm_offset: 208 - ARISTA210T0: + ARISTA202T0: vlans: - - 233 + - 209 vm_offset: 209 - ARISTA211T0: + ARISTA203T0: vlans: - - 234 + - 210 vm_offset: 210 - ARISTA212T0: + ARISTA204T0: vlans: - - 235 + - 211 vm_offset: 211 - ARISTA213T0: + ARISTA205T0: vlans: - - 236 + - 212 vm_offset: 212 - ARISTA214T0: + ARISTA206T0: vlans: - - 237 + - 213 vm_offset: 213 - ARISTA215T0: + ARISTA207T0: vlans: - - 238 + - 214 vm_offset: 214 - ARISTA216T0: + ARISTA208T0: vlans: - - 239 + - 215 vm_offset: 215 - ARISTA217T0: + ARISTA209T0: vlans: - - 240 + - 216 vm_offset: 216 - ARISTA218T0: + ARISTA210T0: vlans: - - 241 + - 217 vm_offset: 217 - ARISTA219T0: + ARISTA211T0: vlans: - - 242 + - 218 vm_offset: 218 - ARISTA220T0: + ARISTA212T0: vlans: - - 243 + - 219 vm_offset: 219 - ARISTA221T0: + ARISTA213T0: vlans: - - 244 + - 220 vm_offset: 220 - ARISTA222T0: + ARISTA214T0: vlans: - - 245 + - 221 vm_offset: 221 - ARISTA223T0: + ARISTA215T0: vlans: - - 246 + - 222 vm_offset: 222 - ARISTA224T0: + ARISTA216T0: vlans: - - 247 + - 223 vm_offset: 223 - ARISTA225T0: + ARISTA217T0: vlans: - - 248 + - 224 vm_offset: 224 - ARISTA226T0: + ARISTA218T0: vlans: - - 249 + - 225 vm_offset: 225 - ARISTA227T0: + ARISTA219T0: vlans: - - 250 + - 226 vm_offset: 226 - ARISTA228T0: + ARISTA220T0: vlans: - - 251 + - 227 vm_offset: 227 - ARISTA229T0: + ARISTA221T0: vlans: - - 252 + - 228 vm_offset: 228 - ARISTA230T0: + ARISTA222T0: vlans: - - 253 + - 229 vm_offset: 229 - ARISTA231T0: + ARISTA223T0: vlans: - - 254 + - 230 vm_offset: 230 - ARISTA232T0: + ARISTA224T0: vlans: - - 255 + - 231 vm_offset: 231 configuration_properties: @@ -950,7 +950,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64001 peers: 65100: - 10.0.0.0 @@ -988,7 +988,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64003 peers: 
65100: - 10.0.0.4 @@ -1007,7 +1007,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64004 peers: 65100: - 10.0.0.6 @@ -1026,7 +1026,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64005 peers: 65100: - 10.0.0.8 @@ -1045,7 +1045,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64006 peers: 65100: - 10.0.0.10 @@ -1064,7 +1064,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64007 peers: 65100: - 10.0.0.12 @@ -1083,7 +1083,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64008 peers: 65100: - 10.0.0.14 @@ -1102,7 +1102,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64009 peers: 65100: - 10.0.0.16 @@ -1121,7 +1121,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64010 peers: 65100: - 10.0.0.18 @@ -1140,7 +1140,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64011 peers: 65100: - 10.0.0.20 @@ -1159,7 +1159,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64012 peers: 65100: - 10.0.0.22 @@ -1178,7 +1178,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64013 peers: 65100: - 10.0.0.24 @@ -1197,7 +1197,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64014 peers: 65100: - 10.0.0.26 @@ -1216,7 +1216,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64015 peers: 65100: - 10.0.0.28 @@ -1235,7 +1235,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64016 peers: 65100: - 10.0.0.30 @@ -1254,7 +1254,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64017 peers: 65100: - 10.0.0.32 @@ -1273,7 +1273,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64018 peers: 65100: - 10.0.0.34 @@ -1292,7 +1292,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64019 peers: 65100: - 10.0.0.36 @@ -1311,7 +1311,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64020 peers: 65100: - 10.0.0.38 @@ -1330,7 +1330,7 @@ 
configuration: properties: - common bgp: - asn: 64002 + asn: 64021 peers: 65100: - 10.0.0.40 @@ -1349,7 +1349,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64022 peers: 65100: - 10.0.0.42 @@ -1368,7 +1368,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64023 peers: 65100: - 10.0.0.44 @@ -1387,7 +1387,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64024 peers: 65100: - 10.0.0.46 @@ -1406,7 +1406,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64025 peers: 65100: - 10.0.0.48 @@ -1425,7 +1425,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64026 peers: 65100: - 10.0.0.50 @@ -1444,7 +1444,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64027 peers: 65100: - 10.0.0.52 @@ -1463,7 +1463,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64028 peers: 65100: - 10.0.0.54 @@ -1482,7 +1482,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64029 peers: 65100: - 10.0.0.56 @@ -1501,7 +1501,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64030 peers: 65100: - 10.0.0.58 @@ -1520,7 +1520,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64031 peers: 65100: - 10.0.0.60 @@ -1539,7 +1539,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64032 peers: 65100: - 10.0.0.62 @@ -1558,7 +1558,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64033 peers: 65100: - 10.0.0.64 @@ -1577,7 +1577,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64034 peers: 65100: - 10.0.0.66 @@ -1596,7 +1596,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64035 peers: 65100: - 10.0.0.68 @@ -1615,7 +1615,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64036 peers: 65100: - 10.0.0.70 @@ -1634,7 +1634,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64037 peers: 65100: - 10.0.0.72 @@ -1653,7 +1653,7 @@ configuration: properties: - common bgp: - 
asn: 64002 + asn: 64038 peers: 65100: - 10.0.0.74 @@ -1672,7 +1672,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64039 peers: 65100: - 10.0.0.76 @@ -1691,7 +1691,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64040 peers: 65100: - 10.0.0.78 @@ -1710,7 +1710,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64041 peers: 65100: - 10.0.0.80 @@ -1729,7 +1729,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64042 peers: 65100: - 10.0.0.82 @@ -1748,7 +1748,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64043 peers: 65100: - 10.0.0.84 @@ -1767,7 +1767,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64044 peers: 65100: - 10.0.0.86 @@ -1786,7 +1786,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64045 peers: 65100: - 10.0.0.88 @@ -1805,7 +1805,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64046 peers: 65100: - 10.0.0.90 @@ -1824,7 +1824,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64047 peers: 65100: - 10.0.0.92 @@ -1843,7 +1843,7 @@ configuration: properties: - common bgp: - asn: 64002 + asn: 64048 peers: 65100: - 10.0.0.94 @@ -1858,7 +1858,7 @@ configuration: bp_interfaces: ipv4: 10.10.246.49/24 ipv6: fc0a::31/64 - ARISTA49T2: + ARISTA01T2: properties: - common bgp: @@ -1877,7 +1877,7 @@ configuration: bp_interfaces: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 - ARISTA50T2: + ARISTA02T2: properties: - common bgp: @@ -1896,3461 +1896,3461 @@ configuration: bp_interfaces: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 - ARISTA51T0: + ARISTA49T0: properties: - common bgp: - asn: 64002 + asn: 64049 peers: 65100: - - 10.0.0.112 - - fc00::e1 + - 10.0.0.100 + - fc00::c9 interfaces: Loopback0: - ipv4: 100.1.0.57/32 - ipv6: 2064:100::39/128 + ipv4: 100.1.0.51/32 + ipv6: 2064:100::33/128 Ethernet1: - ipv4: 10.0.0.113/31 - ipv6: fc00::e2/126 + ipv4: 10.0.0.101/31 + ipv6: fc00::ca/126 bp_interfaces: ipv4: 10.10.246.52/24 ipv6: 
fc0a::34/64 - ARISTA52T0: + ARISTA50T0: properties: - common bgp: - asn: 64002 + asn: 64050 peers: 65100: - - 10.0.0.114 - - fc00::e5 + - 10.0.0.102 + - fc00::cd interfaces: Loopback0: - ipv4: 100.1.0.58/32 - ipv6: 2064:100::3a/128 + ipv4: 100.1.0.52/32 + ipv6: 2064:100::34/128 Ethernet1: - ipv4: 10.0.0.115/31 - ipv6: fc00::e6/126 + ipv4: 10.0.0.103/31 + ipv6: fc00::ce/126 bp_interfaces: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 - ARISTA53T0: + ARISTA51T0: properties: - common bgp: - asn: 64002 + asn: 64051 peers: 65100: - - 10.0.0.116 - - fc00::e9 + - 10.0.0.104 + - fc00::d1 interfaces: Loopback0: - ipv4: 100.1.0.59/32 - ipv6: 2064:100::3b/128 + ipv4: 100.1.0.53/32 + ipv6: 2064:100::35/128 Ethernet1: - ipv4: 10.0.0.117/31 - ipv6: fc00::ea/126 + ipv4: 10.0.0.105/31 + ipv6: fc00::d2/126 bp_interfaces: ipv4: 10.10.246.54/24 ipv6: fc0a::36/64 - ARISTA54T0: + ARISTA52T0: properties: - common bgp: - asn: 64002 + asn: 64052 peers: 65100: - - 10.0.0.118 - - fc00::ed + - 10.0.0.106 + - fc00::d5 interfaces: Loopback0: - ipv4: 100.1.0.60/32 - ipv6: 2064:100::3c/128 + ipv4: 100.1.0.54/32 + ipv6: 2064:100::36/128 Ethernet1: - ipv4: 10.0.0.119/31 - ipv6: fc00::ee/126 + ipv4: 10.0.0.107/31 + ipv6: fc00::d6/126 bp_interfaces: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 - ARISTA55T0: + ARISTA53T0: properties: - common bgp: - asn: 64002 + asn: 64053 peers: 65100: - - 10.0.0.120 - - fc00::f1 + - 10.0.0.108 + - fc00::d9 interfaces: Loopback0: - ipv4: 100.1.0.61/32 - ipv6: 2064:100::3d/128 + ipv4: 100.1.0.55/32 + ipv6: 2064:100::37/128 Ethernet1: - ipv4: 10.0.0.121/31 - ipv6: fc00::f2/126 + ipv4: 10.0.0.109/31 + ipv6: fc00::da/126 bp_interfaces: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 - ARISTA56T0: + ARISTA54T0: properties: - common bgp: - asn: 64002 + asn: 64054 peers: 65100: - - 10.0.0.122 - - fc00::f5 + - 10.0.0.110 + - fc00::dd interfaces: Loopback0: - ipv4: 100.1.0.62/32 - ipv6: 2064:100::3e/128 + ipv4: 100.1.0.56/32 + ipv6: 2064:100::38/128 Ethernet1: - ipv4: 10.0.0.123/31 - ipv6: 
fc00::f6/126 + ipv4: 10.0.0.111/31 + ipv6: fc00::de/126 bp_interfaces: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 - ARISTA57T0: + ARISTA55T0: properties: - common bgp: - asn: 64002 + asn: 64055 peers: 65100: - - 10.0.0.124 - - fc00::f9 + - 10.0.0.112 + - fc00::e1 interfaces: Loopback0: - ipv4: 100.1.0.63/32 - ipv6: 2064:100::3f/128 + ipv4: 100.1.0.57/32 + ipv6: 2064:100::39/128 Ethernet1: - ipv4: 10.0.0.125/31 - ipv6: fc00::fa/126 + ipv4: 10.0.0.113/31 + ipv6: fc00::e2/126 bp_interfaces: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 - ARISTA58T0: + ARISTA56T0: properties: - common bgp: - asn: 64002 + asn: 64056 peers: 65100: - - 10.0.0.126 - - fc00::fd + - 10.0.0.114 + - fc00::e5 interfaces: Loopback0: - ipv4: 100.1.0.64/32 - ipv6: 2064:100::40/128 + ipv4: 100.1.0.58/32 + ipv6: 2064:100::3a/128 Ethernet1: - ipv4: 10.0.0.127/31 - ipv6: fc00::fe/126 + ipv4: 10.0.0.115/31 + ipv6: fc00::e6/126 bp_interfaces: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 - ARISTA59T2: + ARISTA03T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.0.128 - - fc00::101 + - 10.0.0.116 + - fc00::e9 interfaces: Loopback0: - ipv4: 100.1.0.65/32 - ipv6: 2064:100::41/128 + ipv4: 100.1.0.59/32 + ipv6: 2064:100::3b/128 Ethernet1: - ipv4: 10.0.0.129/31 - ipv6: fc00::102/126 + ipv4: 10.0.0.117/31 + ipv6: fc00::ea/126 bp_interfaces: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 - ARISTA60T2: + ARISTA04T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.0.130 - - fc00::105 + - 10.0.0.118 + - fc00::ed interfaces: Loopback0: - ipv4: 100.1.0.66/32 - ipv6: 2064:100::42/128 + ipv4: 100.1.0.60/32 + ipv6: 2064:100::3c/128 Ethernet1: - ipv4: 10.0.0.131/31 - ipv6: fc00::106/126 + ipv4: 10.0.0.119/31 + ipv6: fc00::ee/126 bp_interfaces: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 - ARISTA61T0: + ARISTA57T0: properties: - common bgp: - asn: 64002 + asn: 64057 peers: - 65100: - - 10.0.0.144 - - fc00::121 + 65100: + - 10.0.0.120 + - fc00::f1 interfaces: Loopback0: - ipv4: 100.1.0.73/32 - ipv6: 2064:100::49/128 + 
ipv4: 100.1.0.61/32 + ipv6: 2064:100::3d/128 Ethernet1: - ipv4: 10.0.0.145/31 - ipv6: fc00::122/126 + ipv4: 10.0.0.121/31 + ipv6: fc00::f2/126 bp_interfaces: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 - ARISTA62T0: + ARISTA58T0: properties: - common bgp: - asn: 64002 + asn: 64058 peers: 65100: - - 10.0.0.146 - - fc00::125 + - 10.0.0.122 + - fc00::f5 interfaces: Loopback0: - ipv4: 100.1.0.74/32 - ipv6: 2064:100::4a/128 + ipv4: 100.1.0.62/32 + ipv6: 2064:100::3e/128 Ethernet1: - ipv4: 10.0.0.147/31 - ipv6: fc00::126/126 + ipv4: 10.0.0.123/31 + ipv6: fc00::f6/126 bp_interfaces: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 - ARISTA63T0: + ARISTA59T0: properties: - common bgp: - asn: 64002 + asn: 64059 peers: 65100: - - 10.0.0.148 - - fc00::129 + - 10.0.0.124 + - fc00::f9 interfaces: Loopback0: - ipv4: 100.1.0.75/32 - ipv6: 2064:100::4b/128 + ipv4: 100.1.0.63/32 + ipv6: 2064:100::3f/128 Ethernet1: - ipv4: 10.0.0.149/31 - ipv6: fc00::12a/126 + ipv4: 10.0.0.125/31 + ipv6: fc00::fa/126 bp_interfaces: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 - ARISTA64T0: + ARISTA60T0: properties: - common bgp: - asn: 64002 + asn: 64060 peers: 65100: - - 10.0.0.150 - - fc00::12d + - 10.0.0.126 + - fc00::fd interfaces: Loopback0: - ipv4: 100.1.0.76/32 - ipv6: 2064:100::4c/128 + ipv4: 100.1.0.64/32 + ipv6: 2064:100::40/128 Ethernet1: - ipv4: 10.0.0.151/31 - ipv6: fc00::12e/126 + ipv4: 10.0.0.127/31 + ipv6: fc00::fe/126 bp_interfaces: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 - ARISTA65T0: + ARISTA61T0: properties: - common bgp: - asn: 64002 + asn: 64061 peers: 65100: - - 10.0.0.152 - - fc00::131 + - 10.0.0.128 + - fc00::101 interfaces: Loopback0: - ipv4: 100.1.0.77/32 - ipv6: 2064:100::4d/128 + ipv4: 100.1.0.65/32 + ipv6: 2064:100::41/128 Ethernet1: - ipv4: 10.0.0.153/31 - ipv6: fc00::132/126 + ipv4: 10.0.0.129/31 + ipv6: fc00::102/126 bp_interfaces: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 - ARISTA66T0: + ARISTA62T0: properties: - common bgp: - asn: 64002 + asn: 64062 peers: 65100: - - 10.0.0.154 - - 
fc00::135 + - 10.0.0.130 + - fc00::105 interfaces: Loopback0: - ipv4: 100.1.0.78/32 - ipv6: 2064:100::4e/128 + ipv4: 100.1.0.66/32 + ipv6: 2064:100::42/128 Ethernet1: - ipv4: 10.0.0.155/31 - ipv6: fc00::136/126 + ipv4: 10.0.0.131/31 + ipv6: fc00::106/126 bp_interfaces: ipv4: 10.10.246.67/24 ipv6: fc0a::43/64 - ARISTA67T0: + ARISTA63T0: properties: - common bgp: - asn: 64002 + asn: 64063 peers: 65100: - - 10.0.0.156 - - fc00::139 + - 10.0.0.132 + - fc00::109 interfaces: Loopback0: - ipv4: 100.1.0.79/32 - ipv6: 2064:100::4f/128 + ipv4: 100.1.0.67/32 + ipv6: 2064:100::43/128 Ethernet1: - ipv4: 10.0.0.157/31 - ipv6: fc00::13a/126 + ipv4: 10.0.0.133/31 + ipv6: fc00::10a/126 bp_interfaces: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 - ARISTA68T0: + ARISTA64T0: properties: - common bgp: - asn: 64002 + asn: 64064 peers: 65100: - - 10.0.0.158 - - fc00::13d + - 10.0.0.134 + - fc00::10d interfaces: Loopback0: - ipv4: 100.1.0.80/32 - ipv6: 2064:100::50/128 + ipv4: 100.1.0.68/32 + ipv6: 2064:100::44/128 Ethernet1: - ipv4: 10.0.0.159/31 - ipv6: fc00::13e/126 + ipv4: 10.0.0.135/31 + ipv6: fc00::10e/126 bp_interfaces: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 - ARISTA69T0: + ARISTA65T0: properties: - common bgp: - asn: 64002 + asn: 64065 peers: 65100: - - 10.0.0.160 - - fc00::141 + - 10.0.0.136 + - fc00::111 interfaces: Loopback0: - ipv4: 100.1.0.81/32 - ipv6: 2064:100::51/128 + ipv4: 100.1.0.69/32 + ipv6: 2064:100::45/128 Ethernet1: - ipv4: 10.0.0.161/31 - ipv6: fc00::142/126 + ipv4: 10.0.0.137/31 + ipv6: fc00::112/126 bp_interfaces: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 - ARISTA70T0: + ARISTA66T0: properties: - common bgp: - asn: 64002 + asn: 64066 peers: 65100: - - 10.0.0.162 - - fc00::145 + - 10.0.0.138 + - fc00::115 interfaces: Loopback0: - ipv4: 100.1.0.82/32 - ipv6: 2064:100::52/128 + ipv4: 100.1.0.70/32 + ipv6: 2064:100::46/128 Ethernet1: - ipv4: 10.0.0.163/31 - ipv6: fc00::146/126 + ipv4: 10.0.0.139/31 + ipv6: fc00::116/126 bp_interfaces: ipv4: 10.10.246.71/24 ipv6: 
fc0a::47/64 - ARISTA71T0: + ARISTA67T0: properties: - common bgp: - asn: 64002 + asn: 64067 peers: 65100: - - 10.0.0.164 - - fc00::149 + - 10.0.0.140 + - fc00::119 interfaces: Loopback0: - ipv4: 100.1.0.83/32 - ipv6: 2064:100::53/128 + ipv4: 100.1.0.71/32 + ipv6: 2064:100::47/128 Ethernet1: - ipv4: 10.0.0.165/31 - ipv6: fc00::14a/126 + ipv4: 10.0.0.141/31 + ipv6: fc00::11a/126 bp_interfaces: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 - ARISTA72T0: + ARISTA68T0: properties: - common bgp: - asn: 64002 + asn: 64068 peers: 65100: - - 10.0.0.166 - - fc00::14d + - 10.0.0.142 + - fc00::11d interfaces: Loopback0: - ipv4: 100.1.0.84/32 - ipv6: 2064:100::54/128 + ipv4: 100.1.0.72/32 + ipv6: 2064:100::48/128 Ethernet1: - ipv4: 10.0.0.167/31 - ipv6: fc00::14e/126 + ipv4: 10.0.0.143/31 + ipv6: fc00::11e/126 bp_interfaces: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 - ARISTA73T0: + ARISTA69T0: properties: - common bgp: - asn: 64002 + asn: 64069 peers: 65100: - - 10.0.0.168 - - fc00::151 + - 10.0.0.144 + - fc00::121 interfaces: Loopback0: - ipv4: 100.1.0.85/32 - ipv6: 2064:100::55/128 + ipv4: 100.1.0.73/32 + ipv6: 2064:100::49/128 Ethernet1: - ipv4: 10.0.0.169/31 - ipv6: fc00::152/126 + ipv4: 10.0.0.145/31 + ipv6: fc00::122/126 bp_interfaces: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 - ARISTA74T0: + ARISTA70T0: properties: - common bgp: - asn: 64002 + asn: 64070 peers: 65100: - - 10.0.0.170 - - fc00::155 + - 10.0.0.146 + - fc00::125 interfaces: Loopback0: - ipv4: 100.1.0.86/32 - ipv6: 2064:100::56/128 + ipv4: 100.1.0.74/32 + ipv6: 2064:100::4a/128 Ethernet1: - ipv4: 10.0.0.171/31 - ipv6: fc00::156/126 + ipv4: 10.0.0.147/31 + ipv6: fc00::126/126 bp_interfaces: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 - ARISTA75T0: + ARISTA71T0: properties: - common bgp: - asn: 64002 + asn: 64071 peers: 65100: - - 10.0.0.172 - - fc00::159 + - 10.0.0.148 + - fc00::129 interfaces: Loopback0: - ipv4: 100.1.0.87/32 - ipv6: 2064:100::57/128 + ipv4: 100.1.0.75/32 + ipv6: 2064:100::4b/128 Ethernet1: - ipv4: 
10.0.0.173/31 - ipv6: fc00::15a/126 + ipv4: 10.0.0.149/31 + ipv6: fc00::12a/126 bp_interfaces: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 - ARISTA76T0: + ARISTA72T0: properties: - common bgp: - asn: 64002 + asn: 64072 peers: 65100: - - 10.0.0.174 - - fc00::15d + - 10.0.0.150 + - fc00::12d interfaces: Loopback0: - ipv4: 100.1.0.88/32 - ipv6: 2064:100::58/128 + ipv4: 100.1.0.76/32 + ipv6: 2064:100::4c/128 Ethernet1: - ipv4: 10.0.0.175/31 - ipv6: fc00::15e/126 + ipv4: 10.0.0.151/31 + ipv6: fc00::12e/126 bp_interfaces: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 - ARISTA77T0: + ARISTA73T0: properties: - common bgp: - asn: 64002 + asn: 64073 peers: 65100: - - 10.0.0.176 - - fc00::161 + - 10.0.0.152 + - fc00::131 interfaces: Loopback0: - ipv4: 100.1.0.89/32 - ipv6: 2064:100::59/128 + ipv4: 100.1.0.77/32 + ipv6: 2064:100::4d/128 Ethernet1: - ipv4: 10.0.0.177/31 - ipv6: fc00::162/126 + ipv4: 10.0.0.153/31 + ipv6: fc00::132/126 bp_interfaces: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 - ARISTA78T0: + ARISTA74T0: properties: - common bgp: - asn: 64002 + asn: 64074 peers: 65100: - - 10.0.0.178 - - fc00::165 + - 10.0.0.154 + - fc00::135 interfaces: Loopback0: - ipv4: 100.1.0.90/32 - ipv6: 2064:100::5a/128 + ipv4: 100.1.0.78/32 + ipv6: 2064:100::4e/128 Ethernet1: - ipv4: 10.0.0.179/31 - ipv6: fc00::166/126 + ipv4: 10.0.0.155/31 + ipv6: fc00::136/126 bp_interfaces: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 - ARISTA79T0: + ARISTA75T0: properties: - common bgp: - asn: 64002 + asn: 64075 peers: 65100: - - 10.0.0.180 - - fc00::169 + - 10.0.0.156 + - fc00::139 interfaces: Loopback0: - ipv4: 100.1.0.91/32 - ipv6: 2064:100::5b/128 + ipv4: 100.1.0.79/32 + ipv6: 2064:100::4f/128 Ethernet1: - ipv4: 10.0.0.181/31 - ipv6: fc00::16a/126 + ipv4: 10.0.0.157/31 + ipv6: fc00::13a/126 bp_interfaces: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 - ARISTA80T0: + ARISTA76T0: properties: - common bgp: - asn: 64002 + asn: 64076 peers: 65100: - - 10.0.0.182 - - fc00::16d + - 10.0.0.158 + - fc00::13d interfaces: 
Loopback0: - ipv4: 100.1.0.92/32 - ipv6: 2064:100::5c/128 + ipv4: 100.1.0.80/32 + ipv6: 2064:100::50/128 Ethernet1: - ipv4: 10.0.0.183/31 - ipv6: fc00::16e/126 + ipv4: 10.0.0.159/31 + ipv6: fc00::13e/126 bp_interfaces: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 - ARISTA81T0: + ARISTA77T0: properties: - common bgp: - asn: 64002 + asn: 64077 peers: 65100: - - 10.0.0.184 - - fc00::171 + - 10.0.0.160 + - fc00::141 interfaces: Loopback0: - ipv4: 100.1.0.93/32 - ipv6: 2064:100::5d/128 + ipv4: 100.1.0.81/32 + ipv6: 2064:100::51/128 Ethernet1: - ipv4: 10.0.0.185/31 - ipv6: fc00::172/126 + ipv4: 10.0.0.161/31 + ipv6: fc00::142/126 bp_interfaces: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 - ARISTA82T0: + ARISTA78T0: properties: - common bgp: - asn: 64002 + asn: 64078 peers: 65100: - - 10.0.0.186 - - fc00::175 + - 10.0.0.162 + - fc00::145 interfaces: Loopback0: - ipv4: 100.1.0.94/32 - ipv6: 2064:100::5e/128 + ipv4: 100.1.0.82/32 + ipv6: 2064:100::52/128 Ethernet1: - ipv4: 10.0.0.187/31 - ipv6: fc00::176/126 + ipv4: 10.0.0.163/31 + ipv6: fc00::146/126 bp_interfaces: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 - ARISTA83T0: + ARISTA79T0: properties: - common bgp: - asn: 64002 + asn: 64079 peers: 65100: - - 10.0.0.188 - - fc00::179 + - 10.0.0.164 + - fc00::149 interfaces: Loopback0: - ipv4: 100.1.0.95/32 - ipv6: 2064:100::5f/128 + ipv4: 100.1.0.83/32 + ipv6: 2064:100::53/128 Ethernet1: - ipv4: 10.0.0.189/31 - ipv6: fc00::17a/126 + ipv4: 10.0.0.165/31 + ipv6: fc00::14a/126 bp_interfaces: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 - ARISTA84T0: + ARISTA80T0: properties: - common bgp: - asn: 64002 + asn: 64080 peers: 65100: - - 10.0.0.190 - - fc00::17d + - 10.0.0.166 + - fc00::14d interfaces: Loopback0: - ipv4: 100.1.0.96/32 - ipv6: 2064:100::60/128 + ipv4: 100.1.0.84/32 + ipv6: 2064:100::54/128 Ethernet1: - ipv4: 10.0.0.191/31 - ipv6: fc00::17e/126 + ipv4: 10.0.0.167/31 + ipv6: fc00::14e/126 bp_interfaces: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 - ARISTA85T0: + ARISTA81T0: properties: - common 
bgp: - asn: 64002 + asn: 64081 peers: 65100: - - 10.0.0.192 - - fc00::181 + - 10.0.0.168 + - fc00::151 interfaces: Loopback0: - ipv4: 100.1.0.97/32 - ipv6: 2064:100::61/128 + ipv4: 100.1.0.85/32 + ipv6: 2064:100::55/128 Ethernet1: - ipv4: 10.0.0.193/31 - ipv6: fc00::182/126 + ipv4: 10.0.0.169/31 + ipv6: fc00::152/126 bp_interfaces: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 - ARISTA86T0: + ARISTA82T0: properties: - common bgp: - asn: 64002 + asn: 64082 peers: 65100: - - 10.0.0.194 - - fc00::185 + - 10.0.0.170 + - fc00::155 interfaces: Loopback0: - ipv4: 100.1.0.98/32 - ipv6: 2064:100::62/128 + ipv4: 100.1.0.86/32 + ipv6: 2064:100::56/128 Ethernet1: - ipv4: 10.0.0.195/31 - ipv6: fc00::186/126 + ipv4: 10.0.0.171/31 + ipv6: fc00::156/126 bp_interfaces: ipv4: 10.10.246.87/24 ipv6: fc0a::57/64 - ARISTA87T0: + ARISTA83T0: properties: - common bgp: - asn: 64002 + asn: 64083 peers: 65100: - - 10.0.0.196 - - fc00::189 + - 10.0.0.172 + - fc00::159 interfaces: Loopback0: - ipv4: 100.1.0.99/32 - ipv6: 2064:100::63/128 + ipv4: 100.1.0.87/32 + ipv6: 2064:100::57/128 Ethernet1: - ipv4: 10.0.0.197/31 - ipv6: fc00::18a/126 + ipv4: 10.0.0.173/31 + ipv6: fc00::15a/126 bp_interfaces: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 - ARISTA88T0: + ARISTA84T0: properties: - common bgp: - asn: 64002 + asn: 64084 peers: 65100: - - 10.0.0.198 - - fc00::18d + - 10.0.0.174 + - fc00::15d interfaces: Loopback0: - ipv4: 100.1.0.100/32 - ipv6: 2064:100::64/128 + ipv4: 100.1.0.88/32 + ipv6: 2064:100::58/128 Ethernet1: - ipv4: 10.0.0.199/31 - ipv6: fc00::18e/126 + ipv4: 10.0.0.175/31 + ipv6: fc00::15e/126 bp_interfaces: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 - ARISTA89T0: + ARISTA85T0: properties: - common bgp: - asn: 64002 + asn: 64085 peers: 65100: - - 10.0.0.200 - - fc00::191 + - 10.0.0.176 + - fc00::161 interfaces: Loopback0: - ipv4: 100.1.0.101/32 - ipv6: 2064:100::65/128 + ipv4: 100.1.0.89/32 + ipv6: 2064:100::59/128 Ethernet1: - ipv4: 10.0.0.201/31 - ipv6: fc00::192/126 + ipv4: 10.0.0.177/31 + ipv6: 
fc00::162/126 bp_interfaces: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 - ARISTA90T0: + ARISTA86T0: properties: - common bgp: - asn: 64002 + asn: 64086 peers: 65100: - - 10.0.0.202 - - fc00::195 + - 10.0.0.178 + - fc00::165 interfaces: Loopback0: - ipv4: 100.1.0.102/32 - ipv6: 2064:100::66/128 + ipv4: 100.1.0.90/32 + ipv6: 2064:100::5a/128 Ethernet1: - ipv4: 10.0.0.203/31 - ipv6: fc00::196/126 + ipv4: 10.0.0.179/31 + ipv6: fc00::166/126 bp_interfaces: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 - ARISTA91T0: + ARISTA87T0: properties: - common bgp: - asn: 64002 + asn: 64087 peers: 65100: - - 10.0.0.204 - - fc00::199 + - 10.0.0.180 + - fc00::169 interfaces: Loopback0: - ipv4: 100.1.0.103/32 - ipv6: 2064:100::67/128 + ipv4: 100.1.0.91/32 + ipv6: 2064:100::5b/128 Ethernet1: - ipv4: 10.0.0.205/31 - ipv6: fc00::19a/126 + ipv4: 10.0.0.181/31 + ipv6: fc00::16a/126 bp_interfaces: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 - ARISTA92T0: + ARISTA88T0: properties: - common bgp: - asn: 64002 + asn: 64088 peers: 65100: - - 10.0.0.206 - - fc00::19d + - 10.0.0.182 + - fc00::16d interfaces: Loopback0: - ipv4: 100.1.0.104/32 - ipv6: 2064:100::68/128 + ipv4: 100.1.0.92/32 + ipv6: 2064:100::5c/128 Ethernet1: - ipv4: 10.0.0.207/31 - ipv6: fc00::19e/126 + ipv4: 10.0.0.183/31 + ipv6: fc00::16e/126 bp_interfaces: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 - ARISTA93T0: + ARISTA89T0: properties: - common bgp: - asn: 64002 + asn: 64089 peers: 65100: - - 10.0.0.208 - - fc00::1a1 + - 10.0.0.184 + - fc00::171 interfaces: Loopback0: - ipv4: 100.1.0.105/32 - ipv6: 2064:100::69/128 + ipv4: 100.1.0.93/32 + ipv6: 2064:100::5d/128 Ethernet1: - ipv4: 10.0.0.209/31 - ipv6: fc00::1a2/126 + ipv4: 10.0.0.185/31 + ipv6: fc00::172/126 bp_interfaces: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 - ARISTA94T0: + ARISTA90T0: properties: - common bgp: - asn: 64002 + asn: 64090 peers: 65100: - - 10.0.0.210 - - fc00::1a5 + - 10.0.0.186 + - fc00::175 interfaces: Loopback0: - ipv4: 100.1.0.106/32 - ipv6: 2064:100::6a/128 + ipv4: 
100.1.0.94/32 + ipv6: 2064:100::5e/128 Ethernet1: - ipv4: 10.0.0.211/31 - ipv6: fc00::1a6/126 + ipv4: 10.0.0.187/31 + ipv6: fc00::176/126 bp_interfaces: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 - ARISTA95T0: + ARISTA91T0: properties: - common bgp: - asn: 64002 + asn: 64091 peers: 65100: - - 10.0.0.212 - - fc00::1a9 + - 10.0.0.188 + - fc00::179 interfaces: Loopback0: - ipv4: 100.1.0.107/32 - ipv6: 2064:100::6b/128 + ipv4: 100.1.0.95/32 + ipv6: 2064:100::5f/128 Ethernet1: - ipv4: 10.0.0.213/31 - ipv6: fc00::1aa/126 + ipv4: 10.0.0.189/31 + ipv6: fc00::17a/126 bp_interfaces: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 - ARISTA96T0: + ARISTA92T0: properties: - common bgp: - asn: 64002 + asn: 64092 peers: 65100: - - 10.0.0.214 - - fc00::1ad + - 10.0.0.190 + - fc00::17d interfaces: Loopback0: - ipv4: 100.1.0.108/32 - ipv6: 2064:100::6c/128 + ipv4: 100.1.0.96/32 + ipv6: 2064:100::60/128 Ethernet1: - ipv4: 10.0.0.215/31 - ipv6: fc00::1ae/126 + ipv4: 10.0.0.191/31 + ipv6: fc00::17e/126 bp_interfaces: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 - ARISTA97T0: + ARISTA93T0: properties: - common bgp: - asn: 64002 + asn: 64093 peers: 65100: - - 10.0.0.216 - - fc00::1b1 + - 10.0.0.192 + - fc00::181 interfaces: Loopback0: - ipv4: 100.1.0.109/32 - ipv6: 2064:100::6d/128 - Ethernet1: - ipv4: 10.0.0.217/31 - ipv6: fc00::1b2/126 + ipv4: 100.1.0.97/32 + ipv6: 2064:100::61/128 + Ethernet1: + ipv4: 10.0.0.193/31 + ipv6: fc00::182/126 bp_interfaces: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 - ARISTA98T0: + ARISTA94T0: properties: - common bgp: - asn: 64002 + asn: 64094 peers: 65100: - - 10.0.0.218 - - fc00::1b5 + - 10.0.0.194 + - fc00::185 interfaces: Loopback0: - ipv4: 100.1.0.110/32 - ipv6: 2064:100::6e/128 + ipv4: 100.1.0.98/32 + ipv6: 2064:100::62/128 Ethernet1: - ipv4: 10.0.0.219/31 - ipv6: fc00::1b6/126 + ipv4: 10.0.0.195/31 + ipv6: fc00::186/126 bp_interfaces: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 - ARISTA99T0: + ARISTA95T0: properties: - common bgp: - asn: 64002 + asn: 64095 peers: 65100: - 
- 10.0.0.220 - - fc00::1b9 + - 10.0.0.196 + - fc00::189 interfaces: Loopback0: - ipv4: 100.1.0.111/32 - ipv6: 2064:100::6f/128 + ipv4: 100.1.0.99/32 + ipv6: 2064:100::63/128 Ethernet1: - ipv4: 10.0.0.221/31 - ipv6: fc00::1ba/126 + ipv4: 10.0.0.197/31 + ipv6: fc00::18a/126 bp_interfaces: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 - ARISTA100T0: + ARISTA96T0: properties: - common bgp: - asn: 64002 + asn: 64096 peers: 65100: - - 10.0.0.222 - - fc00::1bd + - 10.0.0.198 + - fc00::18d interfaces: Loopback0: - ipv4: 100.1.0.112/32 - ipv6: 2064:100::70/128 + ipv4: 100.1.0.100/32 + ipv6: 2064:100::64/128 Ethernet1: - ipv4: 10.0.0.223/31 - ipv6: fc00::1be/126 + ipv4: 10.0.0.199/31 + ipv6: fc00::18e/126 bp_interfaces: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 - ARISTA101T0: + ARISTA97T0: properties: - common bgp: - asn: 64002 + asn: 64097 peers: 65100: - - 10.0.0.224 - - fc00::1c1 + - 10.0.0.200 + - fc00::191 interfaces: Loopback0: - ipv4: 100.1.0.113/32 - ipv6: 2064:100::71/128 + ipv4: 100.1.0.101/32 + ipv6: 2064:100::65/128 Ethernet1: - ipv4: 10.0.0.225/31 - ipv6: fc00::1c2/126 + ipv4: 10.0.0.201/31 + ipv6: fc00::192/126 bp_interfaces: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 - ARISTA102T0: + ARISTA98T0: properties: - common bgp: - asn: 64002 + asn: 64098 peers: 65100: - - 10.0.0.226 - - fc00::1c5 + - 10.0.0.202 + - fc00::195 interfaces: Loopback0: - ipv4: 100.1.0.114/32 - ipv6: 2064:100::72/128 + ipv4: 100.1.0.102/32 + ipv6: 2064:100::66/128 Ethernet1: - ipv4: 10.0.0.227/31 - ipv6: fc00::1c6/126 + ipv4: 10.0.0.203/31 + ipv6: fc00::196/126 bp_interfaces: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 - ARISTA103T0: + ARISTA99T0: properties: - common bgp: - asn: 64002 + asn: 64099 peers: 65100: - - 10.0.0.228 - - fc00::1c9 + - 10.0.0.204 + - fc00::199 interfaces: Loopback0: - ipv4: 100.1.0.115/32 - ipv6: 2064:100::73/128 + ipv4: 100.1.0.103/32 + ipv6: 2064:100::67/128 Ethernet1: - ipv4: 10.0.0.229/31 - ipv6: fc00::1ca/126 + ipv4: 10.0.0.205/31 + ipv6: fc00::19a/126 bp_interfaces: 
ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 - ARISTA104T0: + ARISTA100T0: properties: - common bgp: - asn: 64002 + asn: 64100 peers: 65100: - - 10.0.0.230 - - fc00::1cd + - 10.0.0.206 + - fc00::19d interfaces: Loopback0: - ipv4: 100.1.0.116/32 - ipv6: 2064:100::74/128 + ipv4: 100.1.0.104/32 + ipv6: 2064:100::68/128 Ethernet1: - ipv4: 10.0.0.231/31 - ipv6: fc00::1ce/126 + ipv4: 10.0.0.207/31 + ipv6: fc00::19e/126 bp_interfaces: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 - ARISTA105T0: + ARISTA101T0: properties: - common bgp: - asn: 64002 + asn: 64101 peers: 65100: - - 10.0.0.232 - - fc00::1d1 + - 10.0.0.208 + - fc00::1a1 interfaces: Loopback0: - ipv4: 100.1.0.117/32 - ipv6: 2064:100::75/128 + ipv4: 100.1.0.105/32 + ipv6: 2064:100::69/128 Ethernet1: - ipv4: 10.0.0.233/31 - ipv6: fc00::1d2/126 + ipv4: 10.0.0.209/31 + ipv6: fc00::1a2/126 bp_interfaces: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 - ARISTA106T0: + ARISTA102T0: properties: - common bgp: - asn: 64002 + asn: 64102 peers: 65100: - - 10.0.0.234 - - fc00::1d5 + - 10.0.0.210 + - fc00::1a5 interfaces: Loopback0: - ipv4: 100.1.0.118/32 - ipv6: 2064:100::76/128 + ipv4: 100.1.0.106/32 + ipv6: 2064:100::6a/128 Ethernet1: - ipv4: 10.0.0.235/31 - ipv6: fc00::1d6/126 + ipv4: 10.0.0.211/31 + ipv6: fc00::1a6/126 bp_interfaces: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 - ARISTA107T0: + ARISTA103T0: properties: - common bgp: - asn: 64002 + asn: 64103 peers: 65100: - - 10.0.0.236 - - fc00::1d9 + - 10.0.0.212 + - fc00::1a9 interfaces: Loopback0: - ipv4: 100.1.0.119/32 - ipv6: 2064:100::77/128 + ipv4: 100.1.0.107/32 + ipv6: 2064:100::6b/128 Ethernet1: - ipv4: 10.0.0.237/31 - ipv6: fc00::1da/126 + ipv4: 10.0.0.213/31 + ipv6: fc00::1aa/126 bp_interfaces: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 - ARISTA108T0: + ARISTA104T0: properties: - common bgp: - asn: 64002 + asn: 64104 peers: 65100: - - 10.0.0.238 - - fc00::1dd + - 10.0.0.214 + - fc00::1ad interfaces: Loopback0: - ipv4: 100.1.0.120/32 - ipv6: 2064:100::78/128 + ipv4: 
100.1.0.108/32 + ipv6: 2064:100::6c/128 Ethernet1: - ipv4: 10.0.0.239/31 - ipv6: fc00::1de/126 + ipv4: 10.0.0.215/31 + ipv6: fc00::1ae/126 bp_interfaces: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 - ARISTA109T0: + ARISTA105T0: properties: - common bgp: - asn: 64002 + asn: 64105 peers: 65100: - - 10.0.0.240 - - fc00::1e1 + - 10.0.0.216 + - fc00::1b1 interfaces: Loopback0: - ipv4: 100.1.0.121/32 - ipv6: 2064:100::79/128 + ipv4: 100.1.0.109/32 + ipv6: 2064:100::6d/128 Ethernet1: - ipv4: 10.0.0.241/31 - ipv6: fc00::1e2/126 + ipv4: 10.0.0.217/31 + ipv6: fc00::1b2/126 bp_interfaces: ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 - ARISTA110T0: + ARISTA106T0: properties: - common bgp: - asn: 64002 + asn: 64106 peers: 65100: - - 10.0.0.242 - - fc00::1e5 + - 10.0.0.218 + - fc00::1b5 interfaces: Loopback0: - ipv4: 100.1.0.122/32 - ipv6: 2064:100::7a/128 + ipv4: 100.1.0.110/32 + ipv6: 2064:100::6e/128 Ethernet1: - ipv4: 10.0.0.243/31 - ipv6: fc00::1e6/126 + ipv4: 10.0.0.219/31 + ipv6: fc00::1b6/126 bp_interfaces: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 - ARISTA111T0: + ARISTA107T0: properties: - common bgp: - asn: 64002 + asn: 64107 peers: 65100: - - 10.0.0.244 - - fc00::1e9 + - 10.0.0.220 + - fc00::1b9 interfaces: Loopback0: - ipv4: 100.1.0.123/32 - ipv6: 2064:100::7b/128 + ipv4: 100.1.0.111/32 + ipv6: 2064:100::6f/128 Ethernet1: - ipv4: 10.0.0.245/31 - ipv6: fc00::1ea/126 + ipv4: 10.0.0.221/31 + ipv6: fc00::1ba/126 bp_interfaces: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 - ARISTA112T0: + ARISTA108T0: properties: - common bgp: - asn: 64002 + asn: 64108 peers: 65100: - - 10.0.0.246 - - fc00::1ed + - 10.0.0.222 + - fc00::1bd interfaces: Loopback0: - ipv4: 100.1.0.124/32 - ipv6: 2064:100::7c/128 + ipv4: 100.1.0.112/32 + ipv6: 2064:100::70/128 Ethernet1: - ipv4: 10.0.0.247/31 - ipv6: fc00::1ee/126 + ipv4: 10.0.0.223/31 + ipv6: fc00::1be/126 bp_interfaces: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 - ARISTA113T0: + ARISTA109T0: properties: - common bgp: - asn: 64002 + asn: 64109 peers: 
65100: - - 10.0.0.248 - - fc00::1f1 + - 10.0.0.224 + - fc00::1c1 interfaces: Loopback0: - ipv4: 100.1.0.125/32 - ipv6: 2064:100::7d/128 + ipv4: 100.1.0.113/32 + ipv6: 2064:100::71/128 Ethernet1: - ipv4: 10.0.0.249/31 - ipv6: fc00::1f2/126 + ipv4: 10.0.0.225/31 + ipv6: fc00::1c2/126 bp_interfaces: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 - ARISTA114T0: + ARISTA110T0: properties: - common bgp: - asn: 64002 + asn: 64110 peers: 65100: - - 10.0.0.250 - - fc00::1f5 + - 10.0.0.226 + - fc00::1c5 interfaces: Loopback0: - ipv4: 100.1.0.126/32 - ipv6: 2064:100::7e/128 + ipv4: 100.1.0.114/32 + ipv6: 2064:100::72/128 Ethernet1: - ipv4: 10.0.0.251/31 - ipv6: fc00::1f6/126 + ipv4: 10.0.0.227/31 + ipv6: fc00::1c6/126 bp_interfaces: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 - ARISTA115T0: + ARISTA111T0: properties: - common bgp: - asn: 64002 + asn: 64111 peers: 65100: - - 10.0.0.252 - - fc00::1f9 + - 10.0.0.228 + - fc00::1c9 interfaces: Loopback0: - ipv4: 100.1.0.127/32 - ipv6: 2064:100::7f/128 + ipv4: 100.1.0.115/32 + ipv6: 2064:100::73/128 Ethernet1: - ipv4: 10.0.0.253/31 - ipv6: fc00::1fa/126 + ipv4: 10.0.0.229/31 + ipv6: fc00::1ca/126 bp_interfaces: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 - ARISTA116T0: + ARISTA112T0: properties: - common bgp: - asn: 64002 + asn: 64112 peers: 65100: - - 10.0.0.254 - - fc00::1fd + - 10.0.0.230 + - fc00::1cd interfaces: Loopback0: - ipv4: 100.1.0.128/32 - ipv6: 2064:100::80/128 + ipv4: 100.1.0.116/32 + ipv6: 2064:100::74/128 Ethernet1: - ipv4: 10.0.0.255/31 - ipv6: fc00::1fe/126 + ipv4: 10.0.0.231/31 + ipv6: fc00::1ce/126 bp_interfaces: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 - ARISTA117T0: + ARISTA113T0: properties: - common bgp: - asn: 64002 + asn: 64113 peers: 65100: - - 10.0.1.0 - - fc00::201 + - 10.0.0.232 + - fc00::1d1 interfaces: Loopback0: - ipv4: 100.1.0.129/32 - ipv6: 2064:100::81/128 + ipv4: 100.1.0.117/32 + ipv6: 2064:100::75/128 Ethernet1: - ipv4: 10.0.1.1/31 - ipv6: fc00::202/126 + ipv4: 10.0.0.233/31 + ipv6: fc00::1d2/126 
bp_interfaces: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 - ARISTA118T0: + ARISTA114T0: properties: - common bgp: - asn: 64002 + asn: 64114 peers: 65100: - - 10.0.1.2 - - fc00::205 + - 10.0.0.234 + - fc00::1d5 interfaces: Loopback0: - ipv4: 100.1.0.130/32 - ipv6: 2064:100::82/128 + ipv4: 100.1.0.118/32 + ipv6: 2064:100::76/128 Ethernet1: - ipv4: 10.0.1.3/31 - ipv6: fc00::206/126 + ipv4: 10.0.0.235/31 + ipv6: fc00::1d6/126 bp_interfaces: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 - ARISTA119T0: + ARISTA115T0: properties: - common bgp: - asn: 64002 + asn: 64115 peers: 65100: - - 10.0.1.4 - - fc00::209 + - 10.0.0.236 + - fc00::1d9 interfaces: Loopback0: - ipv4: 100.1.0.131/32 - ipv6: 2064:100::83/128 + ipv4: 100.1.0.119/32 + ipv6: 2064:100::77/128 Ethernet1: - ipv4: 10.0.1.5/31 - ipv6: fc00::20a/126 + ipv4: 10.0.0.237/31 + ipv6: fc00::1da/126 bp_interfaces: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 - ARISTA120T0: + ARISTA116T0: properties: - common bgp: - asn: 64002 + asn: 64116 peers: 65100: - - 10.0.1.6 - - fc00::20d + - 10.0.0.238 + - fc00::1dd interfaces: Loopback0: - ipv4: 100.1.0.132/32 - ipv6: 2064:100::84/128 + ipv4: 100.1.0.120/32 + ipv6: 2064:100::78/128 Ethernet1: - ipv4: 10.0.1.7/31 - ipv6: fc00::20e/126 + ipv4: 10.0.0.239/31 + ipv6: fc00::1de/126 bp_interfaces: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 - ARISTA121T0: + ARISTA117T0: properties: - common bgp: - asn: 64002 + asn: 64117 peers: 65100: - - 10.0.1.8 - - fc00::211 + - 10.0.0.240 + - fc00::1e1 interfaces: Loopback0: - ipv4: 100.1.0.133/32 - ipv6: 2064:100::85/128 + ipv4: 100.1.0.121/32 + ipv6: 2064:100::79/128 Ethernet1: - ipv4: 10.0.1.9/31 - ipv6: fc00::212/126 + ipv4: 10.0.0.241/31 + ipv6: fc00::1e2/126 bp_interfaces: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 - ARISTA122T0: + ARISTA118T0: properties: - common bgp: - asn: 64002 + asn: 64118 peers: 65100: - - 10.0.1.10 - - fc00::215 + - 10.0.0.242 + - fc00::1e5 interfaces: Loopback0: - ipv4: 100.1.0.134/32 - ipv6: 2064:100::86/128 + ipv4: 100.1.0.122/32 
+ ipv6: 2064:100::7a/128 Ethernet1: - ipv4: 10.0.1.11/31 - ipv6: fc00::216/126 + ipv4: 10.0.0.243/31 + ipv6: fc00::1e6/126 bp_interfaces: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 - ARISTA123T0: + ARISTA119T0: properties: - common bgp: - asn: 64002 + asn: 64119 peers: 65100: - - 10.0.1.12 - - fc00::219 + - 10.0.0.244 + - fc00::1e9 interfaces: Loopback0: - ipv4: 100.1.0.135/32 - ipv6: 2064:100::87/128 + ipv4: 100.1.0.123/32 + ipv6: 2064:100::7b/128 Ethernet1: - ipv4: 10.0.1.13/31 - ipv6: fc00::21a/126 + ipv4: 10.0.0.245/31 + ipv6: fc00::1ea/126 bp_interfaces: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 - ARISTA124T0: + ARISTA120T0: properties: - common bgp: - asn: 64002 + asn: 64120 peers: 65100: - - 10.0.1.14 - - fc00::21d + - 10.0.0.246 + - fc00::1ed interfaces: Loopback0: - ipv4: 100.1.0.136/32 - ipv6: 2064:100::88/128 + ipv4: 100.1.0.124/32 + ipv6: 2064:100::7c/128 Ethernet1: - ipv4: 10.0.1.15/31 - ipv6: fc00::21e/126 + ipv4: 10.0.0.247/31 + ipv6: fc00::1ee/126 bp_interfaces: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 - ARISTA125T0: + ARISTA121T0: properties: - common bgp: - asn: 64002 + asn: 64121 peers: 65100: - - 10.0.1.16 - - fc00::221 + - 10.0.0.248 + - fc00::1f1 interfaces: Loopback0: - ipv4: 100.1.0.137/32 - ipv6: 2064:100::89/128 + ipv4: 100.1.0.125/32 + ipv6: 2064:100::7d/128 Ethernet1: - ipv4: 10.0.1.17/31 - ipv6: fc00::222/126 + ipv4: 10.0.0.249/31 + ipv6: fc00::1f2/126 bp_interfaces: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 - ARISTA126T0: + ARISTA122T0: properties: - common bgp: - asn: 64002 + asn: 64122 peers: 65100: - - 10.0.1.18 - - fc00::225 + - 10.0.0.250 + - fc00::1f5 interfaces: Loopback0: - ipv4: 100.1.0.138/32 - ipv6: 2064:100::8a/128 + ipv4: 100.1.0.126/32 + ipv6: 2064:100::7e/128 Ethernet1: - ipv4: 10.0.1.19/31 - ipv6: fc00::226/126 + ipv4: 10.0.0.251/31 + ipv6: fc00::1f6/126 bp_interfaces: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 - ARISTA127T0: + ARISTA123T0: properties: - common bgp: - asn: 64002 + asn: 64123 peers: 65100: - - 10.0.1.20 - - 
fc00::229 + - 10.0.0.252 + - fc00::1f9 interfaces: Loopback0: - ipv4: 100.1.0.139/32 - ipv6: 2064:100::8b/128 + ipv4: 100.1.0.127/32 + ipv6: 2064:100::7f/128 Ethernet1: - ipv4: 10.0.1.21/31 - ipv6: fc00::22a/126 + ipv4: 10.0.0.253/31 + ipv6: fc00::1fa/126 bp_interfaces: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 - ARISTA128T0: + ARISTA124T0: properties: - common bgp: - asn: 64002 + asn: 64124 peers: 65100: - - 10.0.1.22 - - fc00::22d + - 10.0.0.254 + - fc00::1fd interfaces: Loopback0: - ipv4: 100.1.0.140/32 - ipv6: 2064:100::8c/128 + ipv4: 100.1.0.128/32 + ipv6: 2064:100::80/128 Ethernet1: - ipv4: 10.0.1.23/31 - ipv6: fc00::22e/126 + ipv4: 10.0.0.255/31 + ipv6: fc00::1fe/126 bp_interfaces: ipv4: 10.10.246.129/24 ipv6: fc0a::81/64 - ARISTA129T0: + ARISTA125T0: properties: - common bgp: - asn: 64002 + asn: 64125 peers: 65100: - - 10.0.1.24 - - fc00::231 + - 10.0.1.0 + - fc00::201 interfaces: Loopback0: - ipv4: 100.1.0.141/32 - ipv6: 2064:100::8d/128 + ipv4: 100.1.0.129/32 + ipv6: 2064:100::81/128 Ethernet1: - ipv4: 10.0.1.25/31 - ipv6: fc00::232/126 + ipv4: 10.0.1.1/31 + ipv6: fc00::202/126 bp_interfaces: ipv4: 10.10.246.130/24 ipv6: fc0a::82/64 - ARISTA130T0: + ARISTA126T0: properties: - common bgp: - asn: 64002 + asn: 64126 peers: 65100: - - 10.0.1.26 - - fc00::235 + - 10.0.1.2 + - fc00::205 interfaces: Loopback0: - ipv4: 100.1.0.142/32 - ipv6: 2064:100::8e/128 + ipv4: 100.1.0.130/32 + ipv6: 2064:100::82/128 Ethernet1: - ipv4: 10.0.1.27/31 - ipv6: fc00::236/126 + ipv4: 10.0.1.3/31 + ipv6: fc00::206/126 bp_interfaces: ipv4: 10.10.246.131/24 ipv6: fc0a::83/64 - ARISTA131T0: + ARISTA127T0: properties: - common bgp: - asn: 64002 + asn: 64127 peers: 65100: - - 10.0.1.28 - - fc00::239 + - 10.0.1.4 + - fc00::209 interfaces: Loopback0: - ipv4: 100.1.0.143/32 - ipv6: 2064:100::8f/128 - Ethernet1: - ipv4: 10.0.1.29/31 - ipv6: fc00::23a/126 + ipv4: 100.1.0.131/32 + ipv6: 2064:100::83/128 + Ethernet1: + ipv4: 10.0.1.5/31 + ipv6: fc00::20a/126 bp_interfaces: ipv4: 
10.10.246.132/24 ipv6: fc0a::84/64 - ARISTA132T0: + ARISTA128T0: properties: - common bgp: - asn: 64002 + asn: 64128 peers: 65100: - - 10.0.1.30 - - fc00::23d + - 10.0.1.6 + - fc00::20d interfaces: Loopback0: - ipv4: 100.1.0.144/32 - ipv6: 2064:100::90/128 + ipv4: 100.1.0.132/32 + ipv6: 2064:100::84/128 Ethernet1: - ipv4: 10.0.1.31/31 - ipv6: fc00::23e/126 + ipv4: 10.0.1.7/31 + ipv6: fc00::20e/126 bp_interfaces: ipv4: 10.10.246.133/24 ipv6: fc0a::85/64 - ARISTA133T0: + ARISTA129T0: properties: - common bgp: - asn: 64002 + asn: 64129 peers: 65100: - - 10.0.1.32 - - fc00::241 + - 10.0.1.8 + - fc00::211 interfaces: Loopback0: - ipv4: 100.1.0.145/32 - ipv6: 2064:100::91/128 + ipv4: 100.1.0.133/32 + ipv6: 2064:100::85/128 Ethernet1: - ipv4: 10.0.1.33/31 - ipv6: fc00::242/126 + ipv4: 10.0.1.9/31 + ipv6: fc00::212/126 bp_interfaces: ipv4: 10.10.246.134/24 ipv6: fc0a::86/64 - ARISTA134T0: + ARISTA130T0: properties: - common bgp: - asn: 64002 + asn: 64130 peers: 65100: - - 10.0.1.34 - - fc00::245 + - 10.0.1.10 + - fc00::215 interfaces: Loopback0: - ipv4: 100.1.0.146/32 - ipv6: 2064:100::92/128 + ipv4: 100.1.0.134/32 + ipv6: 2064:100::86/128 Ethernet1: - ipv4: 10.0.1.35/31 - ipv6: fc00::246/126 + ipv4: 10.0.1.11/31 + ipv6: fc00::216/126 bp_interfaces: ipv4: 10.10.246.135/24 ipv6: fc0a::87/64 - ARISTA135T0: + ARISTA131T0: properties: - common bgp: - asn: 64002 + asn: 64131 peers: 65100: - - 10.0.1.36 - - fc00::249 + - 10.0.1.12 + - fc00::219 interfaces: Loopback0: - ipv4: 100.1.0.147/32 - ipv6: 2064:100::93/128 + ipv4: 100.1.0.135/32 + ipv6: 2064:100::87/128 Ethernet1: - ipv4: 10.0.1.37/31 - ipv6: fc00::24a/126 + ipv4: 10.0.1.13/31 + ipv6: fc00::21a/126 bp_interfaces: ipv4: 10.10.246.136/24 ipv6: fc0a::88/64 - ARISTA136T0: + ARISTA132T0: properties: - common bgp: - asn: 64002 + asn: 64132 peers: 65100: - - 10.0.1.38 - - fc00::24d + - 10.0.1.14 + - fc00::21d interfaces: Loopback0: - ipv4: 100.1.0.148/32 - ipv6: 2064:100::94/128 + ipv4: 100.1.0.136/32 + ipv6: 2064:100::88/128 
Ethernet1: - ipv4: 10.0.1.39/31 - ipv6: fc00::24e/126 + ipv4: 10.0.1.15/31 + ipv6: fc00::21e/126 bp_interfaces: ipv4: 10.10.246.137/24 ipv6: fc0a::89/64 - ARISTA137T0: + ARISTA133T0: properties: - common bgp: - asn: 64002 + asn: 64133 peers: 65100: - - 10.0.1.40 - - fc00::251 + - 10.0.1.16 + - fc00::221 interfaces: Loopback0: - ipv4: 100.1.0.149/32 - ipv6: 2064:100::95/128 + ipv4: 100.1.0.137/32 + ipv6: 2064:100::89/128 Ethernet1: - ipv4: 10.0.1.41/31 - ipv6: fc00::252/126 + ipv4: 10.0.1.17/31 + ipv6: fc00::222/126 bp_interfaces: ipv4: 10.10.246.138/24 ipv6: fc0a::8a/64 - ARISTA138T0: + ARISTA134T0: properties: - common bgp: - asn: 64002 + asn: 64134 peers: 65100: - - 10.0.1.42 - - fc00::255 + - 10.0.1.18 + - fc00::225 interfaces: Loopback0: - ipv4: 100.1.0.150/32 - ipv6: 2064:100::96/128 + ipv4: 100.1.0.138/32 + ipv6: 2064:100::8a/128 Ethernet1: - ipv4: 10.0.1.43/31 - ipv6: fc00::256/126 + ipv4: 10.0.1.19/31 + ipv6: fc00::226/126 bp_interfaces: ipv4: 10.10.246.139/24 ipv6: fc0a::8b/64 - ARISTA139T0: + ARISTA135T0: properties: - common bgp: - asn: 64002 + asn: 64135 peers: 65100: - - 10.0.1.44 - - fc00::259 + - 10.0.1.20 + - fc00::229 interfaces: Loopback0: - ipv4: 100.1.0.151/32 - ipv6: 2064:100::97/128 + ipv4: 100.1.0.139/32 + ipv6: 2064:100::8b/128 Ethernet1: - ipv4: 10.0.1.45/31 - ipv6: fc00::25a/126 + ipv4: 10.0.1.21/31 + ipv6: fc00::22a/126 bp_interfaces: ipv4: 10.10.246.140/24 ipv6: fc0a::8c/64 - ARISTA140T0: + ARISTA136T0: properties: - common bgp: - asn: 64002 + asn: 64136 peers: 65100: - - 10.0.1.46 - - fc00::25d + - 10.0.1.22 + - fc00::22d interfaces: Loopback0: - ipv4: 100.1.0.152/32 - ipv6: 2064:100::98/128 + ipv4: 100.1.0.140/32 + ipv6: 2064:100::8c/128 Ethernet1: - ipv4: 10.0.1.47/31 - ipv6: fc00::25e/126 + ipv4: 10.0.1.23/31 + ipv6: fc00::22e/126 bp_interfaces: ipv4: 10.10.246.141/24 ipv6: fc0a::8d/64 - ARISTA141T0: + ARISTA137T0: properties: - common bgp: - asn: 64002 + asn: 64137 peers: 65100: - - 10.0.1.48 - - fc00::261 + - 10.0.1.24 + - 
fc00::231 interfaces: Loopback0: - ipv4: 100.1.0.153/32 - ipv6: 2064:100::99/128 + ipv4: 100.1.0.141/32 + ipv6: 2064:100::8d/128 Ethernet1: - ipv4: 10.0.1.49/31 - ipv6: fc00::262/126 + ipv4: 10.0.1.25/31 + ipv6: fc00::232/126 bp_interfaces: ipv4: 10.10.246.142/24 ipv6: fc0a::8e/64 - ARISTA142T0: + ARISTA138T0: properties: - common bgp: - asn: 64002 + asn: 64138 peers: 65100: - - 10.0.1.50 - - fc00::265 + - 10.0.1.26 + - fc00::235 interfaces: Loopback0: - ipv4: 100.1.0.154/32 - ipv6: 2064:100::9a/128 + ipv4: 100.1.0.142/32 + ipv6: 2064:100::8e/128 Ethernet1: - ipv4: 10.0.1.51/31 - ipv6: fc00::266/126 + ipv4: 10.0.1.27/31 + ipv6: fc00::236/126 bp_interfaces: ipv4: 10.10.246.143/24 ipv6: fc0a::8f/64 - ARISTA143T0: + ARISTA139T0: properties: - common bgp: - asn: 64002 + asn: 64139 peers: 65100: - - 10.0.1.52 - - fc00::269 + - 10.0.1.28 + - fc00::239 interfaces: Loopback0: - ipv4: 100.1.0.155/32 - ipv6: 2064:100::9b/128 + ipv4: 100.1.0.143/32 + ipv6: 2064:100::8f/128 Ethernet1: - ipv4: 10.0.1.53/31 - ipv6: fc00::26a/126 + ipv4: 10.0.1.29/31 + ipv6: fc00::23a/126 bp_interfaces: ipv4: 10.10.246.144/24 ipv6: fc0a::90/64 - ARISTA144T0: + ARISTA140T0: properties: - common bgp: - asn: 64002 + asn: 64140 peers: 65100: - - 10.0.1.54 - - fc00::26d + - 10.0.1.30 + - fc00::23d interfaces: Loopback0: - ipv4: 100.1.0.156/32 - ipv6: 2064:100::9c/128 + ipv4: 100.1.0.144/32 + ipv6: 2064:100::90/128 Ethernet1: - ipv4: 10.0.1.55/31 - ipv6: fc00::26e/126 + ipv4: 10.0.1.31/31 + ipv6: fc00::23e/126 bp_interfaces: ipv4: 10.10.246.145/24 ipv6: fc0a::91/64 - ARISTA145T0: + ARISTA141T0: properties: - common bgp: - asn: 64002 + asn: 64141 peers: 65100: - - 10.0.1.56 - - fc00::271 + - 10.0.1.32 + - fc00::241 interfaces: Loopback0: - ipv4: 100.1.0.157/32 - ipv6: 2064:100::9d/128 + ipv4: 100.1.0.145/32 + ipv6: 2064:100::91/128 Ethernet1: - ipv4: 10.0.1.57/31 - ipv6: fc00::272/126 + ipv4: 10.0.1.33/31 + ipv6: fc00::242/126 bp_interfaces: ipv4: 10.10.246.146/24 ipv6: fc0a::92/64 - ARISTA146T0: + 
ARISTA142T0: properties: - common bgp: - asn: 64002 + asn: 64142 peers: 65100: - - 10.0.1.58 - - fc00::275 + - 10.0.1.34 + - fc00::245 interfaces: Loopback0: - ipv4: 100.1.0.158/32 - ipv6: 2064:100::9e/128 + ipv4: 100.1.0.146/32 + ipv6: 2064:100::92/128 Ethernet1: - ipv4: 10.0.1.59/31 - ipv6: fc00::276/126 + ipv4: 10.0.1.35/31 + ipv6: fc00::246/126 bp_interfaces: ipv4: 10.10.246.147/24 ipv6: fc0a::93/64 - ARISTA147T0: + ARISTA143T0: properties: - common bgp: - asn: 64002 + asn: 64143 peers: 65100: - - 10.0.1.60 - - fc00::279 + - 10.0.1.36 + - fc00::249 interfaces: Loopback0: - ipv4: 100.1.0.159/32 - ipv6: 2064:100::9f/128 + ipv4: 100.1.0.147/32 + ipv6: 2064:100::93/128 Ethernet1: - ipv4: 10.0.1.61/31 - ipv6: fc00::27a/126 + ipv4: 10.0.1.37/31 + ipv6: fc00::24a/126 bp_interfaces: ipv4: 10.10.246.148/24 ipv6: fc0a::94/64 - ARISTA148T0: + ARISTA144T0: properties: - common bgp: - asn: 64002 + asn: 64144 peers: 65100: - - 10.0.1.62 - - fc00::27d + - 10.0.1.38 + - fc00::24d interfaces: Loopback0: - ipv4: 100.1.0.160/32 - ipv6: 2064:100::a0/128 + ipv4: 100.1.0.148/32 + ipv6: 2064:100::94/128 Ethernet1: - ipv4: 10.0.1.63/31 - ipv6: fc00::27e/126 + ipv4: 10.0.1.39/31 + ipv6: fc00::24e/126 bp_interfaces: ipv4: 10.10.246.149/24 ipv6: fc0a::95/64 - ARISTA149T0: + ARISTA145T0: properties: - common bgp: - asn: 64002 + asn: 64145 peers: 65100: - - 10.0.1.64 - - fc00::281 + - 10.0.1.40 + - fc00::251 interfaces: Loopback0: - ipv4: 100.1.0.161/32 - ipv6: 2064:100::a1/128 + ipv4: 100.1.0.149/32 + ipv6: 2064:100::95/128 Ethernet1: - ipv4: 10.0.1.65/31 - ipv6: fc00::282/126 + ipv4: 10.0.1.41/31 + ipv6: fc00::252/126 bp_interfaces: ipv4: 10.10.246.150/24 ipv6: fc0a::96/64 - ARISTA150T0: + ARISTA146T0: properties: - common bgp: - asn: 64002 + asn: 64146 peers: 65100: - - 10.0.1.66 - - fc00::285 + - 10.0.1.42 + - fc00::255 interfaces: Loopback0: - ipv4: 100.1.0.162/32 - ipv6: 2064:100::a2/128 + ipv4: 100.1.0.150/32 + ipv6: 2064:100::96/128 Ethernet1: - ipv4: 10.0.1.67/31 - ipv6: 
fc00::286/126 + ipv4: 10.0.1.43/31 + ipv6: fc00::256/126 bp_interfaces: ipv4: 10.10.246.151/24 ipv6: fc0a::97/64 - ARISTA151T0: + ARISTA147T0: properties: - common bgp: - asn: 64002 + asn: 64147 peers: 65100: - - 10.0.1.68 - - fc00::289 + - 10.0.1.44 + - fc00::259 interfaces: Loopback0: - ipv4: 100.1.0.163/32 - ipv6: 2064:100::a3/128 + ipv4: 100.1.0.151/32 + ipv6: 2064:100::97/128 Ethernet1: - ipv4: 10.0.1.69/31 - ipv6: fc00::28a/126 + ipv4: 10.0.1.45/31 + ipv6: fc00::25a/126 bp_interfaces: ipv4: 10.10.246.152/24 ipv6: fc0a::98/64 - ARISTA152T0: + ARISTA148T0: properties: - common bgp: - asn: 64002 + asn: 64148 peers: 65100: - - 10.0.1.70 - - fc00::28d + - 10.0.1.46 + - fc00::25d interfaces: Loopback0: - ipv4: 100.1.0.164/32 - ipv6: 2064:100::a4/128 + ipv4: 100.1.0.152/32 + ipv6: 2064:100::98/128 Ethernet1: - ipv4: 10.0.1.71/31 - ipv6: fc00::28e/126 + ipv4: 10.0.1.47/31 + ipv6: fc00::25e/126 bp_interfaces: ipv4: 10.10.246.153/24 ipv6: fc0a::99/64 - ARISTA153T0: + ARISTA149T0: properties: - common bgp: - asn: 64002 + asn: 64149 peers: 65100: - - 10.0.1.72 - - fc00::291 + - 10.0.1.48 + - fc00::261 interfaces: Loopback0: - ipv4: 100.1.0.165/32 - ipv6: 2064:100::a5/128 + ipv4: 100.1.0.153/32 + ipv6: 2064:100::99/128 Ethernet1: - ipv4: 10.0.1.73/31 - ipv6: fc00::292/126 + ipv4: 10.0.1.49/31 + ipv6: fc00::262/126 bp_interfaces: ipv4: 10.10.246.154/24 ipv6: fc0a::9a/64 - ARISTA154T0: + ARISTA150T0: properties: - common bgp: - asn: 64002 + asn: 64150 peers: 65100: - - 10.0.1.74 - - fc00::295 + - 10.0.1.50 + - fc00::265 interfaces: Loopback0: - ipv4: 100.1.0.166/32 - ipv6: 2064:100::a6/128 + ipv4: 100.1.0.154/32 + ipv6: 2064:100::9a/128 Ethernet1: - ipv4: 10.0.1.75/31 - ipv6: fc00::296/126 + ipv4: 10.0.1.51/31 + ipv6: fc00::266/126 bp_interfaces: ipv4: 10.10.246.155/24 ipv6: fc0a::9b/64 - ARISTA155T0: + ARISTA151T0: properties: - common bgp: - asn: 64002 + asn: 64151 peers: 65100: - - 10.0.1.76 - - fc00::299 + - 10.0.1.52 + - fc00::269 interfaces: Loopback0: - ipv4: 
100.1.0.167/32 - ipv6: 2064:100::a7/128 + ipv4: 100.1.0.155/32 + ipv6: 2064:100::9b/128 Ethernet1: - ipv4: 10.0.1.77/31 - ipv6: fc00::29a/126 + ipv4: 10.0.1.53/31 + ipv6: fc00::26a/126 bp_interfaces: ipv4: 10.10.246.156/24 ipv6: fc0a::9c/64 - ARISTA156T0: + ARISTA152T0: properties: - common bgp: - asn: 64002 + asn: 64152 peers: 65100: - - 10.0.1.78 - - fc00::29d + - 10.0.1.54 + - fc00::26d interfaces: Loopback0: - ipv4: 100.1.0.168/32 - ipv6: 2064:100::a8/128 + ipv4: 100.1.0.156/32 + ipv6: 2064:100::9c/128 Ethernet1: - ipv4: 10.0.1.79/31 - ipv6: fc00::29e/126 + ipv4: 10.0.1.55/31 + ipv6: fc00::26e/126 bp_interfaces: ipv4: 10.10.246.157/24 ipv6: fc0a::9d/64 - ARISTA157T0: + ARISTA153T0: properties: - common bgp: - asn: 64002 + asn: 64153 peers: 65100: - - 10.0.1.80 - - fc00::2a1 + - 10.0.1.56 + - fc00::271 interfaces: Loopback0: - ipv4: 100.1.0.169/32 - ipv6: 2064:100::a9/128 + ipv4: 100.1.0.157/32 + ipv6: 2064:100::9d/128 Ethernet1: - ipv4: 10.0.1.81/31 - ipv6: fc00::2a2/126 + ipv4: 10.0.1.57/31 + ipv6: fc00::272/126 bp_interfaces: ipv4: 10.10.246.158/24 ipv6: fc0a::9e/64 - ARISTA158T0: + ARISTA154T0: properties: - common bgp: - asn: 64002 + asn: 64154 peers: 65100: - - 10.0.1.82 - - fc00::2a5 + - 10.0.1.58 + - fc00::275 interfaces: Loopback0: - ipv4: 100.1.0.170/32 - ipv6: 2064:100::aa/128 + ipv4: 100.1.0.158/32 + ipv6: 2064:100::9e/128 Ethernet1: - ipv4: 10.0.1.83/31 - ipv6: fc00::2a6/126 + ipv4: 10.0.1.59/31 + ipv6: fc00::276/126 bp_interfaces: ipv4: 10.10.246.159/24 ipv6: fc0a::9f/64 - ARISTA159T0: + ARISTA155T0: properties: - common bgp: - asn: 64002 + asn: 64155 peers: 65100: - - 10.0.1.84 - - fc00::2a9 + - 10.0.1.60 + - fc00::279 interfaces: Loopback0: - ipv4: 100.1.0.171/32 - ipv6: 2064:100::ab/128 + ipv4: 100.1.0.159/32 + ipv6: 2064:100::9f/128 Ethernet1: - ipv4: 10.0.1.85/31 - ipv6: fc00::2aa/126 + ipv4: 10.0.1.61/31 + ipv6: fc00::27a/126 bp_interfaces: ipv4: 10.10.246.160/24 ipv6: fc0a::a0/64 - ARISTA160T0: + ARISTA156T0: properties: - common bgp: - asn: 
64002 + asn: 64156 peers: 65100: - - 10.0.1.86 - - fc00::2ad + - 10.0.1.62 + - fc00::27d interfaces: Loopback0: - ipv4: 100.1.0.172/32 - ipv6: 2064:100::ac/128 + ipv4: 100.1.0.160/32 + ipv6: 2064:100::a0/128 Ethernet1: - ipv4: 10.0.1.87/31 - ipv6: fc00::2ae/126 + ipv4: 10.0.1.63/31 + ipv6: fc00::27e/126 bp_interfaces: ipv4: 10.10.246.161/24 ipv6: fc0a::a1/64 - ARISTA161T0: + ARISTA157T0: properties: - common bgp: - asn: 64002 + asn: 64157 peers: 65100: - - 10.0.1.88 - - fc00::2b1 + - 10.0.1.64 + - fc00::281 interfaces: Loopback0: - ipv4: 100.1.0.173/32 - ipv6: 2064:100::ad/128 + ipv4: 100.1.0.161/32 + ipv6: 2064:100::a1/128 Ethernet1: - ipv4: 10.0.1.89/31 - ipv6: fc00::2b2/126 + ipv4: 10.0.1.65/31 + ipv6: fc00::282/126 bp_interfaces: ipv4: 10.10.246.162/24 ipv6: fc0a::a2/64 - ARISTA162T0: + ARISTA158T0: properties: - common bgp: - asn: 64002 + asn: 64158 peers: 65100: - - 10.0.1.90 - - fc00::2b5 + - 10.0.1.66 + - fc00::285 interfaces: Loopback0: - ipv4: 100.1.0.174/32 - ipv6: 2064:100::ae/128 + ipv4: 100.1.0.162/32 + ipv6: 2064:100::a2/128 Ethernet1: - ipv4: 10.0.1.91/31 - ipv6: fc00::2b6/126 + ipv4: 10.0.1.67/31 + ipv6: fc00::286/126 bp_interfaces: ipv4: 10.10.246.163/24 ipv6: fc0a::a3/64 - ARISTA163T0: + ARISTA159T0: properties: - common bgp: - asn: 64002 + asn: 64159 peers: 65100: - - 10.0.1.92 - - fc00::2b9 + - 10.0.1.68 + - fc00::289 interfaces: Loopback0: - ipv4: 100.1.0.175/32 - ipv6: 2064:100::af/128 + ipv4: 100.1.0.163/32 + ipv6: 2064:100::a3/128 Ethernet1: - ipv4: 10.0.1.93/31 - ipv6: fc00::2ba/126 + ipv4: 10.0.1.69/31 + ipv6: fc00::28a/126 bp_interfaces: ipv4: 10.10.246.164/24 ipv6: fc0a::a4/64 - ARISTA164T0: + ARISTA160T0: properties: - common bgp: - asn: 64002 + asn: 64160 peers: 65100: - - 10.0.1.94 - - fc00::2bd + - 10.0.1.70 + - fc00::28d interfaces: Loopback0: - ipv4: 100.1.0.176/32 - ipv6: 2064:100::b0/128 + ipv4: 100.1.0.164/32 + ipv6: 2064:100::a4/128 Ethernet1: - ipv4: 10.0.1.95/31 - ipv6: fc00::2be/126 + ipv4: 10.0.1.71/31 + ipv6: 
fc00::28e/126 bp_interfaces: ipv4: 10.10.246.165/24 ipv6: fc0a::a5/64 - ARISTA165T2: + ARISTA05T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.96 - - fc00::2c1 + - 10.0.1.72 + - fc00::291 interfaces: Loopback0: - ipv4: 100.1.0.177/32 - ipv6: 2064:100::b1/128 + ipv4: 100.1.0.165/32 + ipv6: 2064:100::a5/128 Ethernet1: - ipv4: 10.0.1.97/31 - ipv6: fc00::2c2/126 + ipv4: 10.0.1.73/31 + ipv6: fc00::292/126 bp_interfaces: ipv4: 10.10.246.166/24 ipv6: fc0a::a6/64 - ARISTA166T2: + ARISTA06T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.98 - - fc00::2c5 + - 10.0.1.74 + - fc00::295 interfaces: Loopback0: - ipv4: 100.1.0.178/32 - ipv6: 2064:100::b2/128 + ipv4: 100.1.0.166/32 + ipv6: 2064:100::a6/128 Ethernet1: - ipv4: 10.0.1.99/31 - ipv6: fc00::2c6/126 + ipv4: 10.0.1.75/31 + ipv6: fc00::296/126 bp_interfaces: ipv4: 10.10.246.167/24 ipv6: fc0a::a7/64 - ARISTA167T0: + ARISTA161T0: properties: - common bgp: - asn: 64002 + asn: 64161 peers: 65100: - - 10.0.1.112 - - fc00::2e1 + - 10.0.1.76 + - fc00::299 interfaces: Loopback0: - ipv4: 100.1.0.185/32 - ipv6: 2064:100::b9/128 + ipv4: 100.1.0.167/32 + ipv6: 2064:100::a7/128 Ethernet1: - ipv4: 10.0.1.113/31 - ipv6: fc00::2e2/126 + ipv4: 10.0.1.77/31 + ipv6: fc00::29a/126 bp_interfaces: ipv4: 10.10.246.168/24 ipv6: fc0a::a8/64 - ARISTA168T0: + ARISTA162T0: properties: - common bgp: - asn: 64002 + asn: 64162 peers: 65100: - - 10.0.1.114 - - fc00::2e5 + - 10.0.1.78 + - fc00::29d interfaces: Loopback0: - ipv4: 100.1.0.186/32 - ipv6: 2064:100::ba/128 + ipv4: 100.1.0.168/32 + ipv6: 2064:100::a8/128 Ethernet1: - ipv4: 10.0.1.115/31 - ipv6: fc00::2e6/126 + ipv4: 10.0.1.79/31 + ipv6: fc00::29e/126 bp_interfaces: ipv4: 10.10.246.169/24 ipv6: fc0a::a9/64 - ARISTA169T0: + ARISTA163T0: properties: - common bgp: - asn: 64002 + asn: 64163 peers: 65100: - - 10.0.1.116 - - fc00::2e9 + - 10.0.1.80 + - fc00::2a1 interfaces: Loopback0: - ipv4: 100.1.0.187/32 - ipv6: 2064:100::bb/128 + ipv4: 100.1.0.169/32 + ipv6: 
2064:100::a9/128 Ethernet1: - ipv4: 10.0.1.117/31 - ipv6: fc00::2ea/126 + ipv4: 10.0.1.81/31 + ipv6: fc00::2a2/126 bp_interfaces: ipv4: 10.10.246.170/24 ipv6: fc0a::aa/64 - ARISTA170T0: + ARISTA164T0: properties: - common bgp: - asn: 64002 + asn: 64164 peers: 65100: - - 10.0.1.118 - - fc00::2ed + - 10.0.1.82 + - fc00::2a5 interfaces: Loopback0: - ipv4: 100.1.0.188/32 - ipv6: 2064:100::bc/128 + ipv4: 100.1.0.170/32 + ipv6: 2064:100::aa/128 Ethernet1: - ipv4: 10.0.1.119/31 - ipv6: fc00::2ee/126 + ipv4: 10.0.1.83/31 + ipv6: fc00::2a6/126 bp_interfaces: ipv4: 10.10.246.171/24 ipv6: fc0a::ab/64 - ARISTA171T0: + ARISTA165T0: properties: - common bgp: - asn: 64002 + asn: 64165 peers: 65100: - - 10.0.1.120 - - fc00::2f1 + - 10.0.1.84 + - fc00::2a9 interfaces: Loopback0: - ipv4: 100.1.0.189/32 - ipv6: 2064:100::bd/128 + ipv4: 100.1.0.171/32 + ipv6: 2064:100::ab/128 Ethernet1: - ipv4: 10.0.1.121/31 - ipv6: fc00::2f2/126 + ipv4: 10.0.1.85/31 + ipv6: fc00::2aa/126 bp_interfaces: ipv4: 10.10.246.172/24 ipv6: fc0a::ac/64 - ARISTA172T0: + ARISTA166T0: properties: - common bgp: - asn: 64002 + asn: 64166 peers: 65100: - - 10.0.1.122 - - fc00::2f5 + - 10.0.1.86 + - fc00::2ad interfaces: Loopback0: - ipv4: 100.1.0.190/32 - ipv6: 2064:100::be/128 + ipv4: 100.1.0.172/32 + ipv6: 2064:100::ac/128 Ethernet1: - ipv4: 10.0.1.123/31 - ipv6: fc00::2f6/126 + ipv4: 10.0.1.87/31 + ipv6: fc00::2ae/126 bp_interfaces: ipv4: 10.10.246.173/24 ipv6: fc0a::ad/64 - ARISTA173T0: + ARISTA167T0: properties: - common bgp: - asn: 64002 + asn: 64167 peers: 65100: - - 10.0.1.124 - - fc00::2f9 + - 10.0.1.88 + - fc00::2b1 interfaces: Loopback0: - ipv4: 100.1.0.191/32 - ipv6: 2064:100::bf/128 + ipv4: 100.1.0.173/32 + ipv6: 2064:100::ad/128 Ethernet1: - ipv4: 10.0.1.125/31 - ipv6: fc00::2fa/126 + ipv4: 10.0.1.89/31 + ipv6: fc00::2b2/126 bp_interfaces: ipv4: 10.10.246.174/24 ipv6: fc0a::ae/64 - ARISTA174T0: + ARISTA168T0: properties: - common bgp: - asn: 64002 + asn: 64168 peers: 65100: - - 10.0.1.126 - - fc00::2fd 
+ - 10.0.1.90 + - fc00::2b5 interfaces: Loopback0: - ipv4: 100.1.0.192/32 - ipv6: 2064:100::c0/128 + ipv4: 100.1.0.174/32 + ipv6: 2064:100::ae/128 Ethernet1: - ipv4: 10.0.1.127/31 - ipv6: fc00::2fe/126 + ipv4: 10.0.1.91/31 + ipv6: fc00::2b6/126 bp_interfaces: ipv4: 10.10.246.175/24 ipv6: fc0a::af/64 - ARISTA175T2: + ARISTA07T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.128 - - fc00::301 + - 10.0.1.92 + - fc00::2b9 interfaces: Loopback0: - ipv4: 100.1.0.193/32 - ipv6: 2064:100::c1/128 + ipv4: 100.1.0.175/32 + ipv6: 2064:100::af/128 Ethernet1: - ipv4: 10.0.1.129/31 - ipv6: fc00::302/126 + ipv4: 10.0.1.93/31 + ipv6: fc00::2ba/126 bp_interfaces: ipv4: 10.10.246.176/24 ipv6: fc0a::b0/64 - ARISTA176T2: + ARISTA08T2: properties: - common bgp: asn: 65200 peers: 65100: - - 10.0.1.130 - - fc00::305 + - 10.0.1.94 + - fc00::2bd interfaces: Loopback0: - ipv4: 100.1.0.194/32 - ipv6: 2064:100::c2/128 + ipv4: 100.1.0.176/32 + ipv6: 2064:100::b0/128 Ethernet1: - ipv4: 10.0.1.131/31 - ipv6: fc00::306/126 + ipv4: 10.0.1.95/31 + ipv6: fc00::2be/126 bp_interfaces: ipv4: 10.10.246.177/24 ipv6: fc0a::b1/64 - ARISTA177T0: + ARISTA169T0: properties: - common bgp: - asn: 64002 + asn: 64169 peers: 65100: - - 10.0.1.144 - - fc00::321 + - 10.0.1.96 + - fc00::2c1 interfaces: Loopback0: - ipv4: 100.1.0.201/32 - ipv6: 2064:100::c9/128 + ipv4: 100.1.0.177/32 + ipv6: 2064:100::b1/128 Ethernet1: - ipv4: 10.0.1.145/31 - ipv6: fc00::322/126 + ipv4: 10.0.1.97/31 + ipv6: fc00::2c2/126 bp_interfaces: ipv4: 10.10.246.178/24 ipv6: fc0a::b2/64 - ARISTA178T0: + ARISTA170T0: properties: - common bgp: - asn: 64002 + asn: 64170 peers: 65100: - - 10.0.1.146 - - fc00::325 + - 10.0.1.98 + - fc00::2c5 interfaces: Loopback0: - ipv4: 100.1.0.202/32 - ipv6: 2064:100::ca/128 + ipv4: 100.1.0.178/32 + ipv6: 2064:100::b2/128 Ethernet1: - ipv4: 10.0.1.147/31 - ipv6: fc00::326/126 + ipv4: 10.0.1.99/31 + ipv6: fc00::2c6/126 bp_interfaces: ipv4: 10.10.246.179/24 ipv6: fc0a::b3/64 - ARISTA179T0: + 
ARISTA171T0: properties: - common bgp: - asn: 64002 + asn: 64171 peers: 65100: - - 10.0.1.148 - - fc00::329 + - 10.0.1.100 + - fc00::2c9 interfaces: Loopback0: - ipv4: 100.1.0.203/32 - ipv6: 2064:100::cb/128 + ipv4: 100.1.0.179/32 + ipv6: 2064:100::b3/128 Ethernet1: - ipv4: 10.0.1.149/31 - ipv6: fc00::32a/126 + ipv4: 10.0.1.101/31 + ipv6: fc00::2ca/126 bp_interfaces: ipv4: 10.10.246.180/24 ipv6: fc0a::b4/64 - ARISTA180T0: + ARISTA172T0: properties: - common bgp: - asn: 64002 + asn: 64172 peers: 65100: - - 10.0.1.150 - - fc00::32d + - 10.0.1.102 + - fc00::2cd interfaces: Loopback0: - ipv4: 100.1.0.204/32 - ipv6: 2064:100::cc/128 + ipv4: 100.1.0.180/32 + ipv6: 2064:100::b4/128 Ethernet1: - ipv4: 10.0.1.151/31 - ipv6: fc00::32e/126 + ipv4: 10.0.1.103/31 + ipv6: fc00::2ce/126 bp_interfaces: ipv4: 10.10.246.181/24 ipv6: fc0a::b5/64 - ARISTA181T0: + ARISTA173T0: properties: - common bgp: - asn: 64002 + asn: 64173 peers: 65100: - - 10.0.1.152 - - fc00::331 + - 10.0.1.104 + - fc00::2d1 interfaces: Loopback0: - ipv4: 100.1.0.205/32 - ipv6: 2064:100::cd/128 + ipv4: 100.1.0.181/32 + ipv6: 2064:100::b5/128 Ethernet1: - ipv4: 10.0.1.153/31 - ipv6: fc00::332/126 + ipv4: 10.0.1.105/31 + ipv6: fc00::2d2/126 bp_interfaces: ipv4: 10.10.246.182/24 ipv6: fc0a::b6/64 - ARISTA182T0: + ARISTA174T0: properties: - common bgp: - asn: 64002 + asn: 64174 peers: 65100: - - 10.0.1.154 - - fc00::335 + - 10.0.1.106 + - fc00::2d5 interfaces: Loopback0: - ipv4: 100.1.0.206/32 - ipv6: 2064:100::ce/128 + ipv4: 100.1.0.182/32 + ipv6: 2064:100::b6/128 Ethernet1: - ipv4: 10.0.1.155/31 - ipv6: fc00::336/126 + ipv4: 10.0.1.107/31 + ipv6: fc00::2d6/126 bp_interfaces: ipv4: 10.10.246.183/24 ipv6: fc0a::b7/64 - ARISTA183T0: + ARISTA175T0: properties: - common bgp: - asn: 64002 + asn: 64175 peers: 65100: - - 10.0.1.156 - - fc00::339 + - 10.0.1.108 + - fc00::2d9 interfaces: Loopback0: - ipv4: 100.1.0.207/32 - ipv6: 2064:100::cf/128 + ipv4: 100.1.0.183/32 + ipv6: 2064:100::b7/128 Ethernet1: - ipv4: 
10.0.1.157/31 - ipv6: fc00::33a/126 + ipv4: 10.0.1.109/31 + ipv6: fc00::2da/126 bp_interfaces: ipv4: 10.10.246.184/24 ipv6: fc0a::b8/64 - ARISTA184T0: + ARISTA176T0: properties: - common bgp: - asn: 64002 + asn: 64176 peers: 65100: - - 10.0.1.158 - - fc00::33d + - 10.0.1.110 + - fc00::2dd interfaces: Loopback0: - ipv4: 100.1.0.208/32 - ipv6: 2064:100::d0/128 + ipv4: 100.1.0.184/32 + ipv6: 2064:100::b8/128 Ethernet1: - ipv4: 10.0.1.159/31 - ipv6: fc00::33e/126 + ipv4: 10.0.1.111/31 + ipv6: fc00::2de/126 bp_interfaces: ipv4: 10.10.246.185/24 ipv6: fc0a::b9/64 - ARISTA185T0: + ARISTA177T0: properties: - common bgp: - asn: 64002 + asn: 64177 peers: 65100: - - 10.0.1.160 - - fc00::341 + - 10.0.1.112 + - fc00::2e1 interfaces: Loopback0: - ipv4: 100.1.0.209/32 - ipv6: 2064:100::d1/128 + ipv4: 100.1.0.185/32 + ipv6: 2064:100::b9/128 Ethernet1: - ipv4: 10.0.1.161/31 - ipv6: fc00::342/126 + ipv4: 10.0.1.113/31 + ipv6: fc00::2e2/126 bp_interfaces: ipv4: 10.10.246.186/24 ipv6: fc0a::ba/64 - ARISTA186T0: + ARISTA178T0: properties: - common bgp: - asn: 64002 + asn: 64178 peers: 65100: - - 10.0.1.162 - - fc00::345 + - 10.0.1.114 + - fc00::2e5 interfaces: Loopback0: - ipv4: 100.1.0.210/32 - ipv6: 2064:100::d2/128 + ipv4: 100.1.0.186/32 + ipv6: 2064:100::ba/128 Ethernet1: - ipv4: 10.0.1.163/31 - ipv6: fc00::346/126 + ipv4: 10.0.1.115/31 + ipv6: fc00::2e6/126 bp_interfaces: ipv4: 10.10.246.187/24 ipv6: fc0a::bb/64 - ARISTA187T0: + ARISTA179T0: properties: - common bgp: - asn: 64002 + asn: 64179 peers: 65100: - - 10.0.1.164 - - fc00::349 + - 10.0.1.116 + - fc00::2e9 interfaces: Loopback0: - ipv4: 100.1.0.211/32 - ipv6: 2064:100::d3/128 + ipv4: 100.1.0.187/32 + ipv6: 2064:100::bb/128 Ethernet1: - ipv4: 10.0.1.165/31 - ipv6: fc00::34a/126 + ipv4: 10.0.1.117/31 + ipv6: fc00::2ea/126 bp_interfaces: ipv4: 10.10.246.188/24 ipv6: fc0a::bc/64 - ARISTA188T0: + ARISTA180T0: properties: - common bgp: - asn: 64002 + asn: 64180 peers: 65100: - - 10.0.1.166 - - fc00::34d + - 10.0.1.118 + - 
fc00::2ed interfaces: Loopback0: - ipv4: 100.1.0.212/32 - ipv6: 2064:100::d4/128 + ipv4: 100.1.0.188/32 + ipv6: 2064:100::bc/128 Ethernet1: - ipv4: 10.0.1.167/31 - ipv6: fc00::34e/126 + ipv4: 10.0.1.119/31 + ipv6: fc00::2ee/126 bp_interfaces: ipv4: 10.10.246.189/24 ipv6: fc0a::bd/64 - ARISTA189T0: + ARISTA181T0: properties: - common bgp: - asn: 64002 + asn: 64181 peers: 65100: - - 10.0.1.168 - - fc00::351 + - 10.0.1.120 + - fc00::2f1 interfaces: Loopback0: - ipv4: 100.1.0.213/32 - ipv6: 2064:100::d5/128 + ipv4: 100.1.0.189/32 + ipv6: 2064:100::bd/128 Ethernet1: - ipv4: 10.0.1.169/31 - ipv6: fc00::352/126 + ipv4: 10.0.1.121/31 + ipv6: fc00::2f2/126 bp_interfaces: ipv4: 10.10.246.190/24 ipv6: fc0a::be/64 - ARISTA190T0: + ARISTA182T0: properties: - common bgp: - asn: 64002 + asn: 64182 peers: 65100: - - 10.0.1.170 - - fc00::355 + - 10.0.1.122 + - fc00::2f5 interfaces: Loopback0: - ipv4: 100.1.0.214/32 - ipv6: 2064:100::d6/128 + ipv4: 100.1.0.190/32 + ipv6: 2064:100::be/128 Ethernet1: - ipv4: 10.0.1.171/31 - ipv6: fc00::356/126 + ipv4: 10.0.1.123/31 + ipv6: fc00::2f6/126 bp_interfaces: ipv4: 10.10.246.191/24 ipv6: fc0a::bf/64 - ARISTA191T0: + ARISTA183T0: properties: - common bgp: - asn: 64002 + asn: 64183 peers: 65100: - - 10.0.1.172 - - fc00::359 + - 10.0.1.124 + - fc00::2f9 interfaces: Loopback0: - ipv4: 100.1.0.215/32 - ipv6: 2064:100::d7/128 + ipv4: 100.1.0.191/32 + ipv6: 2064:100::bf/128 Ethernet1: - ipv4: 10.0.1.173/31 - ipv6: fc00::35a/126 + ipv4: 10.0.1.125/31 + ipv6: fc00::2fa/126 bp_interfaces: ipv4: 10.10.246.192/24 ipv6: fc0a::c0/64 - ARISTA192T0: + ARISTA184T0: properties: - common bgp: - asn: 64002 + asn: 64184 peers: 65100: - - 10.0.1.174 - - fc00::35d + - 10.0.1.126 + - fc00::2fd interfaces: Loopback0: - ipv4: 100.1.0.216/32 - ipv6: 2064:100::d8/128 + ipv4: 100.1.0.192/32 + ipv6: 2064:100::c0/128 Ethernet1: - ipv4: 10.0.1.175/31 - ipv6: fc00::35e/126 + ipv4: 10.0.1.127/31 + ipv6: fc00::2fe/126 bp_interfaces: ipv4: 10.10.246.193/24 ipv6: fc0a::c1/64 - 
ARISTA193T0: + ARISTA185T0: properties: - common bgp: - asn: 64002 + asn: 64185 peers: 65100: - - 10.0.1.176 - - fc00::361 + - 10.0.1.128 + - fc00::301 interfaces: Loopback0: - ipv4: 100.1.0.217/32 - ipv6: 2064:100::d9/128 + ipv4: 100.1.0.193/32 + ipv6: 2064:100::c1/128 Ethernet1: - ipv4: 10.0.1.177/31 - ipv6: fc00::362/126 + ipv4: 10.0.1.129/31 + ipv6: fc00::302/126 bp_interfaces: ipv4: 10.10.246.194/24 ipv6: fc0a::c2/64 - ARISTA194T0: + ARISTA186T0: properties: - common bgp: - asn: 64002 + asn: 64186 peers: 65100: - - 10.0.1.178 - - fc00::365 + - 10.0.1.130 + - fc00::305 interfaces: Loopback0: - ipv4: 100.1.0.218/32 - ipv6: 2064:100::da/128 + ipv4: 100.1.0.194/32 + ipv6: 2064:100::c2/128 Ethernet1: - ipv4: 10.0.1.179/31 - ipv6: fc00::366/126 + ipv4: 10.0.1.131/31 + ipv6: fc00::306/126 bp_interfaces: ipv4: 10.10.246.195/24 ipv6: fc0a::c3/64 - ARISTA195T0: + ARISTA187T0: properties: - common bgp: - asn: 64002 + asn: 64187 peers: 65100: - - 10.0.1.180 - - fc00::369 + - 10.0.1.132 + - fc00::309 interfaces: Loopback0: - ipv4: 100.1.0.219/32 - ipv6: 2064:100::db/128 + ipv4: 100.1.0.195/32 + ipv6: 2064:100::c3/128 Ethernet1: - ipv4: 10.0.1.181/31 - ipv6: fc00::36a/126 + ipv4: 10.0.1.133/31 + ipv6: fc00::30a/126 bp_interfaces: ipv4: 10.10.246.196/24 ipv6: fc0a::c4/64 - ARISTA196T0: + ARISTA188T0: properties: - common bgp: - asn: 64002 + asn: 64188 peers: 65100: - - 10.0.1.182 - - fc00::36d + - 10.0.1.134 + - fc00::30d interfaces: Loopback0: - ipv4: 100.1.0.220/32 - ipv6: 2064:100::dc/128 + ipv4: 100.1.0.196/32 + ipv6: 2064:100::c4/128 Ethernet1: - ipv4: 10.0.1.183/31 - ipv6: fc00::36e/126 + ipv4: 10.0.1.135/31 + ipv6: fc00::30e/126 bp_interfaces: ipv4: 10.10.246.197/24 ipv6: fc0a::c5/64 - ARISTA197T0: + ARISTA189T0: properties: - common bgp: - asn: 64002 + asn: 64189 peers: 65100: - - 10.0.1.184 - - fc00::371 + - 10.0.1.136 + - fc00::311 interfaces: Loopback0: - ipv4: 100.1.0.221/32 - ipv6: 2064:100::dd/128 + ipv4: 100.1.0.197/32 + ipv6: 2064:100::c5/128 Ethernet1: - 
ipv4: 10.0.1.185/31 - ipv6: fc00::372/126 + ipv4: 10.0.1.137/31 + ipv6: fc00::312/126 bp_interfaces: ipv4: 10.10.246.198/24 ipv6: fc0a::c6/64 - ARISTA198T0: + ARISTA190T0: properties: - common bgp: - asn: 64002 + asn: 64190 peers: 65100: - - 10.0.1.186 - - fc00::375 + - 10.0.1.138 + - fc00::315 interfaces: Loopback0: - ipv4: 100.1.0.222/32 - ipv6: 2064:100::de/128 + ipv4: 100.1.0.198/32 + ipv6: 2064:100::c6/128 Ethernet1: - ipv4: 10.0.1.187/31 - ipv6: fc00::376/126 + ipv4: 10.0.1.139/31 + ipv6: fc00::316/126 bp_interfaces: ipv4: 10.10.246.199/24 ipv6: fc0a::c7/64 - ARISTA199T0: + ARISTA191T0: properties: - common bgp: - asn: 64002 + asn: 64191 peers: 65100: - - 10.0.1.188 - - fc00::379 + - 10.0.1.140 + - fc00::319 interfaces: Loopback0: - ipv4: 100.1.0.223/32 - ipv6: 2064:100::df/128 + ipv4: 100.1.0.199/32 + ipv6: 2064:100::c7/128 Ethernet1: - ipv4: 10.0.1.189/31 - ipv6: fc00::37a/126 + ipv4: 10.0.1.141/31 + ipv6: fc00::31a/126 bp_interfaces: ipv4: 10.10.246.200/24 ipv6: fc0a::c8/64 - ARISTA200T0: + ARISTA192T0: properties: - common bgp: - asn: 64002 + asn: 64192 peers: 65100: - - 10.0.1.190 - - fc00::37d + - 10.0.1.142 + - fc00::31d interfaces: Loopback0: - ipv4: 100.1.0.224/32 - ipv6: 2064:100::e0/128 + ipv4: 100.1.0.200/32 + ipv6: 2064:100::c8/128 Ethernet1: - ipv4: 10.0.1.191/31 - ipv6: fc00::37e/126 + ipv4: 10.0.1.143/31 + ipv6: fc00::31e/126 bp_interfaces: ipv4: 10.10.246.201/24 ipv6: fc0a::c9/64 - ARISTA201T0: + ARISTA193T0: properties: - common bgp: - asn: 64002 + asn: 64193 peers: 65100: - - 10.0.1.192 - - fc00::381 + - 10.0.1.144 + - fc00::321 interfaces: Loopback0: - ipv4: 100.1.0.225/32 - ipv6: 2064:100::e1/128 + ipv4: 100.1.0.201/32 + ipv6: 2064:100::c9/128 Ethernet1: - ipv4: 10.0.1.193/31 - ipv6: fc00::382/126 + ipv4: 10.0.1.145/31 + ipv6: fc00::322/126 bp_interfaces: ipv4: 10.10.246.202/24 ipv6: fc0a::ca/64 - ARISTA202T0: + ARISTA194T0: properties: - common bgp: - asn: 64002 + asn: 64194 peers: 65100: - - 10.0.1.194 - - fc00::385 + - 10.0.1.146 + - 
fc00::325 interfaces: Loopback0: - ipv4: 100.1.0.226/32 - ipv6: 2064:100::e2/128 + ipv4: 100.1.0.202/32 + ipv6: 2064:100::ca/128 Ethernet1: - ipv4: 10.0.1.195/31 - ipv6: fc00::386/126 + ipv4: 10.0.1.147/31 + ipv6: fc00::326/126 bp_interfaces: ipv4: 10.10.246.203/24 ipv6: fc0a::cb/64 - ARISTA203T0: + ARISTA195T0: properties: - common bgp: - asn: 64002 + asn: 64195 peers: 65100: - - 10.0.1.196 - - fc00::389 + - 10.0.1.148 + - fc00::329 interfaces: Loopback0: - ipv4: 100.1.0.227/32 - ipv6: 2064:100::e3/128 + ipv4: 100.1.0.203/32 + ipv6: 2064:100::cb/128 Ethernet1: - ipv4: 10.0.1.197/31 - ipv6: fc00::38a/126 + ipv4: 10.0.1.149/31 + ipv6: fc00::32a/126 bp_interfaces: ipv4: 10.10.246.204/24 ipv6: fc0a::cc/64 - ARISTA204T0: + ARISTA196T0: properties: - common bgp: - asn: 64002 + asn: 64196 peers: 65100: - - 10.0.1.198 - - fc00::38d + - 10.0.1.150 + - fc00::32d interfaces: Loopback0: - ipv4: 100.1.0.228/32 - ipv6: 2064:100::e4/128 + ipv4: 100.1.0.204/32 + ipv6: 2064:100::cc/128 Ethernet1: - ipv4: 10.0.1.199/31 - ipv6: fc00::38e/126 + ipv4: 10.0.1.151/31 + ipv6: fc00::32e/126 bp_interfaces: ipv4: 10.10.246.205/24 ipv6: fc0a::cd/64 - ARISTA205T0: + ARISTA197T0: properties: - common bgp: - asn: 64002 + asn: 64197 peers: 65100: - - 10.0.1.200 - - fc00::391 + - 10.0.1.152 + - fc00::331 interfaces: Loopback0: - ipv4: 100.1.0.229/32 - ipv6: 2064:100::e5/128 - Ethernet1: - ipv4: 10.0.1.201/31 - ipv6: fc00::392/126 + ipv4: 100.1.0.205/32 + ipv6: 2064:100::cd/128 + Ethernet1: + ipv4: 10.0.1.153/31 + ipv6: fc00::332/126 bp_interfaces: ipv4: 10.10.246.206/24 ipv6: fc0a::ce/64 - ARISTA206T0: + ARISTA198T0: properties: - common bgp: - asn: 64002 + asn: 64198 peers: 65100: - - 10.0.1.202 - - fc00::395 + - 10.0.1.154 + - fc00::335 interfaces: Loopback0: - ipv4: 100.1.0.230/32 - ipv6: 2064:100::e6/128 + ipv4: 100.1.0.206/32 + ipv6: 2064:100::ce/128 Ethernet1: - ipv4: 10.0.1.203/31 - ipv6: fc00::396/126 + ipv4: 10.0.1.155/31 + ipv6: fc00::336/126 bp_interfaces: ipv4: 10.10.246.207/24 ipv6: 
fc0a::cf/64 - ARISTA207T0: + ARISTA199T0: properties: - common bgp: - asn: 64002 + asn: 64199 peers: 65100: - - 10.0.1.204 - - fc00::399 + - 10.0.1.156 + - fc00::339 interfaces: Loopback0: - ipv4: 100.1.0.231/32 - ipv6: 2064:100::e7/128 + ipv4: 100.1.0.207/32 + ipv6: 2064:100::cf/128 Ethernet1: - ipv4: 10.0.1.205/31 - ipv6: fc00::39a/126 + ipv4: 10.0.1.157/31 + ipv6: fc00::33a/126 bp_interfaces: ipv4: 10.10.246.208/24 ipv6: fc0a::d0/64 - ARISTA208T0: + ARISTA200T0: properties: - common bgp: - asn: 64002 + asn: 64200 peers: 65100: - - 10.0.1.206 - - fc00::39d + - 10.0.1.158 + - fc00::33d interfaces: Loopback0: - ipv4: 100.1.0.232/32 - ipv6: 2064:100::e8/128 + ipv4: 100.1.0.208/32 + ipv6: 2064:100::d0/128 Ethernet1: - ipv4: 10.0.1.207/31 - ipv6: fc00::39e/126 + ipv4: 10.0.1.159/31 + ipv6: fc00::33e/126 bp_interfaces: ipv4: 10.10.246.209/24 ipv6: fc0a::d1/64 - ARISTA209T0: + ARISTA201T0: properties: - common bgp: - asn: 64002 + asn: 64201 peers: 65100: - - 10.0.1.208 - - fc00::3a1 + - 10.0.1.160 + - fc00::341 interfaces: Loopback0: - ipv4: 100.1.0.233/32 - ipv6: 2064:100::e9/128 + ipv4: 100.1.0.209/32 + ipv6: 2064:100::d1/128 Ethernet1: - ipv4: 10.0.1.209/31 - ipv6: fc00::3a2/126 + ipv4: 10.0.1.161/31 + ipv6: fc00::342/126 bp_interfaces: ipv4: 10.10.246.210/24 ipv6: fc0a::d2/64 - ARISTA210T0: + ARISTA202T0: properties: - common bgp: - asn: 64002 + asn: 64202 peers: 65100: - - 10.0.1.210 - - fc00::3a5 + - 10.0.1.162 + - fc00::345 interfaces: Loopback0: - ipv4: 100.1.0.234/32 - ipv6: 2064:100::ea/128 + ipv4: 100.1.0.210/32 + ipv6: 2064:100::d2/128 Ethernet1: - ipv4: 10.0.1.211/31 - ipv6: fc00::3a6/126 + ipv4: 10.0.1.163/31 + ipv6: fc00::346/126 bp_interfaces: ipv4: 10.10.246.211/24 ipv6: fc0a::d3/64 - ARISTA211T0: + ARISTA203T0: properties: - common bgp: - asn: 64002 + asn: 64203 peers: 65100: - - 10.0.1.212 - - fc00::3a9 + - 10.0.1.164 + - fc00::349 interfaces: Loopback0: - ipv4: 100.1.0.235/32 - ipv6: 2064:100::eb/128 + ipv4: 100.1.0.211/32 + ipv6: 2064:100::d3/128 
Ethernet1: - ipv4: 10.0.1.213/31 - ipv6: fc00::3aa/126 + ipv4: 10.0.1.165/31 + ipv6: fc00::34a/126 bp_interfaces: ipv4: 10.10.246.212/24 ipv6: fc0a::d4/64 - ARISTA212T0: + ARISTA204T0: properties: - common bgp: - asn: 64002 + asn: 64204 peers: 65100: - - 10.0.1.214 - - fc00::3ad + - 10.0.1.166 + - fc00::34d interfaces: Loopback0: - ipv4: 100.1.0.236/32 - ipv6: 2064:100::ec/128 + ipv4: 100.1.0.212/32 + ipv6: 2064:100::d4/128 Ethernet1: - ipv4: 10.0.1.215/31 - ipv6: fc00::3ae/126 + ipv4: 10.0.1.167/31 + ipv6: fc00::34e/126 bp_interfaces: ipv4: 10.10.246.213/24 ipv6: fc0a::d5/64 - ARISTA213T0: + ARISTA205T0: properties: - common bgp: - asn: 64002 + asn: 64205 peers: 65100: - - 10.0.1.216 - - fc00::3b1 + - 10.0.1.168 + - fc00::351 interfaces: Loopback0: - ipv4: 100.1.0.237/32 - ipv6: 2064:100::ed/128 + ipv4: 100.1.0.213/32 + ipv6: 2064:100::d5/128 Ethernet1: - ipv4: 10.0.1.217/31 - ipv6: fc00::3b2/126 + ipv4: 10.0.1.169/31 + ipv6: fc00::352/126 bp_interfaces: ipv4: 10.10.246.214/24 ipv6: fc0a::d6/64 - ARISTA214T0: + ARISTA206T0: properties: - common bgp: - asn: 64002 + asn: 64206 peers: 65100: - - 10.0.1.218 - - fc00::3b5 + - 10.0.1.170 + - fc00::355 interfaces: Loopback0: - ipv4: 100.1.0.238/32 - ipv6: 2064:100::ee/128 + ipv4: 100.1.0.214/32 + ipv6: 2064:100::d6/128 Ethernet1: - ipv4: 10.0.1.219/31 - ipv6: fc00::3b6/126 + ipv4: 10.0.1.171/31 + ipv6: fc00::356/126 bp_interfaces: ipv4: 10.10.246.215/24 ipv6: fc0a::d7/64 - ARISTA215T0: + ARISTA207T0: properties: - common bgp: - asn: 64002 + asn: 64207 peers: 65100: - - 10.0.1.220 - - fc00::3b9 + - 10.0.1.172 + - fc00::359 interfaces: Loopback0: - ipv4: 100.1.0.239/32 - ipv6: 2064:100::ef/128 + ipv4: 100.1.0.215/32 + ipv6: 2064:100::d7/128 Ethernet1: - ipv4: 10.0.1.221/31 - ipv6: fc00::3ba/126 + ipv4: 10.0.1.173/31 + ipv6: fc00::35a/126 bp_interfaces: ipv4: 10.10.246.216/24 ipv6: fc0a::d8/64 - ARISTA216T0: + ARISTA208T0: properties: - common bgp: - asn: 64002 + asn: 64208 peers: 65100: - - 10.0.1.222 - - fc00::3bd + - 
10.0.1.174 + - fc00::35d interfaces: Loopback0: - ipv4: 100.1.0.240/32 - ipv6: 2064:100::f0/128 + ipv4: 100.1.0.216/32 + ipv6: 2064:100::d8/128 Ethernet1: - ipv4: 10.0.1.223/31 - ipv6: fc00::3be/126 + ipv4: 10.0.1.175/31 + ipv6: fc00::35e/126 bp_interfaces: ipv4: 10.10.246.217/24 ipv6: fc0a::d9/64 - ARISTA217T0: + ARISTA209T0: properties: - common bgp: - asn: 64002 + asn: 64209 peers: 65100: - - 10.0.1.224 - - fc00::3c1 + - 10.0.1.176 + - fc00::361 interfaces: Loopback0: - ipv4: 100.1.0.241/32 - ipv6: 2064:100::f1/128 + ipv4: 100.1.0.217/32 + ipv6: 2064:100::d9/128 Ethernet1: - ipv4: 10.0.1.225/31 - ipv6: fc00::3c2/126 + ipv4: 10.0.1.177/31 + ipv6: fc00::362/126 bp_interfaces: ipv4: 10.10.246.218/24 ipv6: fc0a::da/64 - ARISTA218T0: + ARISTA210T0: properties: - common bgp: - asn: 64002 + asn: 64210 peers: 65100: - - 10.0.1.226 - - fc00::3c5 + - 10.0.1.178 + - fc00::365 interfaces: Loopback0: - ipv4: 100.1.0.242/32 - ipv6: 2064:100::f2/128 + ipv4: 100.1.0.218/32 + ipv6: 2064:100::da/128 Ethernet1: - ipv4: 10.0.1.227/31 - ipv6: fc00::3c6/126 + ipv4: 10.0.1.179/31 + ipv6: fc00::366/126 bp_interfaces: ipv4: 10.10.246.219/24 ipv6: fc0a::db/64 - ARISTA219T0: + ARISTA211T0: properties: - common bgp: - asn: 64002 + asn: 64211 peers: 65100: - - 10.0.1.228 - - fc00::3c9 + - 10.0.1.180 + - fc00::369 interfaces: Loopback0: - ipv4: 100.1.0.243/32 - ipv6: 2064:100::f3/128 + ipv4: 100.1.0.219/32 + ipv6: 2064:100::db/128 Ethernet1: - ipv4: 10.0.1.229/31 - ipv6: fc00::3ca/126 + ipv4: 10.0.1.181/31 + ipv6: fc00::36a/126 bp_interfaces: ipv4: 10.10.246.220/24 ipv6: fc0a::dc/64 - ARISTA220T0: + ARISTA212T0: properties: - common bgp: - asn: 64002 + asn: 64212 peers: 65100: - - 10.0.1.230 - - fc00::3cd + - 10.0.1.182 + - fc00::36d interfaces: Loopback0: - ipv4: 100.1.0.244/32 - ipv6: 2064:100::f4/128 + ipv4: 100.1.0.220/32 + ipv6: 2064:100::dc/128 Ethernet1: - ipv4: 10.0.1.231/31 - ipv6: fc00::3ce/126 + ipv4: 10.0.1.183/31 + ipv6: fc00::36e/126 bp_interfaces: ipv4: 10.10.246.221/24 ipv6: 
fc0a::dd/64 - ARISTA221T0: + ARISTA213T0: properties: - common bgp: - asn: 64002 + asn: 64213 peers: 65100: - - 10.0.1.232 - - fc00::3d1 + - 10.0.1.184 + - fc00::371 interfaces: Loopback0: - ipv4: 100.1.0.245/32 - ipv6: 2064:100::f5/128 + ipv4: 100.1.0.221/32 + ipv6: 2064:100::dd/128 Ethernet1: - ipv4: 10.0.1.233/31 - ipv6: fc00::3d2/126 + ipv4: 10.0.1.185/31 + ipv6: fc00::372/126 bp_interfaces: ipv4: 10.10.246.222/24 ipv6: fc0a::de/64 - ARISTA222T0: + ARISTA214T0: properties: - common bgp: - asn: 64002 + asn: 64214 peers: 65100: - - 10.0.1.234 - - fc00::3d5 + - 10.0.1.186 + - fc00::375 interfaces: Loopback0: - ipv4: 100.1.0.246/32 - ipv6: 2064:100::f6/128 + ipv4: 100.1.0.222/32 + ipv6: 2064:100::de/128 Ethernet1: - ipv4: 10.0.1.235/31 - ipv6: fc00::3d6/126 + ipv4: 10.0.1.187/31 + ipv6: fc00::376/126 bp_interfaces: ipv4: 10.10.246.223/24 ipv6: fc0a::df/64 - ARISTA223T0: + ARISTA215T0: properties: - common bgp: - asn: 64002 + asn: 64215 peers: 65100: - - 10.0.1.236 - - fc00::3d9 + - 10.0.1.188 + - fc00::379 interfaces: Loopback0: - ipv4: 100.1.0.247/32 - ipv6: 2064:100::f7/128 + ipv4: 100.1.0.223/32 + ipv6: 2064:100::df/128 Ethernet1: - ipv4: 10.0.1.237/31 - ipv6: fc00::3da/126 + ipv4: 10.0.1.189/31 + ipv6: fc00::37a/126 bp_interfaces: ipv4: 10.10.246.224/24 ipv6: fc0a::e0/64 - ARISTA224T0: + ARISTA216T0: properties: - common bgp: - asn: 64002 + asn: 64216 peers: 65100: - - 10.0.1.238 - - fc00::3dd + - 10.0.1.190 + - fc00::37d interfaces: Loopback0: - ipv4: 100.1.0.248/32 - ipv6: 2064:100::f8/128 + ipv4: 100.1.0.224/32 + ipv6: 2064:100::e0/128 Ethernet1: - ipv4: 10.0.1.239/31 - ipv6: fc00::3de/126 + ipv4: 10.0.1.191/31 + ipv6: fc00::37e/126 bp_interfaces: ipv4: 10.10.246.225/24 ipv6: fc0a::e1/64 - ARISTA225T0: + ARISTA217T0: properties: - common bgp: - asn: 64002 + asn: 64217 peers: 65100: - - 10.0.1.240 - - fc00::3e1 + - 10.0.1.192 + - fc00::381 interfaces: Loopback0: - ipv4: 100.1.0.249/32 - ipv6: 2064:100::f9/128 + ipv4: 100.1.0.225/32 + ipv6: 2064:100::e1/128 
Ethernet1: - ipv4: 10.0.1.241/31 - ipv6: fc00::3e2/126 + ipv4: 10.0.1.193/31 + ipv6: fc00::382/126 bp_interfaces: ipv4: 10.10.246.226/24 ipv6: fc0a::e2/64 - ARISTA226T0: + ARISTA218T0: properties: - common bgp: - asn: 64002 + asn: 64218 peers: 65100: - - 10.0.1.242 - - fc00::3e5 + - 10.0.1.194 + - fc00::385 interfaces: Loopback0: - ipv4: 100.1.0.250/32 - ipv6: 2064:100::fa/128 + ipv4: 100.1.0.226/32 + ipv6: 2064:100::e2/128 Ethernet1: - ipv4: 10.0.1.243/31 - ipv6: fc00::3e6/126 + ipv4: 10.0.1.195/31 + ipv6: fc00::386/126 bp_interfaces: ipv4: 10.10.246.227/24 ipv6: fc0a::e3/64 - ARISTA227T0: + ARISTA219T0: properties: - common bgp: - asn: 64002 + asn: 64219 peers: 65100: - - 10.0.1.244 - - fc00::3e9 + - 10.0.1.196 + - fc00::389 interfaces: Loopback0: - ipv4: 100.1.0.251/32 - ipv6: 2064:100::fb/128 + ipv4: 100.1.0.227/32 + ipv6: 2064:100::e3/128 Ethernet1: - ipv4: 10.0.1.245/31 - ipv6: fc00::3ea/126 + ipv4: 10.0.1.197/31 + ipv6: fc00::38a/126 bp_interfaces: ipv4: 10.10.246.228/24 ipv6: fc0a::e4/64 - ARISTA228T0: + ARISTA220T0: properties: - common bgp: - asn: 64002 + asn: 64220 peers: 65100: - - 10.0.1.246 - - fc00::3ed + - 10.0.1.198 + - fc00::38d interfaces: Loopback0: - ipv4: 100.1.0.252/32 - ipv6: 2064:100::fc/128 + ipv4: 100.1.0.228/32 + ipv6: 2064:100::e4/128 Ethernet1: - ipv4: 10.0.1.247/31 - ipv6: fc00::3ee/126 + ipv4: 10.0.1.199/31 + ipv6: fc00::38e/126 bp_interfaces: ipv4: 10.10.246.229/24 ipv6: fc0a::e5/64 - ARISTA229T0: + ARISTA221T0: properties: - common bgp: - asn: 64002 + asn: 64221 peers: 65100: - - 10.0.1.248 - - fc00::3f1 + - 10.0.1.200 + - fc00::391 interfaces: Loopback0: - ipv4: 100.1.0.253/32 - ipv6: 2064:100::fd/128 + ipv4: 100.1.0.229/32 + ipv6: 2064:100::e5/128 Ethernet1: - ipv4: 10.0.1.249/31 - ipv6: fc00::3f2/126 + ipv4: 10.0.1.201/31 + ipv6: fc00::392/126 bp_interfaces: ipv4: 10.10.246.230/24 ipv6: fc0a::e6/64 - ARISTA230T0: + ARISTA222T0: properties: - common bgp: - asn: 64002 + asn: 64222 peers: 65100: - - 10.0.1.250 - - fc00::3f5 + - 
10.0.1.202 + - fc00::395 interfaces: Loopback0: - ipv4: 100.1.0.254/32 - ipv6: 2064:100::fe/128 + ipv4: 100.1.0.230/32 + ipv6: 2064:100::e6/128 Ethernet1: - ipv4: 10.0.1.251/31 - ipv6: fc00::3f6/126 + ipv4: 10.0.1.203/31 + ipv6: fc00::396/126 bp_interfaces: ipv4: 10.10.246.231/24 ipv6: fc0a::e7/64 - ARISTA231T0: + ARISTA223T0: properties: - common bgp: - asn: 64002 + asn: 64223 peers: 65100: - - 10.0.1.252 - - fc00::3f9 + - 10.0.1.204 + - fc00::399 interfaces: Loopback0: - ipv4: 100.1.0.255/32 - ipv6: 2064:100::ff/128 + ipv4: 100.1.0.231/32 + ipv6: 2064:100::e7/128 Ethernet1: - ipv4: 10.0.1.253/31 - ipv6: fc00::3fa/126 + ipv4: 10.0.1.205/31 + ipv6: fc00::39a/126 bp_interfaces: ipv4: 10.10.246.232/24 ipv6: fc0a::e8/64 - ARISTA232T0: + ARISTA224T0: properties: - common bgp: - asn: 64002 + asn: 64224 peers: 65100: - - 10.0.1.254 - - fc00::3fd + - 10.0.1.206 + - fc00::39d interfaces: Loopback0: - ipv4: 100.1.1.0/32 - ipv6: 2064:100::100/128 + ipv4: 100.1.0.232/32 + ipv6: 2064:100::e8/128 Ethernet1: - ipv4: 10.0.1.255/31 - ipv6: fc00::3fe/126 + ipv4: 10.0.1.207/31 + ipv6: fc00::39e/126 bp_interfaces: ipv4: 10.10.246.233/24 ipv6: fc0a::e9/64 From 199637ff36902849719d119f6d3fd7c456f6e664 Mon Sep 17 00:00:00 2001 From: wumiao_nokia Date: Thu, 7 Nov 2024 20:05:12 -0500 Subject: [PATCH 041/175] Modify Small Partition Size to 300M from 100M (#15274) Description of PR It's found in some scaled setup the OC testing for test_logrotate_small_size could fail on fallocate 512K. {"changed": true, "cmd": "sudo fallocate -l 512.0K /var/log/syslog", "delta": "0:00:00.014333", "end": "2024-10-28 21:15:46.973853", "failed": true, "msg": "non-zero return code", "rc": 1, "start": "2024-10-28 21:15:46.959520", "stderr": "fallocate: fallocate failed: No space left on device", "stderr_lines": ["fallocate: fallocate failed: No space left on device"], "stdout": "", "stdout_lines": []} Our scaled setup could have 34K BGP routes and has more BGP neighbors. 
This test did config-reload after mounting new partition to /var/log. All new services restart and can fill the syslog and other logs and reach partition size. Added debug code to find after config reload the /var/log partition situation: udev 16G 0 16G 0% /dev tmpfs 3.2G 49M 3.1G 2% /run root-overlay 32G 6.7G 25G 22% / /dev/sda3 32G 6.7G 25G 22% /host /dev/loop2 89M 87M 0 100% /var/log tmpfs 16G 60K 16G 1% /dev/shm tmpfs 5.0M 0 5.0M 0% /run/lock tmpfs 4.0M 0 4.0M 0% /sys/fs/cgroup List all files under /var/log: /n total 4117 drwxr-xr-x 6 root root 1024 Oct 28 21:15 . drwxr-xr-x 1 root root 4096 Oct 25 21:11 .. -rw-r----- 1 root adm 0 Oct 28 21:15 auth.log -rw-r----- 1 root adm 47075 Oct 28 21:15 auth.log.1 -rw-r----- 1 root adm 0 Oct 28 21:15 cron.log drwxr-xr-x 2 root root 1024 Oct 28 21:15 frr -rw-r----- 1 root adm 0 Oct 28 21:15 gnmi.log -rw-r----- 1 root adm 1007 Oct 28 21:15 gnmi.log.1 drwx------ 2 root root 12288 Oct 28 21:10 lost+found -rw-r--r-- 1 root root 64 Oct 28 21:15 nokia-watchdog.log drwxrwxrwx 2 root root 1024 Oct 28 21:12 ntpsec drwxr-xr-x 2 root root 1024 Oct 28 21:15 swss -rw-r----- 1 root adm 0 Oct 28 21:15 syslog -rw-r----- 1 root adm 3185638 Oct 28 21:15 syslog.1 -rw-r----- 1 root adm 0 Oct 28 21:15 teamd.log -rw-r----- 1 root adm 955766 Oct 28 21:14 teamd.log.1 -rw-r----- 1 root adm 0 Oct 28 21:15 telemetry.log Solution is to make this partition to 300M during the test. This could make the test pass in scaled setup. Approach What is the motivation for this PR? test_logrotate_small_size test sometimes fails. How did you do it? How did you verify/test it? Verified with the change. We never hit the failure for test_logrotate_small_size. 
co-authorized by: jianquanye@microsoft.com --- tests/syslog/test_logrotate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/syslog/test_logrotate.py b/tests/syslog/test_logrotate.py index 3dfeabecb95..cabfaab71ab 100644 --- a/tests/syslog/test_logrotate.py +++ b/tests/syslog/test_logrotate.py @@ -15,7 +15,7 @@ ] LOG_FOLDER = '/var/log' -SMALL_VAR_LOG_PARTITION_SIZE = '100M' +SMALL_VAR_LOG_PARTITION_SIZE = '300M' FAKE_IP = '10.20.30.40' FAKE_MAC = 'aa:bb:cc:dd:11:22' From 9e468e72b35e8abc9a4edba559f9e2d198f2a1c1 Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:23:55 +1100 Subject: [PATCH 042/175] chore: enable parallel run for more tests (#15449) Description of PR Enable parallel run for 10 more test modules. Disable parallel run for pfcwd/test_pfc_config.py because we are seeing file creation conflict when running it in parallel. Approach What is the motivation for this PR? We want to enable parallel run for more tests to further reduce the multi-DUT (e.g. T2) Nightly test running time. How did you do it? Add them to the test_parallel_modes.json file. How did you verify/test it? I ran these newly added test modules and can confirm that they can be run in parallel. 
co-authorized by: jianquanye@microsoft.com --- tests/test_parallel_modes.json | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/test_parallel_modes.json b/tests/test_parallel_modes.json index 5d72c23d63f..6d443f8c631 100644 --- a/tests/test_parallel_modes.json +++ b/tests/test_parallel_modes.json @@ -5,16 +5,25 @@ "bgp/test_bgp_session_flap.py": "FULL_PARALLEL", "container_checker/test_container_checker.py": "RP_FIRST", "crm/test_crm.py": "FULL_PARALLEL", + "iface_namingmode/test_iface_namingmode.py": "FULL_PARALLEL", "lldp/test_lldp.py": "FULL_PARALLEL", "memory_checker/test_memory_checker.py": "FULL_PARALLEL", "override_config_table/test_override_config_table_masic.py": "FULL_PARALLEL", "passw_hardening/test_passw_hardening.py": "FULL_PARALLEL", "pc/test_po_cleanup.py": "FULL_PARALLEL", - "pfcwd/test_pfc_config.py": "FULL_PARALLEL", + "platform_tests/api/test_chassis.py": "FULL_PARALLEL", + "platform_tests/api/test_module.py": "FULL_PARALLEL", "platform_tests/api/test_sfp.py": "FULL_PARALLEL", + "platform_tests/api/test_thermal.py": "FULL_PARALLEL", + "platform_tests/cli/test_show_chassis_module.py": "FULL_PARALLEL", + "platform_tests/link_flap/test_cont_link_flap.py": "FULL_PARALLEL", + "platform_tests/sfp/test_sfputil.py": "FULL_PARALLEL", + "platform_tests/test_memory_exhaustion.py": "RP_FIRST", "platform_tests/test_reboot.py": "RP_FIRST", "platform_tests/test_reload_config.py": "RP_FIRST", "platform_tests/test_sequential_restart.py": "FULL_PARALLEL", + "show_techsupport/test_techsupport.py": "FULL_PARALLEL", + "show_techsupport/test_techsupport_no_secret.py": "FULL_PARALLEL", "snmp/test_snmp_cpu.py": "FULL_PARALLEL", "snmp/test_snmp_interfaces.py": "FULL_PARALLEL", "snmp/test_snmp_link_local.py": "FULL_PARALLEL", From 3d5161488a754d1419324196b86a3841932b237d Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:46:46 +1100 Subject: [PATCH 043/175] refactor: optimize 
BFD test (#15370) Description of PR Optimize BFD tests to reduce the running time. Approach What is the motivation for this PR? The bfd/test_bfd_static_route.py and bfd/test_bfd_traffic.py are taking a very long time to finish. We can optimize them by multithreading and enabling safe reload and safe reboot. After the optimization, the running time of bfd/test_bfd_static_route.py will decrease by 1.5 to 2 hours, and the running time of bfd/test_bfd_traffic.py will decrease by 1 to 1.5 hours. How did you do it? Using multithreading and enabling safe reload and safe reboot to optimize the BFD tests. How did you verify/test it? I ran the updated code and can confirm that they can still run properly. Any platform specific information? T2 co-authorized by: jianquanye@microsoft.com --- tests/bfd/bfd_base.py | 177 +++- tests/bfd/bfd_helpers.py | 201 +--- tests/bfd/test_bfd_static_route.py | 1418 ++++++++-------------------- tests/bfd/test_bfd_traffic.py | 221 +++-- 4 files changed, 675 insertions(+), 1342 deletions(-) diff --git a/tests/bfd/bfd_base.py b/tests/bfd/bfd_base.py index 08b8a39b9ff..e801cbfa870 100644 --- a/tests/bfd/bfd_base.py +++ b/tests/bfd/bfd_base.py @@ -3,10 +3,10 @@ import pytest -from tests.bfd.bfd_helpers import modify_all_bfd_sessions, find_bfd_peers_with_given_state -from tests.common import config_reload -from tests.common.platform.processes_utils import wait_critical_processes -from tests.common.utilities import wait_until +from tests.bfd.bfd_helpers import prepare_bfd_state, selecting_route_to_delete, \ + extract_ip_addresses_for_backend_portchannels, get_dut_asic_static_routes, extract_backend_portchannels, \ + get_src_dst_asic_next_hops +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor logger = logging.getLogger(__name__) @@ -25,55 +25,21 @@ def modify_bfd_sessions(self, duthosts): c. If expected state is "Up" and no. of down peers is 0, output is True d. If expected state is "Down" and no. 
of up peers is 0, output is True """ + duts = duthosts.frontend_nodes try: - duts = duthosts.frontend_nodes - for dut in duts: - modify_all_bfd_sessions(dut, "false") - for dut in duts: - # config reload - config_reload(dut) - wait_critical_processes(dut) - # Verification that all BFD sessions are deleted - for dut in duts: - asics = [ - asic.split("asic")[1] for asic in dut.get_asic_namespace_list() - ] - for asic in asics: - assert wait_until( - 600, - 10, - 0, - lambda: find_bfd_peers_with_given_state( - dut, asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for dut in duts: + executor.submit(prepare_bfd_state, dut, "false", "No BFD sessions found") yield finally: - duts = duthosts.frontend_nodes - for dut in duts: - modify_all_bfd_sessions(dut, "true") - for dut in duts: - config_reload(dut) - wait_critical_processes(dut) - # Verification that all BFD sessions are added - for dut in duts: - asics = [ - asic.split("asic")[1] for asic in dut.get_asic_namespace_list() - ] - for asic in asics: - assert wait_until( - 600, - 10, - 0, - lambda: find_bfd_peers_with_given_state( - dut, asic, "Up" - ), - ) - - @pytest.fixture(scope="class", name="select_src_dst_dut_and_asic", params=(["multi_dut"])) - def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo): + with SafeThreadPoolExecutor(max_workers=8) as executor: + for dut in duts: + executor.submit(prepare_bfd_state, dut, "true", "Up") + + @pytest.fixture(scope="class", name="select_src_dst_dut_and_asic") + def select_src_dst_dut_and_asic(self, duthosts, tbinfo): if (len(duthosts.frontend_nodes)) < 2: pytest.skip("Don't have 2 frontend nodes - so can't run multi_dut tests") # Random selection of dut indices based on number of front end nodes @@ -131,6 +97,80 @@ def get_src_dst_asic_and_duts(self, duthosts, select_src_dst_dut_and_asic): rtn_dict.update(select_src_dst_dut_and_asic) yield rtn_dict + @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) + def 
select_src_dst_dut_with_asic(self, request, get_src_dst_asic_and_duts): + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + + version = request.param + logger.info("Version: %s", version) + + # Random selection of dut & asic. + src_asic = get_src_dst_asic_and_duts["src_asic"] + dst_asic = get_src_dst_asic_and_duts["dst_asic"] + src_dut = get_src_dst_asic_and_duts["src_dut"] + dst_dut = get_src_dst_asic_and_duts["dst_dut"] + + logger.info("Source Asic: %s", src_asic) + logger.info("Destination Asic: %s", dst_asic) + logger.info("Source dut: %s", src_dut) + logger.info("Destination dut: %s", dst_dut) + + request.config.src_asic = src_asic + request.config.dst_asic = dst_asic + request.config.src_dut = src_dut + request.config.dst_dut = dst_dut + + src_asic_routes = get_dut_asic_static_routes(version, src_dut) + dst_asic_routes = get_dut_asic_static_routes(version, dst_dut) + + # Extracting nexthops + dst_dut_nexthops = ( + extract_ip_addresses_for_backend_portchannels( + src_dut, src_asic, version + ) + ) + logger.info("Destination nexthops, {}".format(dst_dut_nexthops)) + assert len(dst_dut_nexthops) != 0, "Destination Nexthops are empty" + + src_dut_nexthops = ( + extract_ip_addresses_for_backend_portchannels( + dst_dut, dst_asic, version + ) + ) + logger.info("Source nexthops, {}".format(src_dut_nexthops)) + assert len(src_dut_nexthops) != 0, "Source Nexthops are empty" + + # Picking a static route to delete corresponding BFD session + src_prefix = selecting_route_to_delete( + src_asic_routes, src_dut_nexthops.values() + ) + logger.info("Source prefix: %s", src_prefix) + request.config.src_prefix = src_prefix + assert src_prefix is not None and src_prefix != "", "Source prefix not found" + + dst_prefix = selecting_route_to_delete( + dst_asic_routes, dst_dut_nexthops.values() + ) + logger.info("Destination prefix: %s", dst_prefix) + request.config.dst_prefix = dst_prefix + assert ( + 
dst_prefix is not None and dst_prefix != "" + ), "Destination prefix not found" + + yield { + "src_asic": src_asic, + "dst_asic": dst_asic, + "src_dut": src_dut, + "dst_dut": dst_dut, + "src_dut_nexthops": src_dut_nexthops, + "dst_dut_nexthops": dst_dut_nexthops, + "src_prefix": src_prefix, + "dst_prefix": dst_prefix, + "version": version, + } + @pytest.fixture(scope="class") def select_dut_and_src_dst_asic_index(self, duthosts): if not duthosts.frontend_nodes: @@ -179,3 +219,44 @@ def get_src_dst_asic(self, request, duthosts, select_dut_and_src_dst_asic_index) rtn_dict.update(select_dut_and_src_dst_asic_index) yield rtn_dict + + @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) + def prepare_traffic_test_variables(self, get_src_dst_asic, request): + version = request.param + logger.info("Version: %s", version) + + dut = get_src_dst_asic["dut"] + src_asic = get_src_dst_asic["src_asic"] + src_asic_index = get_src_dst_asic["src_asic_index"] + dst_asic = get_src_dst_asic["dst_asic"] + dst_asic_index = get_src_dst_asic["dst_asic_index"] + logger.info( + "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, dst_asic_index) + ) + + backend_port_channels = extract_backend_portchannels(dut) + src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( + version, + dut, + src_asic, + dst_asic, + request, + backend_port_channels, + ) + + src_asic_router_mac = src_asic.get_router_mac() + + yield { + "dut": dut, + "src_asic": src_asic, + "src_asic_index": src_asic_index, + "dst_asic": dst_asic, + "dst_asic_index": dst_asic_index, + "src_asic_next_hops": src_asic_next_hops, + "dst_asic_next_hops": dst_asic_next_hops, + "src_prefix": src_prefix, + "dst_prefix": dst_prefix, + "src_asic_router_mac": src_asic_router_mac, + "backend_port_channels": backend_port_channels, + "version": version, + } diff --git a/tests/bfd/bfd_helpers.py b/tests/bfd/bfd_helpers.py index a867614baf0..1744545f38c 100644 --- 
a/tests/bfd/bfd_helpers.py +++ b/tests/bfd/bfd_helpers.py @@ -7,12 +7,57 @@ import pytest from ptf import testutils +from tests.common import config_reload from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor from tests.common.utilities import wait_until logger = logging.getLogger(__name__) +def prepare_bfd_state(dut, flag, expected_bfd_state): + modify_all_bfd_sessions(dut, flag) + config_reload(dut, safe_reload=True) + # Verification that all BFD sessions are deleted + asics = [asic.split("asic")[1] for asic in dut.get_asic_namespace_list()] + for asic in asics: + assert wait_until( + 600, + 10, + 0, + lambda: find_bfd_peers_with_given_state(dut, asic, expected_bfd_state), + ) + + +def verify_bfd_only(dut, nexthops, asic, expected_bfd_state): + logger.info("BFD verifications") + assert wait_until( + 300, + 10, + 0, + lambda: verify_bfd_state(dut, nexthops.values(), asic, expected_bfd_state), + ) + + +def create_and_verify_bfd_state(asic, prefix, dut, dut_nexthops): + logger.info("BFD addition on dut") + add_bfd(asic.asic_index, prefix, dut) + verify_bfd_only(dut, dut_nexthops, asic, "Up") + + +def verify_bfd_and_static_route(dut, dut_nexthops, asic, expected_bfd_state, request, prefix, + expected_prefix_state, version): + logger.info("BFD & Static route verifications") + verify_bfd_only(dut, dut_nexthops, asic, expected_bfd_state) + verify_static_route( + request, + asic, + prefix, + dut, + expected_prefix_state, + version, + ) + + def get_dut_asic_static_routes(version, dut): if version == "ipv4": static_route_command = "show ip route static" @@ -33,75 +78,6 @@ def get_dut_asic_static_routes(version, dut): return asic_static_routes -def select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version -): - logger.debug("Selecting source and destination DUTs with ASICs...") - # Random selection of dut & asic. 
- src_asic = get_src_dst_asic_and_duts["src_asic"] - dst_asic = get_src_dst_asic_and_duts["dst_asic"] - src_dut = get_src_dst_asic_and_duts["src_dut"] - dst_dut = get_src_dst_asic_and_duts["dst_dut"] - - logger.info("Source Asic: %s", src_asic) - logger.info("Destination Asic: %s", dst_asic) - logger.info("Source dut: %s", src_dut) - logger.info("Destination dut: %s", dst_dut) - - request.config.src_asic = src_asic - request.config.dst_asic = dst_asic - request.config.src_dut = src_dut - request.config.dst_dut = dst_dut - - src_asic_routes = get_dut_asic_static_routes(version, src_dut) - dst_asic_routes = get_dut_asic_static_routes(version, dst_dut) - - # Extracting nexthops - dst_dut_nexthops = ( - extract_ip_addresses_for_backend_portchannels( - src_dut, src_asic, version - ) - ) - logger.info("Destination nexthops, {}".format(dst_dut_nexthops)) - assert len(dst_dut_nexthops) != 0, "Destination Nexthops are empty" - - src_dut_nexthops = ( - extract_ip_addresses_for_backend_portchannels( - dst_dut, dst_asic, version - ) - ) - logger.info("Source nexthops, {}".format(src_dut_nexthops)) - assert len(src_dut_nexthops) != 0, "Source Nexthops are empty" - - # Picking a static route to delete correspinding BFD session - src_prefix = selecting_route_to_delete( - src_asic_routes, src_dut_nexthops.values() - ) - logger.info("Source prefix: %s", src_prefix) - request.config.src_prefix = src_prefix - assert src_prefix is not None and src_prefix != "", "Source prefix not found" - - dst_prefix = selecting_route_to_delete( - dst_asic_routes, dst_dut_nexthops.values() - ) - logger.info("Destination prefix: %s", dst_prefix) - request.config.dst_prefix = dst_prefix - assert ( - dst_prefix is not None and dst_prefix != "" - ), "Destination prefix not found" - - return ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) - - def verify_bfd_state(dut, dut_nexthops, dut_asic, expected_bfd_state): logger.info("Verifying 
BFD state on {} ".format(dut)) for nexthop in dut_nexthops: @@ -491,56 +467,6 @@ def ensure_interfaces_are_up(dut, asic, interfaces): toggle_interfaces_in_parallel(cmds, dut, asic, interfaces, "up") -def prepare_traffic_test_variables(get_src_dst_asic, request, version): - dut = get_src_dst_asic["dut"] - src_asic = get_src_dst_asic["src_asic"] - src_asic_index = get_src_dst_asic["src_asic_index"] - dst_asic = get_src_dst_asic["dst_asic"] - dst_asic_index = get_src_dst_asic["dst_asic_index"] - logger.info( - "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, dst_asic_index) - ) - - backend_port_channels = extract_backend_portchannels(dut) - src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( - version, - dut, - src_asic, - dst_asic, - request, - backend_port_channels, - ) - - add_bfd(src_asic_index, src_prefix, dut) - add_bfd(dst_asic_index, dst_prefix, dut) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, src_asic_next_hops.values(), src_asic, "Up"), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, dst_asic_next_hops.values(), dst_asic, "Up"), - ) - - src_asic_router_mac = src_asic.get_router_mac() - - return ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) - - def clear_bfd_configs(dut, asic_index, prefix): logger.info("Clearing BFD configs on {}".format(dut)) command = ( @@ -794,27 +720,12 @@ def verify_given_bfd_state(asic_next_hops, port_channel, asic_index, dut, expect return current_state == expected_state -def wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel, - src_asic_index, - dst_asic_next_hops, - dst_port_channel, - dst_asic_index, - dut, -): - assert wait_until( - 180, - 10, - 0, - lambda: verify_given_bfd_state(src_asic_next_hops, dst_port_channel, src_asic_index, dut, "Down"), - ) - +def 
wait_until_given_bfd_down(next_hops, port_channel, asic_index, dut): assert wait_until( - 180, + 300, 10, 0, - lambda: verify_given_bfd_state(dst_asic_next_hops, src_port_channel, dst_asic_index, dut, "Down"), + lambda: verify_given_bfd_state(next_hops, port_channel, asic_index, dut, "Down"), ) @@ -859,19 +770,3 @@ def assert_traffic_switching( dst_asic_index, dut.hostname, ) - - -def wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic): - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, src_asic_next_hops.values(), src_asic, "Up"), - ) - - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state(dut, dst_asic_next_hops.values(), dst_asic, "Up"), - ) diff --git a/tests/bfd/test_bfd_static_route.py b/tests/bfd/test_bfd_static_route.py index 28c56228e0a..85e2e0bc607 100644 --- a/tests/bfd/test_bfd_static_route.py +++ b/tests/bfd/test_bfd_static_route.py @@ -4,12 +4,12 @@ import pytest from tests.bfd.bfd_base import BfdBase -from tests.bfd.bfd_helpers import verify_static_route, select_src_dst_dut_with_asic, check_bgp_status, \ - add_bfd, verify_bfd_state, delete_bfd, extract_backend_portchannels, batch_control_interface_state +from tests.bfd.bfd_helpers import check_bgp_status, add_bfd, delete_bfd, extract_backend_portchannels, \ + batch_control_interface_state, create_and_verify_bfd_state, verify_bfd_and_static_route, verify_bfd_only from tests.common.config_reload import config_reload +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor from tests.common.platform.processes_utils import wait_critical_processes from tests.common.reboot import reboot -from tests.common.utilities import wait_until pytestmark = [ pytest.mark.topology("t2"), @@ -28,91 +28,39 @@ class TestBfdStaticRoute(BfdBase): 'diagnose': 200, } - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_with_lc_reboot( - self, - localhost, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - 
bfd_cleanup_db, - version, - ): + def test_bfd_with_lc_reboot(self, localhost, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config 
save -y") # Perform a cold reboot on source dut - reboot(src_dut, localhost) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + reboot(src_dut, localhost, safe_reboot=True) check_bgp_status(request) - # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -122,41 +70,16 @@ def test_bfd_with_lc_reboot( src_dut.shell("sudo config save -y") # Config reload of Source dut - reboot(src_dut, localhost) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + reboot(src_dut, localhost, safe_reboot=True) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_static_route_deletion( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + def test_bfd_static_route_deletion(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com @@ -169,133 +92,64 @@ def test_bfd_static_route_deletion( 4. Delete BFD on Destination dut. 5. Verify that on Destination dut BFD gets cleaned up and static route will be added back. """ - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) logger.info("BFD deletion on source dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found" if 
target == "src" else "Down", + request, + prefix, + "Route Addition" if target == "src" else "Route Removal", + version, + ) logger.info("BFD deletion on destination dut") delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found", + request, + prefix, + "Route Addition", + version, + ) logger.info("BFD deletion did not influence static routes and test completed successfully") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_flap( self, - duthost, request, - duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, bfd_cleanup_db, get_function_completeness_level, - version, ): """ Author: Harsha Golla @@ -311,45 +165,23 @@ def test_bfd_flap( 6. Verify that on destination dut BFD is up and static route is added back. 7. Repeat above steps 100 times. 
""" - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) completeness_level = get_function_completeness_level if completeness_level is None: @@ -367,78 +199,36 @@ def test_bfd_flap( time.sleep(5) logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, 
dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, - src_dut_nexthops.values(), - src_asic, - "No BFD sessions found", - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "No BFD sessions found" if target == "src" else "Down", + request, + prefix, + "Route Addition" if target == "src" else "Route Removal", + version, + ) logger.info("BFD addition on source dut") add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD & Static route verifications") - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for target, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) # Check if both iterations were successful and increment the counter successful_iterations += 1 @@ -455,18 +245,14 @@ def test_bfd_flap( logger.info("test_bfd_flap completed") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_rp_reboot( self, localhost, - duthost, request, duthosts, - tbinfo, - 
get_src_dst_asic_and_duts, enum_supervisor_dut_hostname, + select_src_dst_dut_with_asic, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -474,78 +260,40 @@ def test_bfd_with_rp_reboot( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) 
# Savings the configs src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Perform a cold reboot on source dut - reboot(rp, localhost) + # Perform a cold reboot on RP + reboot(rp, localhost, safe_reboot=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) - # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -555,89 +303,44 @@ def test_bfd_with_rp_reboot( src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Config reload of Source dut - reboot(rp, localhost) + # Perform a cold reboot on RP + reboot(rp, localhost, safe_reboot=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) - # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_remote_link_flap( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + def test_bfd_remote_link_flap(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on dst list_of_portchannels_on_dst = src_dut_nexthops.keys() @@ -648,121 +351,56 @@ def test_bfd_remote_link_flap( batch_control_interface_state(dst_dut, dst_asic, list_of_portchannels_on_dst, "shutdown") # Verification of BFD session state on src dut - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, + verify_bfd_and_static_route( + src_dut, + src_dut_nexthops, src_asic, + "Down", + request, src_prefix, - src_dut, "Route Removal", version, ) batch_control_interface_state(dst_dut, dst_asic, list_of_portchannels_on_dst, "startup") - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_lc_asic_shutdown( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_lc_asic_shutdown(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on src list_of_portchannels_on_src = dst_dut_nexthops.keys() @@ -772,138 +410,62 @@ def test_bfd_lc_asic_shutdown( # Shutdown PortChannels batch_control_interface_state(src_dut, src_asic, list_of_portchannels_on_src, "shutdown") - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Removal", - version, - ) + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Down", + request, + prefix, + "Route Removal", + version, + ) batch_control_interface_state(src_dut, src_asic, list_of_portchannels_on_src, "startup") - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_portchannel_member_flap( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + # Verify BFD and static routes. 
+ with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_portchannel_member_flap(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ request.config.interface_shutdown = True - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + version = select_src_dst_dut_with_asic["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Extract portchannel interfaces on src list_of_portchannels_on_src = dst_dut_nexthops.keys() @@ -922,167 +484,73 @@ def test_bfd_portchannel_member_flap( request.config.selected_portchannel_members = port_channel_members_on_src batch_control_interface_state(src_dut, src_asic, port_channel_members_on_src, "shutdown") - # Verification of BFD session state. 
- assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) - - # Verify that corresponding static route has been removed on both duts - logger.info("BFD & Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Removal", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Removal", - version, - ) + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Down", + request, + prefix, + "Route Removal", + version, + ) # Bring up of PortChannel members batch_control_interface_state(src_dut, src_asic, port_channel_members_on_src, "startup") - # Verification of BFD session state. 
- assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) - - # Verify that corresponding static route has been added on both duts - logger.info("Static route verifications") - verify_static_route( - request, - dst_asic, - dst_prefix, - dst_dut, - "Route Addition", - version, - ) - verify_static_route( - request, - src_asic, - src_prefix, - src_dut, - "Route Addition", - version, - ) - - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) - def test_bfd_config_reload( - self, - duthost, - request, - tbinfo, - get_src_dst_asic_and_duts, - bfd_cleanup_db, - version, - ): + # Verify BFD and static routes + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit( + verify_bfd_and_static_route, + dut, + dut_nexthops, + asic, + "Up", + request, + prefix, + "Route Addition", + version, + ) + + def test_bfd_config_reload(self, request, select_src_dst_dut_with_asic, bfd_cleanup_db): """ Author: Harsha Golla Email : harsgoll@cisco.com """ - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") # Config reload of Source dut - config_reload(src_dut) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + config_reload(src_dut, safe_reload=True) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1092,43 +560,22 @@ def test_bfd_config_reload( src_dut.shell("sudo config save -y") # Config reload of Source dut - config_reload(src_dut) - - # Waiting for all processes on Source dut - wait_critical_processes(src_dut) + config_reload(src_dut, safe_reload=True) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_rp_config_reload( self, - localhost, - duthost, request, duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, enum_supervisor_dut_hostname, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -1136,78 +583,41 @@ def test_bfd_with_rp_config_reload( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - 
src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Perform a cold reboot on source dut - config_reload(rp) + # Config reload of RP + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + 
executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source & destination dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1217,45 +627,28 @@ def test_bfd_with_rp_config_reload( src_dut.shell("sudo config save -y") dst_dut.shell("sudo config save -y") - # Config reload of Source dut - config_reload(rp) + # Config reload of RP + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_with_bad_fc_asic( self, - localhost, - duthost, request, duthosts, - tbinfo, - get_src_dst_asic_and_duts, + select_src_dst_dut_with_asic, enum_supervisor_dut_hostname, bfd_cleanup_db, - version, ): """ Author: Harsha Golla @@ -1263,47 +656,22 @@ def test_bfd_with_bad_fc_asic( """ rp = duthosts[enum_supervisor_dut_hostname] - # Selecting source, destination dut & prefix & BFD status verification for all nexthops - logger.info( - "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" - ) - ( - src_asic, - dst_asic, - src_dut, - dst_dut, - src_dut_nexthops, - dst_dut_nexthops, - src_prefix, - dst_prefix, - ) = select_src_dst_dut_with_asic( - request, get_src_dst_asic_and_duts, version - ) - - # Creation of BFD - logger.info("BFD addition on source dut") - add_bfd(src_asic.asic_index, src_prefix, src_dut) - - logger.info("BFD addition on destination dut") - add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) - - # Verification of BFD session state. 
- assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + src_asic = select_src_dst_dut_with_asic["src_asic"] + dst_asic = select_src_dst_dut_with_asic["dst_asic"] + src_dut = select_src_dst_dut_with_asic["src_dut"] + dst_dut = select_src_dst_dut_with_asic["dst_dut"] + src_dut_nexthops = select_src_dst_dut_with_asic["src_dut_nexthops"] + dst_dut_nexthops = select_src_dst_dut_with_asic["dst_dut_nexthops"] + src_prefix = select_src_dst_dut_with_asic["src_prefix"] + dst_prefix = select_src_dst_dut_with_asic["dst_prefix"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_dut, src_dut_nexthops), + ("dst", dst_asic, dst_prefix, dst_dut, dst_dut_nexthops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, dut, dut_nexthops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, dut_nexthops) # Savings the configs src_dut.shell("sudo config save -y") @@ -1316,53 +684,29 @@ def test_bfd_with_bad_fc_asic( asic_ids = [int(element.split("swss")[1]) for element in docker_output] # Shut down corresponding asic on supervisor to simulate bad asic - for asic_id in asic_ids: - rp.shell("systemctl stop swss@{}".format(asic_id)) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for asic_id in asic_ids: + executor.submit(rp.shell, "systemctl stop swss@{}".format(asic_id)) # Verify that BFD sessions are down - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" - ), - ) - assert wait_until( - 180, - 10, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Down" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + 
executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Down") # Config reload RP to bring up the swss containers - config_reload(rp) + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "Up" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "Up") logger.info("BFD deletion on source dut") delete_bfd(src_asic.asic_index, src_prefix, src_dut) @@ -1373,28 +717,16 @@ def test_bfd_with_bad_fc_asic( dst_dut.shell("sudo config save -y") # Config reload RP - config_reload(rp) + config_reload(rp, safe_reload=True) # Waiting for all processes on Source & destination dut - wait_critical_processes(src_dut) - wait_critical_processes(dst_dut) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, _, _, dut, _ in src_dst_context: + executor.submit(wait_critical_processes, dut) check_bgp_status(request) # Verification of BFD session state. 
- assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" - ), - ) - assert wait_until( - 300, - 20, - 0, - lambda: verify_bfd_state( - src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" - ), - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, dut, dut_nexthops in src_dst_context: + executor.submit(verify_bfd_only, dut, dut_nexthops, asic, "No BFD sessions found") diff --git a/tests/bfd/test_bfd_traffic.py b/tests/bfd/test_bfd_traffic.py index a49b09193ce..fd3aa77d614 100644 --- a/tests/bfd/test_bfd_traffic.py +++ b/tests/bfd/test_bfd_traffic.py @@ -4,8 +4,9 @@ from tests.bfd.bfd_base import BfdBase from tests.bfd.bfd_helpers import get_ptf_src_port, get_backend_interface_in_use_by_counter, \ - prepare_traffic_test_variables, get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, \ - get_port_channel_by_member, wait_until_bfd_up, wait_until_given_bfd_down, assert_traffic_switching + get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, get_port_channel_by_member, \ + wait_until_given_bfd_down, assert_traffic_switching, create_and_verify_bfd_state, verify_bfd_only +from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor pytestmark = [ pytest.mark.topology("t2"), @@ -18,27 +19,34 @@ class TestBfdTraffic(BfdBase): PACKET_COUNT = 10000 - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_remote_port_channel_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + 
src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -78,15 +86,12 @@ def test_bfd_traffic_remote_port_channel_shutdown( src_bp_iface_before_shutdown, ) - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -121,29 +126,38 @@ def test_bfd_traffic_remote_port_channel_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, 
dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_local_port_channel_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -183,15 +197,12 @@ def test_bfd_traffic_local_port_channel_shutdown( dst_bp_iface_before_shutdown, ) - 
wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -226,29 +237,38 @@ def test_bfd_traffic_local_port_channel_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def test_bfd_traffic_remote_port_channel_member_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = 
prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -288,15 +308,12 @@ def test_bfd_traffic_remote_port_channel_member_shutdown( if not src_port_channel_before_shutdown or not dst_port_channel_before_shutdown: pytest.fail("No port channel found with interface in use") - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, - dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -331,29 +348,38 @@ def test_bfd_traffic_remote_port_channel_member_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") - @pytest.mark.parametrize("version", ["ipv4", "ipv6"]) def 
test_bfd_traffic_local_port_channel_member_shutdown( self, request, tbinfo, ptfadapter, - get_src_dst_asic, + prepare_traffic_test_variables, bfd_cleanup_db, - version, ): - ( - dut, - src_asic, - src_asic_index, - dst_asic, - dst_asic_index, - src_asic_next_hops, - dst_asic_next_hops, - src_asic_router_mac, - backend_port_channels, - ) = prepare_traffic_test_variables(get_src_dst_asic, request, version) + dut = prepare_traffic_test_variables["dut"] + src_asic = prepare_traffic_test_variables["src_asic"] + src_asic_index = prepare_traffic_test_variables["src_asic_index"] + dst_asic = prepare_traffic_test_variables["dst_asic"] + dst_asic_index = prepare_traffic_test_variables["dst_asic_index"] + src_asic_next_hops = prepare_traffic_test_variables["src_asic_next_hops"] + dst_asic_next_hops = prepare_traffic_test_variables["dst_asic_next_hops"] + src_prefix = prepare_traffic_test_variables["src_prefix"] + dst_prefix = prepare_traffic_test_variables["dst_prefix"] + src_asic_router_mac = prepare_traffic_test_variables["src_asic_router_mac"] + backend_port_channels = prepare_traffic_test_variables["backend_port_channels"] + version = prepare_traffic_test_variables["version"] + src_dst_context = [ + ("src", src_asic, src_prefix, src_asic_next_hops), + ("dst", dst_asic, dst_prefix, dst_asic_next_hops), + ] + + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, prefix, next_hops in src_dst_context: + executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: @@ -393,15 +419,12 @@ def test_bfd_traffic_local_port_channel_member_shutdown( if not src_port_channel_before_shutdown or not dst_port_channel_before_shutdown: pytest.fail("No port channel found with interface in use") - wait_until_given_bfd_down( - src_asic_next_hops, - src_port_channel_before_shutdown, - src_asic_index, - dst_asic_next_hops, - dst_port_channel_before_shutdown, 
- dst_asic_index, - dut, - ) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for next_hops, port_channel, asic_index in [ + (src_asic_next_hops, dst_port_channel_before_shutdown, src_asic_index), + (dst_asic_next_hops, src_port_channel_before_shutdown, dst_asic_index), + ]: + executor.submit(wait_until_given_bfd_down, next_hops, port_channel, asic_index, dut) src_bp_iface_after_shutdown, dst_bp_iface_after_shutdown = get_backend_interface_in_use_by_counter( dut, @@ -436,4 +459,6 @@ def test_bfd_traffic_local_port_channel_member_shutdown( "startup", ) - wait_until_bfd_up(dut, src_asic_next_hops, src_asic, dst_asic_next_hops, dst_asic) + with SafeThreadPoolExecutor(max_workers=8) as executor: + for _, asic, _, next_hops in src_dst_context: + executor.submit(verify_bfd_only, dut, next_hops, asic, "Up") From f9d7b6f65001a13abc4a36397799a5c32d235e5c Mon Sep 17 00:00:00 2001 From: rbpittman Date: Thu, 7 Nov 2024 22:13:14 -0500 Subject: [PATCH 044/175] Cisco-8122 Increase buffer pool watermark test margin (#15450) * Increase pkt margin on buffer pool watermark test to 8. * Revise to be GR2 only. 
--- tests/qos/files/cisco/qos_param_generator.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/qos/files/cisco/qos_param_generator.py b/tests/qos/files/cisco/qos_param_generator.py index 3f109ee69e5..b38a30d33c1 100644 --- a/tests/qos/files/cisco/qos_param_generator.py +++ b/tests/qos/files/cisco/qos_param_generator.py @@ -462,6 +462,8 @@ def __define_buffer_pool_watermark(self): "pkts_num_trig_pfc": self.lossless_drop_thr // self.buffer_size // packet_buffs, "cell_size": self.buffer_size, "packet_size": packet_size} + if self.dutAsic == "gr2": + lossless_params["pkts_num_margin"] = 8 self.write_params("wm_buf_pool_lossless", lossless_params) if self.should_autogen(["wm_buf_pool_lossy"]): lossy_params = {"dscp": self.dscp_queue0, @@ -472,6 +474,8 @@ def __define_buffer_pool_watermark(self): "pkts_num_fill_egr_min": 0, "cell_size": self.buffer_size, "packet_size": packet_size} + if self.dutAsic == "gr2": + lossy_params["pkts_num_margin"] = 8 self.write_params("wm_buf_pool_lossy", lossy_params) def __define_q_shared_watermark(self): From 3e54c34c934cb4d4bb5d8682ddacbc8a37d23ff9 Mon Sep 17 00:00:00 2001 From: nissampa <99767762+nissampa@users.noreply.github.com> Date: Thu, 7 Nov 2024 20:51:19 -0800 Subject: [PATCH 045/175] added fixtures as argument to all the test cases (#15153) * added fixtures as argument to all the test cases * added smartswitch topo * changed the scope for fixtures * added the is_smartswitch field in ansible/testbed.yaml * changed scope of the fixture * changed back to function scope to resolve mismatch * removed fixtures as parameter after autouse * removed is_smartswitch field from non-smartswitch configs * changed topo name to smartswitch-t1 and added symbolic links * added num_dpu_modules as fixtures * added correct marker * non-smartswitch runs fixes * non-smartswitch runs fixes * non-smartswitch runs fixes * resolved set of PR comments * resolved flake8 error * modified the skip reason * minor change --- 
.../eos/templates/smartswitch-t1-spine.j2 | 1 + .../roles/eos/templates/smartswitch-t1-tor.j2 | 1 + ansible/testbed.csv | 32 +- ansible/vars/topo_smartswitch-t1.yml | 669 ++++++++++++++++++ tests/smartswitch/common/device_utils_dpu.py | 31 +- .../platform_tests/test_reload_dpu.py | 14 +- .../platform_tests/test_show_platform_dpu.py | 18 +- 7 files changed, 715 insertions(+), 51 deletions(-) create mode 120000 ansible/roles/eos/templates/smartswitch-t1-spine.j2 create mode 120000 ansible/roles/eos/templates/smartswitch-t1-tor.j2 create mode 100644 ansible/vars/topo_smartswitch-t1.yml diff --git a/ansible/roles/eos/templates/smartswitch-t1-spine.j2 b/ansible/roles/eos/templates/smartswitch-t1-spine.j2 new file mode 120000 index 00000000000..1029da8a546 --- /dev/null +++ b/ansible/roles/eos/templates/smartswitch-t1-spine.j2 @@ -0,0 +1 @@ +t1-28-lag-spine.j2 \ No newline at end of file diff --git a/ansible/roles/eos/templates/smartswitch-t1-tor.j2 b/ansible/roles/eos/templates/smartswitch-t1-tor.j2 new file mode 120000 index 00000000000..7e09b9a1dce --- /dev/null +++ b/ansible/roles/eos/templates/smartswitch-t1-tor.j2 @@ -0,0 +1 @@ +t1-28-lag-tor.j2 \ No newline at end of file diff --git a/ansible/testbed.csv b/ansible/testbed.csv index 3892122bbbe..a91a8284a30 100644 --- a/ansible/testbed.csv +++ b/ansible/testbed.csv @@ -1,16 +1,16 @@ -# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment -ptf1-m,ptf1,ptf32,docker-ptf,ptf_ptf1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Test ptf Mellanox -ptf2-b,ptf2,ptf64,docker-ptf,ptf_ptf2,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,Test ptf Broadcom -vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf_vms1-1,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms -vms-sn2700-t1-lag,vms1-2,t1-lag,docker-ptf,ptf_vms1-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms 
-vms-sn2700-t0,vms1-3,t0,docker-ptf,ptf_vms1-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms -vms-s6000-t0,vms2-1,t0,docker-ptf,ptf_vms2-1,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,Tests Dell S6000 vms -vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf_vms3-1,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,Tests Arista A7260 vms -vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf_vms4-1,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms -vms-s6100-t1,vms4-2,t1-64,docker-ptf,ptf_vms4-2,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms -vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf_vms5-1,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,ests Dell S6100 vms -vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf_vms1-duts,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,Example Multi DUTs testbed -vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,superman -ixanvl-vs-conf,anvl,ptf32,docker-ptf-anvl,ptf_anvl,10.250.0.100/24,,server_1,,vlab-01,lab,True,Test ptf ANVL SONIC VM -vms-snappi-sonic,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,sonic-s6100-dut1,snappi-sonic,True,Batman -vms-snappi-sonic-multidut,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,[sonic-s6100-dut1;sonic-s6100-dut2],snappi-sonic,True,Batman +# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,is_smartswitch,comment +ptf1-m,ptf1,ptf32,docker-ptf,ptf_ptf1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,,Test ptf Mellanox +ptf2-b,ptf2,ptf64,docker-ptf,ptf_ptf2,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,,Test ptf Broadcom +vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf_vms1-1,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms 
+vms-sn2700-t1-lag,vms1-2,t1-lag,docker-ptf,ptf_vms1-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms +vms-sn2700-t0,vms1-3,t0,docker-ptf,ptf_vms1-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,,Tests Mellanox SN2700 vms +vms-s6000-t0,vms2-1,t0,docker-ptf,ptf_vms2-1,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,,Tests Dell S6000 vms +vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf_vms3-1,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,,Tests Arista A7260 vms +vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf_vms4-1,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,,Tests Dell S6100 vms +vms-s6100-t1,vms4-2,t1-64,docker-ptf,ptf_vms4-2,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,,Tests Dell S6100 vms +vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf_vms5-1,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,,ests Dell S6100 vms +vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf_vms1-duts,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,,Example Multi DUTs testbed +vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,,superman +ixanvl-vs-conf,anvl,ptf32,docker-ptf-anvl,ptf_anvl,10.250.0.100/24,,server_1,,vlab-01,lab,True,,Test ptf ANVL SONIC VM +vms-snappi-sonic,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,sonic-s6100-dut1,snappi-sonic,True,,Batman +vms-snappi-sonic-multidut,vms6-1,ptf64,docker-ptf-snappi,snappi-sonic-ptf,10.251.0.232,,Server_6,,[sonic-s6100-dut1;sonic-s6100-dut2],snappi-sonic,True,,Batman diff --git a/ansible/vars/topo_smartswitch-t1.yml b/ansible/vars/topo_smartswitch-t1.yml new file mode 100644 index 00000000000..c17de99876d --- /dev/null +++ b/ansible/vars/topo_smartswitch-t1.yml @@ -0,0 +1,669 @@ +topology: + VMs: + ARISTA01T2: + vlans: + - 0 + - 1 + vm_offset: 0 + ARISTA03T2: + vlans: + - 2 + - 3 + vm_offset: 1 + ARISTA05T2: + vlans: + - 4 + - 5 + vm_offset: 2 + 
ARISTA07T2: + vlans: + - 6 + - 7 + vm_offset: 3 + ARISTA01T0: + vlans: + - 8 + vm_offset: 4 + ARISTA02T0: + vlans: + - 9 + vm_offset: 5 + ARISTA03T0: + vlans: + - 10 + vm_offset: 6 + ARISTA04T0: + vlans: + - 11 + vm_offset: 7 + ARISTA05T0: + vlans: + - 12 + vm_offset: 8 + ARISTA06T0: + vlans: + - 13 + vm_offset: 9 + ARISTA07T0: + vlans: + - 14 + vm_offset: 10 + ARISTA08T0: + vlans: + - 15 + vm_offset: 11 + ARISTA09T0: + vlans: + - 16 + vm_offset: 12 + ARISTA10T0: + vlans: + - 17 + vm_offset: 13 + ARISTA11T0: + vlans: + - 18 + vm_offset: 14 + ARISTA12T0: + vlans: + - 19 + vm_offset: 15 + ARISTA13T0: + vlans: + - 20 + vm_offset: 16 + ARISTA14T0: + vlans: + - 21 + vm_offset: 17 + ARISTA15T0: + vlans: + - 22 + vm_offset: 18 + ARISTA16T0: + vlans: + - 23 + vm_offset: 19 + ARISTA17T0: + vlans: + - 24 + vm_offset: 20 + ARISTA18T0: + vlans: + - 25 + vm_offset: 21 + ARISTA19T0: + vlans: + - 26 + vm_offset: 22 + ARISTA20T0: + vlans: + - 27 + vm_offset: 23 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: LeafRouter + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 20 + tor_subnet_number: 2 + max_tor_subnet_number: 20 + tor_subnet_size: 128 + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.0 + - FC00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100::1/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interface: + ipv4: 10.10.246.1/24 + ipv6: fc0a::2/64 + + ARISTA03T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.4 + - FC00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100::3/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interface: + ipv4: 10.10.246.3/24 + ipv6: fc0a::6/64 + + ARISTA05T2: + properties: + - common + - 
spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.8 + - FC00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100::5/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interface: + ipv4: 10.10.246.5/24 + ipv6: fc0a::a/64 + + ARISTA07T2: + properties: + - common + - spine + bgp: + asn: 65200 + peers: + 65100: + - 10.0.0.12 + - FC00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100::7/128 + Ethernet1: + lacp: 1 + Ethernet2: + lacp: 1 + Port-Channel1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interface: + ipv4: 10.10.246.7/24 + ipv6: fc0a::e/64 + + ARISTA01T0: + properties: + - common + - tor + tornum: 1 + bgp: + asn: 64001 + peers: + 65100: + - 10.0.0.32 + - FC00::41 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100::11/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interface: + ipv4: 10.10.246.17/24 + ipv6: fc0a::22/64 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + + ARISTA02T0: + properties: + - common + - tor + tornum: 2 + bgp: + asn: 64002 + peers: + 65100: + - 10.0.0.34 + - FC00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100::12/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: fc00::46/126 + bp_interface: + ipv4: 10.10.246.18/24 + ipv6: fc0a::25/64 + + ARISTA03T0: + properties: + - common + - tor + tornum: 3 + bgp: + asn: 64003 + peers: + 65100: + - 10.0.0.36 + - FC00::49 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100::13/128 + Ethernet1: + ipv4: 10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interface: + ipv4: 10.10.246.19/24 + ipv6: fc0a::26/64 + vips: + ipv4: + prefixes: + - 200.0.1.0/26 + asn: 64700 + + ARISTA04T0: + properties: + - common + - tor + tornum: 4 + bgp: + asn: 64004 + peers: + 65100: + - 10.0.0.38 + - FC00::4D + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100::14/128 + Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interface: + ipv4: 
10.10.246.20/24 + ipv6: fc0a::29/64 + + ARISTA05T0: + properties: + - common + - tor + tornum: 5 + bgp: + asn: 64005 + peers: + 65100: + - 10.0.0.40 + - FC00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100::15/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interface: + ipv4: 10.10.246.21/24 + ipv6: fc0a::2a/64 + + ARISTA06T0: + properties: + - common + - tor + tornum: 6 + bgp: + asn: 64006 + peers: + 65100: + - 10.0.0.42 + - FC00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100::16/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interface: + ipv4: 10.10.246.22/24 + ipv6: fc0a::2d/64 + + ARISTA07T0: + properties: + - common + - tor + tornum: 7 + bgp: + asn: 64007 + peers: + 65100: + - 10.0.0.44 + - FC00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100::17/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interface: + ipv4: 10.10.246.23/24 + ipv6: fc0a::2e/64 + + ARISTA08T0: + properties: + - common + - tor + tornum: 8 + bgp: + asn: 64008 + peers: + 65100: + - 10.0.0.46 + - FC00::5D + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100::18/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interface: + ipv4: 10.10.246.24/24 + ipv6: fc0a::31/64 + + ARISTA09T0: + properties: + - common + - tor + tornum: 9 + bgp: + asn: 64009 + peers: + 65100: + - 10.0.0.48 + - FC00::61 + interfaces: + Loopback0: + ipv4: 100.1.0.25/32 + ipv6: 2064:100::19/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interface: + ipv4: 10.10.246.25/24 + ipv6: fc0a::32/64 + + ARISTA10T0: + properties: + - common + - tor + tornum: 10 + bgp: + asn: 64010 + peers: + 65100: + - 10.0.0.50 + - FC00::65 + interfaces: + Loopback0: + ipv4: 100.1.0.26/32 + ipv6: 2064:100::1a/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interface: + ipv4: 10.10.246.26/24 + ipv6: fc0a::35/64 + + ARISTA11T0: + properties: + - common + - tor + tornum: 11 + bgp: + 
asn: 64011 + peers: + 65100: + - 10.0.0.52 + - FC00::69 + interfaces: + Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100::1b/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interface: + ipv4: 10.10.246.27/24 + ipv6: fc0a::36/64 + + ARISTA12T0: + properties: + - common + - tor + tornum: 12 + bgp: + asn: 64012 + peers: + 65100: + - 10.0.0.54 + - FC00::6D + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100::1c/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interface: + ipv4: 10.10.246.28/24 + ipv6: fc0a::39/64 + + ARISTA13T0: + properties: + - common + - tor + tornum: 13 + bgp: + asn: 64013 + peers: + 65100: + - 10.0.0.56 + - FC00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100::1d/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::3a/64 + + ARISTA14T0: + properties: + - common + - tor + tornum: 14 + bgp: + asn: 64014 + peers: + 65100: + - 10.0.0.58 + - FC00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100::1e/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::3d/64 + + ARISTA15T0: + properties: + - common + - tor + tornum: 15 + bgp: + asn: 64015 + peers: + 65100: + - 10.0.0.60 + - FC00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100::1f/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::3e/64 + + ARISTA16T0: + properties: + - common + - tor + tornum: 16 + bgp: + asn: 64016 + peers: + 65100: + - 10.0.0.62 + - FC00::7D + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100::20/128 + Ethernet1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::41/64 + + ARISTA17T0: + properties: + - common + - tor + tornum: 17 + bgp: + asn: 64017 + peers: + 65100: + - 10.0.0.64 + - FC00::81 + interfaces: + Loopback0: + ipv4: 
100.1.0.33/32 + ipv6: 2064:100::21/128 + Ethernet1: + ipv4: 10.0.0.65/31 + ipv6: fc00::82/126 + bp_interface: + ipv4: 10.10.246.33/24 + ipv6: fc0a::42/64 + + ARISTA18T0: + properties: + - common + - tor + tornum: 18 + bgp: + asn: 64018 + peers: + 65100: + - 10.0.0.66 + - FC00::85 + interfaces: + Loopback0: + ipv4: 100.1.0.34/32 + ipv6: 2064:100::22/128 + Ethernet1: + ipv4: 10.0.0.67/31 + ipv6: fc00::86/126 + bp_interface: + ipv4: 10.10.246.34/24 + ipv6: fc0a::45/64 + + ARISTA19T0: + properties: + - common + - tor + tornum: 19 + bgp: + asn: 64019 + peers: + 65100: + - 10.0.0.68 + - FC00::89 + interfaces: + Loopback0: + ipv4: 100.1.0.35/32 + ipv6: 2064:100::23/128 + Ethernet1: + ipv4: 10.0.0.69/31 + ipv6: fc00::8a/126 + bp_interface: + ipv4: 10.10.246.35/24 + ipv6: fc0a::46/64 + + ARISTA20T0: + properties: + - common + - tor + tornum: 20 + bgp: + asn: 64020 + peers: + 65100: + - 10.0.0.70 + - FC00::8D + interfaces: + Loopback0: + ipv4: 100.1.0.36/32 + ipv6: 2064:100::24/128 + Ethernet1: + ipv4: 10.0.0.71/31 + ipv6: fc00::8e/126 + bp_interface: + ipv4: 10.10.246.36/24 + ipv6: fc0a::49/64 diff --git a/tests/smartswitch/common/device_utils_dpu.py b/tests/smartswitch/common/device_utils_dpu.py index 1367eff53f5..9b80882dd66 100644 --- a/tests/smartswitch/common/device_utils_dpu.py +++ b/tests/smartswitch/common/device_utils_dpu.py @@ -8,7 +8,6 @@ from tests.common.helpers.platform_api import chassis, module from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert -from pkg_resources import parse_version @pytest.fixture(scope='function') @@ -23,10 +22,10 @@ def num_dpu_modules(platform_api_conn): return num_modules -@pytest.fixture(scope='function') +@pytest.fixture(scope='function', autouse=True) def check_smartswitch_and_dark_mode(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn): + platform_api_conn, num_dpu_modules): """ Checks whether given testbed is running 202405 image or below versions @@ -38,17 +37,17 
@@ def check_smartswitch_and_dark_mode(duthosts, duthost = duthosts[enum_rand_one_per_hwsku_hostname] - if not duthost.facts["DPUS"] and \ - parse_version(duthost.os_version) <= parse_version("202405"): - pytest.skip("Test is not supported for this testbed and os version") + if "DPUS" not in duthost.facts: + pytest.skip("Test is not supported for this testbed") - darkmode = is_dark_mode_enabled(duthost, platform_api_conn) + darkmode = is_dark_mode_enabled(duthost, platform_api_conn, + num_dpu_modules) if darkmode: - dpu_power_on(duthost, platform_api_conn) + dpu_power_on(duthost, platform_api_conn, num_dpu_modules) -def is_dark_mode_enabled(duthost, platform_api_conn): +def is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules): """ Checks the liveliness of DPU Returns: @@ -56,10 +55,9 @@ def is_dark_mode_enabled(duthost, platform_api_conn): else False """ - num_modules = num_dpu_modules(platform_api_conn) count_admin_down = 0 - for index in range(num_modules): + for index in range(num_dpu_modules): dpu = module.get_name(platform_api_conn, index) output_config_db = duthost.command( 'redis-cli -p 6379 -h 127.0.0.1 \ @@ -70,7 +68,7 @@ def is_dark_mode_enabled(duthost, platform_api_conn): if 'down' in output_config_db['stdout']: count_admin_down += 1 - if count_admin_down == num_modules: + if count_admin_down == num_dpu_modules: logging.info("Smartswitch is in dark mode") return True @@ -78,17 +76,16 @@ def is_dark_mode_enabled(duthost, platform_api_conn): return False -def dpu_power_on(duthost, platform_api_conn): +def dpu_power_on(duthost, platform_api_conn, num_dpu_modules): """ Executes power on all DPUs Returns: Returns True or False based on all DPUs powered on or not """ - num_modules = num_dpu_modules(platform_api_conn) ip_address_list = [] - for index in range(num_modules): + for index in range(num_dpu_modules): dpu = module.get_name(platform_api_conn, index) ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) @@ -129,7 +126,7 
@@ def check_dpu_module_status(duthost, power_status, dpu_name): Returns True or False based on status of given DPU module """ - output_dpu_status = duthost.command( + output_dpu_status = duthost.shell( 'show chassis module status | grep %s' % (dpu_name)) if "Offline" in output_dpu_status["stdout"]: @@ -158,7 +155,7 @@ def check_dpu_reboot_cause(duthost, dpu_name): Returns True or False based on reboot cause of all DPU modules """ - output_reboot_cause = duthost.command( + output_reboot_cause = duthost.shell( 'show reboot-cause all | grep %s' % (dpu_name)) if 'Unknown' in output_reboot_cause["stdout"]: diff --git a/tests/smartswitch/platform_tests/test_reload_dpu.py b/tests/smartswitch/platform_tests/test_reload_dpu.py index 4c4d7b61d20..1e8e7518f33 100644 --- a/tests/smartswitch/platform_tests/test_reload_dpu.py +++ b/tests/smartswitch/platform_tests/test_reload_dpu.py @@ -17,18 +17,18 @@ from tests.platform_tests.api.conftest import * # noqa: F401,F403 pytestmark = [ - pytest.mark.topology('t1') + pytest.mark.topology('smartswitch') ] def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, num_dpu_modules): + localhost, platform_api_conn, + num_dpu_modules): """ @summary: Verify output of `config chassis modules startup ` """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] ip_address_list = [] - num_modules = num_dpu_modules(platform_api_conn) logging.info("Starting switch reboot...") reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, @@ -39,7 +39,7 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, "Not all ports that are admin up on are operationally up") logging.info("Interfaces are up") - for index in range(num_modules): + for index in range(num_dpu_modules): ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) dpu = module.get_name(platform_api_conn, index) @@ -52,16 +52,16 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, 
def test_show_ping_int_after_reload(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, num_dpu_modules): + localhost, platform_api_conn, + num_dpu_modules): """ @summary: To Check Ping between NPU and DPU after configuration reload on NPU """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) ip_address_list = [] - for index in range(num_modules): + for index in range(num_dpu_modules): ip_address_list.append( module.get_midplane_ip(platform_api_conn, index)) diff --git a/tests/smartswitch/platform_tests/test_show_platform_dpu.py b/tests/smartswitch/platform_tests/test_show_platform_dpu.py index 9c575a47880..5049975b67d 100644 --- a/tests/smartswitch/platform_tests/test_show_platform_dpu.py +++ b/tests/smartswitch/platform_tests/test_show_platform_dpu.py @@ -12,7 +12,7 @@ from tests.common.devices.sonic import * # noqa: 403 pytestmark = [ - pytest.mark.topology('t1') + pytest.mark.topology('smartswitch') ] @@ -44,9 +44,8 @@ def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, @summary: Verify `shut down and power up DPU` """ duthost = duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules shutdown %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -54,7 +53,7 @@ def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, duthost, "off", dpu_name), "DPU is not operationally down") - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -69,9 +68,8 @@ def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, @summary: Verify `Reboot Cause` """ duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] - num_modules = num_dpu_modules(platform_api_conn) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthost.shell("config chassis \ module shutdown %s" % (dpu_name))["stdout_lines"] @@ -80,7 +78,7 @@ def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, duthost, "off", dpu_name), "DPU is not operationally down") - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -105,9 +103,7 @@ def test_pcie_link(duthosts, enum_rand_one_per_hwsku_hostname, 'PCIe Device Checking All Test ----------->>> PASSED', "PCIe Link is good'{}'".format(duthost.hostname)) - num_modules = num_dpu_modules(platform_api_conn) - - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules shutdown %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, @@ -120,7 +116,7 @@ def test_pcie_link(duthosts, enum_rand_one_per_hwsku_hostname, 'PCIe Device Checking All Test ----------->>> PASSED', "PCIe Link is good'{}'".format(duthost.hostname)) - for index in range(num_modules): + for index in range(num_dpu_modules): dpu_name = module.get_name(platform_api_conn, index) duthosts.shell("config chassis modules startup %s" % (dpu_name)) pytest_assert(wait_until(180, 60, 0, From e159098bfd3abc1028d1cdc4cf5342721ccc6096 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:12:57 +0800 Subject: [PATCH 046/175] Remove skip_traffic_test fixture in vxlan tests (#15459) What is the motivation for this PR? 
Currently we use a conditional mark to add a marker, then use a pytest hook to redirect the testutils.verify function to a function that always returns True, skipping the traffic test.
@@ -291,9 +289,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -411,7 +406,7 @@ def verfiy_bfd_down(self, ep_list): return False return True - def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case1(self, setUp, encap_type): ''' tc1: This test checks the basic TSA removal of BFD sessions. 1) Create Vnet route with 4 endpoints and BFD monitors. @@ -428,7 +423,7 @@ def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test1", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True, []) self.apply_tsa() pytest_assert(self.in_maintainence()) @@ -437,11 +432,11 @@ def test_tsa_case1(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test1b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case2(self, setUp, encap_type): ''' tc2: This test checks the basic route application while in TSA. 1) apply TSA. 
@@ -464,11 +459,11 @@ def test_tsa_case2(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test2", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case3(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case3(self, setUp, encap_type): ''' tc3: This test checks for lasting impact of TSA and TSB. 1) apply TSA. @@ -491,11 +486,11 @@ def test_tsa_case3(self, setUp, encap_type, skip_traffic_test): # noqa F811 dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test3", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test3", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case4(self, setUp, encap_type): ''' tc4: This test checks basic Vnet route state retention during config reload. 1) Create Vnet route with 4 endpoints and BFD monitors. 
@@ -514,7 +509,7 @@ def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 duthost.shell("sudo config save -y", executable="/bin/bash", module_ignore_errors=True) - self.dump_self_info_and_run_ptf("test4", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test4", encap_type, True, []) duthost.shell("sudo config reload -y", executable="/bin/bash", module_ignore_errors=True) @@ -524,11 +519,11 @@ def test_tsa_case4(self, setUp, encap_type, skip_traffic_test): # noqa F811 ecmp_utils.configure_vxlan_switch(duthost, vxlan_port=4789, dutmac=self.vxlan_test_setup['dut_mac']) dest, ep_list = self.create_vnet_route(encap_type) - self.dump_self_info_and_run_ptf("test4b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test4b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case5(self, setUp, encap_type): ''' tc4: This test checks TSA state retention w.r.t BFD accross config reload. 1) Create Vnet route with 4 endpoints and BFD monitors. 
@@ -552,7 +547,7 @@ def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 duthost.shell("sudo config save -y", executable="/bin/bash", module_ignore_errors=True) - self.dump_self_info_and_run_ptf("test5", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test5", encap_type, True, []) self.apply_tsa() pytest_assert(self.in_maintainence()) @@ -569,11 +564,11 @@ def test_tsa_case5(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test5b", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test5b", encap_type, True, []) self.delete_vnet_route(encap_type, dest) - def test_tsa_case6(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_tsa_case6(self, setUp, encap_type): ''' tc6: This test checks that the BFD doesnt come up while device is in TSA and remains down accross config reload. 
@@ -615,6 +610,6 @@ def test_tsa_case6(self, setUp, encap_type, skip_traffic_test): # noqa F811 self.apply_tsb() pytest_assert(not self.in_maintainence()) - self.dump_self_info_and_run_ptf("test6", encap_type, True, [], skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test6", encap_type, True, []) self.delete_vnet_route(encap_type, dest) diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py index b6761ac25fd..5ba9d2c1237 100644 --- a/tests/vxlan/test_vxlan_decap.py +++ b/tests/vxlan/test_vxlan_decap.py @@ -14,7 +14,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py # noqa F401 from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url,\ toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -185,7 +184,7 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname): def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, tbinfo, - ptfhost, creds, toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 + ptfhost, creds, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 duthost = duthosts[rand_one_dut_hostname] sonic_admin_alt_password = duthost.host.options['variable_manager'].\ @@ -199,9 +198,6 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, tbinf log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format( scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')) - if skip_traffic_test is True: - logger.info("Skip traffic test") - return ptf_runner(ptfhost, "ptftests", "vxlan-decap.Vxlan", diff --git a/tests/vxlan/test_vxlan_ecmp.py b/tests/vxlan/test_vxlan_ecmp.py index 028e856fa2f..a9a419065ac 100644 --- 
a/tests/vxlan/test_vxlan_ecmp.py +++ b/tests/vxlan/test_vxlan_ecmp.py @@ -60,7 +60,6 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until from tests.ptf_runner import ptf_runner from tests.common.vxlan_ecmp_utils import Ecmp_Utils @@ -394,8 +393,7 @@ def dump_self_info_and_run_ptf(self, random_sport=False, random_src_ip=False, tolerance=None, - payload=None, - skip_traffic_test=False): # noqa F811 + payload=None): ''' Just a wrapper for dump_info_to_ptf to avoid entering 30 lines everytime. @@ -450,9 +448,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -510,18 +505,16 @@ class Test_VxLAN_route_tests(Test_VxLAN): Common class for the basic route test cases. ''' - def test_vxlan_single_endpoint(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_single_endpoint(self, setUp, encap_type): ''' tc1:Create a tunnel route to a single endpoint a. Send packets to the route prefix dst. 
''' self.vxlan_test_setup = setUp - self.dump_self_info_and_run_ptf("tc1", encap_type, True, skip_traffic_test=skip_traffic_test) - self.dump_self_info_and_run_ptf("tc1", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc1", encap_type, True) + self.dump_self_info_and_run_ptf("tc1", encap_type, True, payload="vxlan") - def test_vxlan_modify_route_different_endpoint( - self, setUp, request, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_modify_route_different_endpoint(self, setUp, request, encap_type): ''' tc2: change the route to different endpoint. Packets are received only at endpoint b.") @@ -571,9 +564,9 @@ def test_vxlan_modify_route_different_endpoint( Logger.info( "Copy the new set of configs to the PTF and run the tests.") - self.dump_self_info_and_run_ptf("tc2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc2", encap_type, True) - def test_vxlan_remove_all_route(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_all_route(self, setUp, encap_type): ''' tc3: remove the tunnel route. Send packets to the route prefix dst. packets should not @@ -588,7 +581,7 @@ def test_vxlan_remove_all_route(self, setUp, encap_type, skip_traffic_test): ecmp_utils.get_payload_version(encap_type), "DEL") Logger.info("Verify that the traffic is not coming back.") - self.dump_self_info_and_run_ptf("tc3", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc3", encap_type, False) finally: Logger.info("Restore the routes in the DUT.") ecmp_utils.set_routes_in_dut( @@ -605,7 +598,7 @@ class Test_VxLAN_ecmp_create(Test_VxLAN): create testcases. ''' - def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type): ''' tc4:create tunnel route 1 with two endpoints a = {a1, a2...}. 
send packets to the route 1's prefix dst. packets are received at either @@ -646,12 +639,12 @@ def test_vxlan_configure_route1_ecmp_group_a(self, setUp, encap_type, skip_traff Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc4", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc4", encap_type, True) # Add vxlan payload testing as well. self.dump_self_info_and_run_ptf("tc4", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + payload="vxlan") - def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_ecmp_route1(self, setUp, encap_type): ''' Remove tunnel route 1. Send multiple packets (varying tuple) to the route 1's prefix dst. @@ -695,7 +688,7 @@ def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): ecmp_route1_end_point_list) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) # Deleting Tunnel route 1 ecmp_utils.create_and_apply_config( @@ -710,13 +703,13 @@ def test_vxlan_remove_ecmp_route1(self, setUp, encap_type, skip_traffic_test): {ecmp_route1_new_dest: ecmp_route1_end_point_list} Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc5", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, False) # Restoring dest_to_nh_map to old values self.vxlan_test_setup[encap_type]['dest_to_nh_map'][vnet] = copy.deepcopy(backup_dest) - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) - def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type, skip_traffic_test): # noqa 
F811 + def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type): ''' tc5: set tunnel route 2 to endpoint group a = {a1, a2}. send packets to route 2"s prefix dst. packets are received at either a1 @@ -725,7 +718,7 @@ def test_vxlan_configure_route1_ecmp_group_b(self, setUp, encap_type, skip_traff self.vxlan_test_setup = setUp self.setup_route2_ecmp_group_b(encap_type) Logger.info("Verify the configs work and traffic flows correctly.") - self.dump_self_info_and_run_ptf("tc5", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc5", encap_type, True) def setup_route2_ecmp_group_b(self, encap_type): ''' @@ -767,7 +760,7 @@ def setup_route2_ecmp_group_b(self, encap_type): self.vxlan_test_setup[encap_type]['tc5_dest'] = tc5_new_dest - def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type): ''' tc6: set tunnel route 2 to endpoint group b = {b1, b2}. send packets to route 2"s prefix dst. packets are received at either @@ -809,13 +802,12 @@ def test_vxlan_configure_route2_ecmp_group_b(self, setUp, encap_type, skip_traff tc6_end_point_list) Logger.info("Verify that the traffic works.") - self.dump_self_info_and_run_ptf("tc6", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc6", encap_type, True) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") - def test_vxlan_bfd_health_state_change_a2down_a1up( - self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a2down_a1up(self, setUp, encap_type): ''' Set BFD state for a1' to UP and a2' to Down. Send multiple packets (varying tuple) to the route 1's prefix dst. 
Packets are received @@ -863,12 +855,12 @@ def test_vxlan_bfd_health_state_change_a2down_a1up( end_point_list[1]) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc_a2down_a1up", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc_a2down_a1up", encap_type, True) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") - def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type): ''' Set BFD state for a1' to Down and a2' to Down. Send multiple packets (varying tuple) to the route 1's prefix dst. Packets @@ -915,14 +907,13 @@ def test_vxlan_bfd_health_state_change_a1a2_down(self, setUp, encap_type, skip_t "a1a2_down", encap_type, True, - packet_count=4, - skip_traffic_test=skip_traffic_test) + packet_count=4) @pytest.mark.skipif( "config.option.bfd is False", reason="This test will be run only if '--bfd=True' is provided.") def test_vxlan_bfd_health_state_change_a2up_a1down( - self, setUp, encap_type, skip_traffic_test): # noqa F811 + self, setUp, encap_type): ''' Set BFD state for a2' to UP. Send packets to the route 1's prefix dst. Packets are received only at endpoint a2. Verify advertise @@ -970,9 +961,9 @@ def test_vxlan_bfd_health_state_change_a2up_a1down( end_point_list[0]) Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("a2up_a1down", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("a2up_a1down", encap_type, True) - def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type): ''' Set BFD state for a1' & a2' to UP. 
Send multiple packets (varying tuple) to the route 1's prefix dst. Packets are received at both @@ -1015,7 +1006,7 @@ def test_vxlan_bfd_health_state_change_a1a2_up(self, setUp, encap_type, skip_tra Logger.info("Verify that the new config takes effect and run traffic.") - self.dump_self_info_and_run_ptf("tc4", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc4", encap_type, True) # perform cleanup by removing all the routes added by this test class. # reset to add only the routes added in the setup phase. @@ -1206,7 +1197,7 @@ def setup_route2_shared_different_endpoints(self, encap_type): encap_type, tc9_new_nhs) - def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_route2(self, setUp, encap_type): ''' tc7:send packets to route 1's prefix dst. by removing route 2 from group a, no change expected to route 1. @@ -1252,7 +1243,7 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): encap_type, tc7_end_point_list) Logger.info("Verify the setup works.") - self.dump_self_info_and_run_ptf("tc7", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc7", encap_type, True) Logger.info("End of setup.") Logger.info("Remove one of the routes.") @@ -1272,7 +1263,7 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): "DEL") Logger.info("Verify the rest of the traffic still works.") - self.dump_self_info_and_run_ptf("tc7", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc7", encap_type, True) # perform cleanup by removing all the routes added by this test class. # reset to add only the routes added in the setup phase. 
@@ -1289,18 +1280,18 @@ def test_vxlan_remove_route2(self, setUp, encap_type, skip_traffic_test): ecmp_utils.get_payload_version(encap_type), "SET") - def test_vxlan_route2_single_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_single_nh(self, setUp, encap_type): ''' tc8: set tunnel route 2 to single endpoint b1. Send packets to route 2's prefix dst. ''' self.vxlan_test_setup = setUp self.setup_route2_single_endpoint(encap_type) - self.dump_self_info_and_run_ptf("tc8", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc8", encap_type, True) self.dump_self_info_and_run_ptf("tc8", encap_type, True, - payload="vxlan", skip_traffic_test=skip_traffic_test) + payload="vxlan") - def test_vxlan_route2_shared_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_shared_nh(self, setUp, encap_type): ''' tc9: set tunnel route 2 to shared endpoints a1 and b1. Send packets to route 2's @@ -1308,9 +1299,9 @@ def test_vxlan_route2_shared_nh(self, setUp, encap_type, skip_traffic_test): ''' self.vxlan_test_setup = setUp self.setup_route2_shared_endpoints(encap_type) - self.dump_self_info_and_run_ptf("tc9", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc9", encap_type, True) - def test_vxlan_route2_shared_different_nh(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_route2_shared_different_nh(self, setUp, encap_type): ''' tc9.2: set tunnel route 2 to 2 completely different shared(no-reuse) endpoints a1 and b1. 
send packets @@ -1318,9 +1309,9 @@ def test_vxlan_route2_shared_different_nh(self, setUp, encap_type, skip_traffic_ ''' self.vxlan_test_setup = setUp self.setup_route2_shared_different_endpoints(encap_type) - self.dump_self_info_and_run_ptf("tc9.2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc9.2", encap_type, True) - def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_remove_ecmp_route2(self, setUp, encap_type): ''' tc10: remove tunnel route 2. send packets to route 2's prefix dst. ''' @@ -1369,7 +1360,7 @@ def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): tc10_nhs Logger.info("The deleted route should fail to receive traffic.") - self.dump_self_info_and_run_ptf("tc10", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc10", encap_type, False) # all others should be working. # Housekeeping: @@ -1380,7 +1371,7 @@ def test_vxlan_remove_ecmp_route2(self, setUp, encap_type, skip_traffic_test): del_needed = False Logger.info("Check the traffic is working in the other routes.") - self.dump_self_info_and_run_ptf("tc10", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc10", encap_type, True) except BaseException: self.vxlan_test_setup[encap_type]['dest_to_nh_map'][vnet] = full_map.copy() @@ -1399,7 +1390,7 @@ class Test_VxLAN_ecmp_random_hash(Test_VxLAN): Class for testing different tcp ports for payload. ''' - def test_vxlan_random_hash(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_hash(self, setUp, encap_type): ''' tc11: set tunnel route 3 to endpoint group c = {c1, c2, c3}. Ensure c1, c2, and c3 matches to underlay default route. 
@@ -1448,8 +1439,7 @@ def test_vxlan_random_hash(self, setUp, encap_type, skip_traffic_test): # no "tc11", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) @pytest.mark.skipif( @@ -1461,8 +1451,7 @@ class Test_VxLAN_underlay_ecmp(Test_VxLAN): Class for all test cases that modify the underlay default route. ''' @pytest.mark.parametrize("ecmp_path_count", [1, 2]) - def test_vxlan_modify_underlay_default( - self, setUp, minigraph_facts, encap_type, ecmp_path_count, skip_traffic_test): # noqa F811 + def test_vxlan_modify_underlay_default(self, setUp, minigraph_facts, encap_type, ecmp_path_count): ''' tc12: modify the underlay default route nexthop/s. send packets to route 3's prefix dst. @@ -1534,8 +1523,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) Logger.info( "Reverse the action: bring up the selected_intfs" @@ -1582,8 +1570,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) Logger.info("Recovery. Bring all up, and verify traffic works.") for intf in all_t2_intfs: @@ -1611,8 +1598,7 @@ def test_vxlan_modify_underlay_default( "tc12", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) except Exception: # If anything goes wrong in the try block, atleast bring the intf @@ -1640,8 +1626,7 @@ def test_vxlan_modify_underlay_default( def test_vxlan_remove_add_underlay_default(self, setUp, minigraph_facts, - encap_type, - skip_traffic_test): # noqa F811 + encap_type): ''' tc13: remove the underlay default route. tc14: add the underlay default route. 
@@ -1682,7 +1667,7 @@ def test_vxlan_remove_add_underlay_default(self, "BGP neighbors have not reached the required state after " "T2 intf are shutdown.") Logger.info("Verify that traffic is not flowing through.") - self.dump_self_info_and_run_ptf("tc13", encap_type, False, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc13", encap_type, False) # tc14: Re-add the underlay default route. Logger.info("Bring up the T2 interfaces.") @@ -1704,8 +1689,7 @@ def test_vxlan_remove_add_underlay_default(self, "tc14", encap_type, True, - packet_count=1000, - skip_traffic_test=skip_traffic_test) + packet_count=1000) except Exception: Logger.info( "If anything goes wrong in the try block," @@ -1724,7 +1708,7 @@ def test_vxlan_remove_add_underlay_default(self, " interfaces have been brought up.") raise - def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_traffic_test): # noqa F811 + def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type): ''' Create a more specific underlay route to c1. Verify c1 packets are received only on the c1's nexthop interface @@ -1795,8 +1779,7 @@ def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_ self.dump_self_info_and_run_ptf( "underlay_specific_route", encap_type, - True, - skip_traffic_test=skip_traffic_test) + True) # Deletion of all static routes gateway = all_t2_neighbors[t2_neighbor][outer_layer_version].lower() for _, nexthops in list(endpoint_nhmap.items()): @@ -1844,14 +1827,12 @@ def test_underlay_specific_route(self, setUp, minigraph_facts, encap_type, skip_ self.dump_self_info_and_run_ptf( "underlay_specific_route", encap_type, - True, - skip_traffic_test=skip_traffic_test) + True) def test_underlay_portchannel_shutdown(self, setUp, minigraph_facts, - encap_type, - skip_traffic_test): # noqa F811 + encap_type): ''' Bring down one of the port-channels. 
Packets are equally recieved at c1, c2 or c3 @@ -1859,7 +1840,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup = setUp # Verification of traffic before shutting down port channel - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) # Gathering all portchannels all_t2_portchannel_intfs = \ @@ -1894,7 +1875,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup[encap_type]['t2_ports'], list(self.vxlan_test_setup['list_of_bfd_monitors'])) time.sleep(10) - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) for intf in all_t2_portchannel_members[selected_portchannel]: self.vxlan_test_setup['duthost'].shell( @@ -1906,7 +1887,7 @@ def test_underlay_portchannel_shutdown(self, self.vxlan_test_setup[encap_type]['t2_ports'], list(self.vxlan_test_setup['list_of_bfd_monitors'])) time.sleep(10) - self.dump_self_info_and_run_ptf("tc12", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("tc12", encap_type, True) except BaseException: for intf in all_t2_portchannel_members[selected_portchannel]: self.vxlan_test_setup['duthost'].shell( @@ -1936,8 +1917,7 @@ def verify_entropy( random_sport=False, random_dport=True, random_src_ip=False, - tolerance=None, - skip_traffic_test=False): # noqa F811 + tolerance=None): ''' Function to be reused by the entropy testcases. 
Sets up a couple of endpoints on the top of the existing ones, and performs the traffic @@ -1981,10 +1961,9 @@ def verify_entropy( random_dport=random_dport, random_src_ip=random_src_ip, packet_count=1000, - tolerance=tolerance, - skip_traffic_test=skip_traffic_test) + tolerance=tolerance) - def test_verify_entropy(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_verify_entropy(self, setUp, encap_type): ''' Verification of entropy - Create tunnel route 4 to endpoint group A. Send packets (fixed tuple) to route 4's prefix dst @@ -1995,10 +1974,9 @@ def test_verify_entropy(self, setUp, encap_type, skip_traffic_test): random_dport=True, random_sport=True, random_src_ip=True, - tolerance=0.75, # More tolerance since this varies entropy a lot. - skip_traffic_test=skip_traffic_test) + tolerance=0.75) # More tolerance since this varies entropy a lot. - def test_vxlan_random_dst_port(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_dst_port(self, setUp, encap_type): ''' Verification of entropy - Change the udp dst port of original packet to route 4's prefix dst @@ -2006,7 +1984,7 @@ def test_vxlan_random_dst_port(self, setUp, encap_type, skip_traffic_test): self.vxlan_test_setup = setUp self.verify_entropy(encap_type, tolerance=0.03) - def test_vxlan_random_src_port(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_random_src_port(self, setUp, encap_type): ''' Verification of entropy - Change the udp src port of original packet to route 4's prefix dst @@ -2016,10 +1994,9 @@ def test_vxlan_random_src_port(self, setUp, encap_type, skip_traffic_test): encap_type, random_dport=False, random_sport=True, - tolerance=0.03, - skip_traffic_test=skip_traffic_test) + tolerance=0.03) - def test_vxlan_varying_src_ip(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_varying_src_ip(self, setUp, encap_type): ''' Verification of entropy - Change the udp src ip of original packet to route 4's 
prefix dst @@ -2029,5 +2006,4 @@ def test_vxlan_varying_src_ip(self, setUp, encap_type, skip_traffic_test): encap_type, random_dport=False, random_src_ip=True, - tolerance=0.03, - skip_traffic_test=skip_traffic_test) + tolerance=0.03) diff --git a/tests/vxlan/test_vxlan_ecmp_switchover.py b/tests/vxlan/test_vxlan_ecmp_switchover.py index 200f9f3548d..32a13cadf83 100644 --- a/tests/vxlan/test_vxlan_ecmp_switchover.py +++ b/tests/vxlan/test_vxlan_ecmp_switchover.py @@ -11,7 +11,6 @@ import pytest from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from tests.common.vxlan_ecmp_utils import Ecmp_Utils @@ -222,8 +221,7 @@ def dump_self_info_and_run_ptf(self, random_sport=False, random_src_ip=False, tolerance=None, - payload=None, - skip_traffic_test=False): # noqa F811 + payload=None): ''' Just a wrapper for dump_info_to_ptf to avoid entering 30 lines everytime. @@ -276,9 +274,6 @@ def dump_self_info_and_run_ptf(self, Logger.info( "dest->nh mapping:%s", self.vxlan_test_setup[encap_type]['dest_to_nh_map']) - if skip_traffic_test is True: - Logger.info("Skipping traffic test.") - return ptf_runner(self.vxlan_test_setup['ptfhost'], "ptftests", "vxlan_traffic.VxLAN_in_VxLAN" if payload == 'vxlan' @@ -292,7 +287,7 @@ def dump_self_info_and_run_ptf(self, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')), is_python3=True) - def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type): ''' tc1:create tunnel route 1 with two endpoints a = {a1, b1}. a1 is primary, b1 is secondary. 1) both a1,b1 are UP. 
@@ -369,7 +364,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)])) assert str(result['stdout']) == ecmp_utils.OVERLAY_DMAC - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary-secondary switchover. # Endpoint list = [A, A`], Primary[A] | Active NH=[A] | @@ -387,7 +382,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], tc1_end_point_list[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary recovery. # Endpoint list = [A, A`], Primary[A] | Active NH=[A`] | @@ -405,7 +400,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], tc1_end_point_list[0], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) # Single primary backup Failure. # Endpoint list = [A, A`]. 
Primary[A]| Active NH=[A`] A is DOWN | @@ -427,7 +422,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ tc1_end_point_list[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test1", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test1", encap_type, True) ecmp_utils.create_and_apply_priority_config( self.vxlan_test_setup['duthost'], vnet, @@ -447,7 +442,7 @@ def test_vxlan_priority_single_pri_sec_switchover(self, setUp, encap_type, skip_ [tc1_end_point_list[0]], "DEL") - def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_traffic_test): # noqa F811 + def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type): ''' tc2:create tunnel route 1 with 6 endpoints a = {A, B, A`, B`}. A,B are primary, A`,B` are secondary. @@ -545,7 +540,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t self.vxlan_test_setup['list_of_downed_endpoints'] = set(inactive_list) time.sleep(10) # ensure that the traffic is distributed to all 3 primary Endpoints. - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Single primary failure. # Endpoint list = [A, B, A`, B`], Primary = [A, B] | active NH = [A, B] | @@ -563,7 +558,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[0], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. All primary failure. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | A is Down. 
active NH = [B] | @@ -580,7 +575,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Backup Failure. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | @@ -599,7 +594,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Single primary recovery. # Endpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A`] | @@ -617,7 +612,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], primary_nhg[0], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A] | @@ -639,7 +634,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup all failure. 
# Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A,B] | @@ -668,7 +663,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [] | @@ -698,7 +693,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup all failure 2. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A,B] | @@ -727,7 +722,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t ecmp_utils.HOST_MASK[ecmp_utils.get_payload_version(encap_type)], secondary_nhg[1], "down") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery of secondary. # Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [] | @@ -749,7 +744,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t secondary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) # Multiple primary backups. Multiple primary & backup recovery of primary after secondary. 
# Edpoint list = [A, B, A`, B`] Primary = [A, B] | Active NH = [A`, B`] | @@ -771,7 +766,7 @@ def test_vxlan_priority_multi_pri_sec_switchover(self, setUp, encap_type, skip_t primary_nhg[1], "up") time.sleep(10) - self.dump_self_info_and_run_ptf("test2", encap_type, True, skip_traffic_test=skip_traffic_test) + self.dump_self_info_and_run_ptf("test2", encap_type, True) ecmp_utils.create_and_apply_priority_config( self.vxlan_test_setup['duthost'], vnet, From 757b86cad04c5cc063fd7676e247129447de9514 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:13:31 +0800 Subject: [PATCH 047/175] Remove skip_traffic_test fixture in vlan tests (#15458) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in vlan tests How did you verify/test it? 
--- tests/vlan/test_vlan_ping.py | 32 ++++++++++---------------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/tests/vlan/test_vlan_ping.py b/tests/vlan/test_vlan_ping.py index b2141646673..fd19021c88f 100644 --- a/tests/vlan/test_vlan_ping.py +++ b/tests/vlan/test_vlan_ping.py @@ -10,7 +10,6 @@ from tests.common.helpers.assertions import pytest_assert as py_assert from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 from tests.common.dualtor.dual_tor_utils import lower_tor_host # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__name__) @@ -180,10 +179,7 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo, def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter, tbinfo, - vlan_mac=None, dtor_ul=False, dtor_dl=False, skip_traffic_test=False): # noqa F811 - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return + vlan_mac=None, dtor_ul=False, dtor_dl=False): if dtor_ul is True: # use vlan int mac in case of dualtor UL test pkt pkt = testutils.simple_icmp_packet(eth_src=str(src_port['mac']), @@ -224,7 +220,7 @@ def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter, tbinfo, def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ test for checking connectivity of statically added ipv4 and ipv6 arp entries """ @@ -252,16 +248,12 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, for member in ptfhost_info: if 'dualtor' in tbinfo["topo"]["name"]: verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], - vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True, - skip_traffic_test=skip_traffic_test) + vmhost_info, 
ptfadapter, tbinfo, vlan_mac, dtor_ul=True) verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], - ptfadapter, tbinfo, vlan_mac, dtor_dl=True, - skip_traffic_test=skip_traffic_test) + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) else: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo) # flushing and re-adding ipv6 static arp entry static_neighbor_entry(duthost, ptfhost_info, "del", "6") @@ -280,13 +272,9 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter, for member in ptfhost_info: if 'dualtor' in tbinfo["topo"]["name"]: verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], - vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True, - skip_traffic_test=skip_traffic_test) + vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True) verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], - ptfadapter, tbinfo, vlan_mac, dtor_dl=True, - skip_traffic_test=skip_traffic_test) + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) else: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo, - skip_traffic_test=skip_traffic_test) + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo) From 
ef01e7524743ad4108715db11fa4b7d7704c30a5 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 17:35:44 +0800 Subject: [PATCH 048/175] Remove skip_traffic_test fixture in span tests (#15456) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in span tests How did you verify/test it? --- tests/span/span_helpers.py | 4 +--- tests/span/test_port_mirroring.py | 27 ++++++++++----------------- 2 files changed, 11 insertions(+), 20 deletions(-) diff --git a/tests/span/span_helpers.py b/tests/span/span_helpers.py index 28dc2f351b7..a85c2b8d309 100644 --- a/tests/span/span_helpers.py +++ b/tests/span/span_helpers.py @@ -5,7 +5,7 @@ import ptf.testutils as testutils -def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor, skip_traffic_test=False): +def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor): ''' Send packet from ptf and verify it on monitor port @@ -18,8 +18,6 @@ def send_and_verify_mirrored_packet(ptfadapter, src_port, monitor, skip_traffic_ pkt = testutils.simple_icmp_packet(eth_src=src_mac, eth_dst='ff:ff:ff:ff:ff:ff') - if skip_traffic_test is True: - return ptfadapter.dataplane.flush() testutils.send(ptfadapter, src_port, pkt) testutils.verify_packet(ptfadapter, pkt, monitor) diff --git a/tests/span/test_port_mirroring.py b/tests/span/test_port_mirroring.py index 646f699264a..145395b8e7f 100644 --- a/tests/span/test_port_mirroring.py +++ b/tests/span/test_port_mirroring.py @@ -5,7 +5,6 @@ import pytest from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 -from tests.common.fixtures.ptfhost_utils import skip_traffic_test 
# noqa F401 from span_helpers import send_and_verify_mirrored_packet pytestmark = [ @@ -13,7 +12,7 @@ ] -def test_mirroring_rx(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_rx(ptfadapter, setup_session): ''' Test case #1 Verify ingress direction session @@ -27,11 +26,10 @@ def test_mirroring_rx(ptfadapter, setup_session, skip_traffic_test): # noqa F ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_tx(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_tx(ptfadapter, setup_session): ''' Test case #2 Verify egress direction session @@ -45,11 +43,10 @@ def test_mirroring_tx(ptfadapter, setup_session, skip_traffic_test): # noqa F ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_both(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_both(ptfadapter, setup_session): ''' Test case #3 Verify bidirectional session @@ -66,16 +63,14 @@ def test_mirroring_both(ptfadapter, setup_session, skip_traffic_test): # noqa ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) -def test_mirroring_multiple_source(ptfadapter, setup_session, skip_traffic_test): # noqa F811 +def test_mirroring_multiple_source(ptfadapter, setup_session): ''' Test case #4 Verify ingress direction session with multiple source ports @@ -92,10 +87,8 @@ def 
test_mirroring_multiple_source(ptfadapter, setup_session, skip_traffic_test) ''' send_and_verify_mirrored_packet(ptfadapter, setup_session['source1_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) send_and_verify_mirrored_packet(ptfadapter, setup_session['source2_index'], - setup_session['destination_index'], - skip_traffic_test=skip_traffic_test) + setup_session['destination_index']) From 0e320fe12c0a4b920404987890692b8a3646d926 Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Fri, 8 Nov 2024 20:40:27 +1100 Subject: [PATCH 049/175] feat: fix pfc_storm flaky (#15418) Description of PR Summary: Fixes # (issue) 301125860 Approach What is the motivation for this PR? pfc_gen_t2 is written and using python3 library format. In some switches the default python is not linked to python3 and still refers to python2.7 which leads to unexpected result. How did you do it? Explicitly specify to use python3 co-authorized by: jianquanye@microsoft.com --- tests/common/templates/pfc_storm_sonic_t2.j2 | 4 ++-- tests/common/templates/pfc_storm_stop_sonic_t2.j2 | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/common/templates/pfc_storm_sonic_t2.j2 b/tests/common/templates/pfc_storm_sonic_t2.j2 index fb6945ee2dc..7881a510804 100644 --- a/tests/common/templates/pfc_storm_sonic_t2.j2 +++ b/tests/common/templates/pfc_storm_sonic_t2.j2 @@ -1,6 +1,6 @@ cd {{pfc_gen_dir}} {% if (pfc_asym is defined) and (pfc_asym == True) %} -nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python3 {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n 
{{pfc_frames_number}} -i {{pfc_fanout_interface}}" > /dev/null 2>&1 & {% else %} -nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_defer_time is defined %}sleep {{pfc_storm_defer_time}} &&{% endif %} sudo nice --20 python3 {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}" > /dev/null 2>&1 & {% endif %} diff --git a/tests/common/templates/pfc_storm_stop_sonic_t2.j2 b/tests/common/templates/pfc_storm_stop_sonic_t2.j2 index 1f29691ca5f..597417abb6b 100755 --- a/tests/common/templates/pfc_storm_stop_sonic_t2.j2 +++ b/tests/common/templates/pfc_storm_stop_sonic_t2.j2 @@ -1,6 +1,6 @@ cd {{pfc_gen_dir}} {% if (pfc_asym is defined) and (pfc_asym == True) %} -nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}'" > /dev/null 2>&1 & +nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python3 {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}}'" > /dev/null 2>&1 & {% else %} -nohup sh -c "{% if pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}'" > /dev/null 2>&1 & +nohup sh -c "{% if 
pfc_storm_stop_defer_time is defined %}sleep {{pfc_storm_stop_defer_time}} &&{% endif %} sudo pkill -f 'python3 {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -s {{pfc_send_period}} -n {{pfc_frames_number}} -i {{pfc_fanout_interface}} -r {{ansible_eth0_ipv4_addr}}'" > /dev/null 2>&1 & {% endif %} From 8901c4b06d0c10f06099a00da62dd8b509ebc430 Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Fri, 8 Nov 2024 01:44:01 -0800 Subject: [PATCH 050/175] [T2:multidut]: Change reboot to reboot-fixture in pfcwd_basic. (#15445) Description of PR Summary: As per PR:15099, change test_multidut_pfcwd_basic_with_snappi.py and test_multidut_pfc_pause_lossy_with_snappi.py to use the new fixtures. Approach What is the motivation for this PR? Fix for: "ARP not resolved issue" in reboot cases in pause_lossy, and pfcwd_basic. How did you do it? Moved the reboot logic to the fixture, and calling the fixture in the tests. Removed the repeated code in all tests. How did you verify/test it? Ran it on my T2 TB: ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 06:40:51 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. 
Please check logs ================================================================================================================== short test summary info =================================================================================================================== PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|0] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|1] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|2] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|5] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|6] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|0] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|1] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|2] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|5] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|6] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio[multidut_port_info0] PASSED 
snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio[multidut_port_info1] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info0-cold-yy39top-lc4|0] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info0-cold-yy39top-lc4|1] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info0-cold-yy39top-lc4|2] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info0-cold-yy39top-lc4|5] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info0-cold-yy39top-lc4|6] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info1-cold-yy39top-lc4|0] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info1-cold-yy39top-lc4|1] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info1-cold-yy39top-lc4|2] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info1-cold-yy39top-lc4|5] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio_reboot[multidut_port_info1-cold-yy39top-lc4|6] PASSED snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio_reboot[multidut_port_info0-cold] PASSED 
snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio_reboot[multidut_port_info1-cold] SKIPPED [10] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py:135: Reboot type warm is not supported on cisco-8000 switches SKIPPED [10] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py:135: Reboot type fast is not supported on cisco-8000 switches SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py:195: Reboot type warm is not supported on cisco-8000 switches SKIPPED [2] snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py:195: Reboot type fast is not supported on cisco-8000 switches ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|0] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|5] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info0-yy39top-lc4|6] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|1] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|2] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_single_lossy_prio[multidut_port_info1-yy39top-lc4|5] - Failed: Processes "['analyze_logs--']" failed with exit code "1" 
ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio[multidut_port_info0] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ERROR snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py::test_pfc_pause_multi_lossy_prio[multidut_port_info1] - Failed: Processes "['analyze_logs--']" failed with exit code "1" ============================================================================================ 24 passed, 24 skipped, 28 warnings, 8 errors in 20375.01s (5:39:35) ============================================================================================= sonic@ixia-sonic-mgmt-whitebox:/data/tests$ co-authorized by: jianquanye@microsoft.com --- ...st_multidut_pfc_pause_lossy_with_snappi.py | 195 +++--------- .../test_multidut_pfcwd_basic_with_snappi.py | 299 ++++-------------- 2 files changed, 100 insertions(+), 394 deletions(-) diff --git a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py index 0158317a951..8a03b72ac0e 100644 --- a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py +++ b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py @@ -9,30 +9,34 @@ get_snappi_ports # noqa: F401 from tests.common.snappi_tests.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\ lossy_prio_list # noqa F401 -from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED +from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED # noqa: F401 from tests.snappi_tests.multidut.pfc.files.multidut_helper import run_pfc_test -from tests.common.reboot import reboot -from tests.common.utilities import wait_until import logging from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.files.helper import skip_warm_reboot +from 
tests.snappi_tests.files.helper import reboot_duts, setup_ports_and_dut, multidut_port_info # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) +@pytest.fixture(autouse=True) +def number_of_tx_rx_ports(): + yield (1, 1) + + def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, enum_dut_lossy_prio, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - all_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info): # noqa: F811 + prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + all_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 + tbinfo, # noqa: F811 + setup_ports_and_dut # noqa: F811 + ): """ Test if PFC will impact a single lossy priority in multidut setup @@ -51,31 +55,7 @@ def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut _, lossy_prio = enum_dut_lossy_prio.split('|') lossy_prio = int(lossy_prio) @@ -99,10 +79,8 @@ def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 @@ -112,14 +90,14 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 lossless_prio_list, # noqa: F811 get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info): # noqa: F811 + setup_ports_and_dut): # noqa: F811 """ Test if PFC will impact multiple lossy priorities in multidut setup Args: snappi_api (pytest fixture): SNAPPI session conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). 
lossless_prio_list (pytest fixture): list of all the lossless priorities @@ -129,31 +107,7 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut pause_prio_list = lossy_prio_list test_prio_list = lossy_prio_list @@ -174,35 +128,33 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, 
localhost, - enum_dut_lossy_prio_with_completeness_level, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - all_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 + enum_dut_lossy_prio, + prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + all_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - reboot_type, - multidut_port_info): + setup_ports_and_dut, # noqa: F811 + reboot_duts): # noqa: F811 """ Test if PFC will impact a single lossy priority after various kinds of reboots in multidut setup Args: snappi_api (pytest fixture): SNAPPI session conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs localhost (pytest fixture): localhost handle - enum_dut_lossy_prio_with_completeness_level (str): lossy priority to test, e.g., 's6100-1|2' + enum_dut_lossy_prio (str): name of lossy priority to test, e.g., 's6100-1|2' prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). lossy_prio_list (pytest fixture): list of all the lossy priorities all_prio_list (pytest fixture): list of all the priorities @@ -212,48 +164,15 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - _, lossy_prio = enum_dut_lossy_prio_with_completeness_level.split('|') + _, lossy_prio = enum_dut_lossy_prio.split('|') lossy_prio = int(lossy_prio) pause_prio_list = [lossy_prio] test_prio_list = [lossy_prio] bg_prio_list = [p for p in all_prio_list] bg_prio_list.remove(lossy_prio) - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - wait_until(180, 20, 0, duthost.critical_services_fully_started) - snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -269,24 +188,21 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def 
test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, localhost, - prio_dscp_map, # noqa: F811 - lossy_prio_list, # noqa: F811 - lossless_prio_list, # noqa: F811 + prio_dscp_map, # noqa: F811 + lossy_prio_list, # noqa: F811 + lossless_prio_list, # noqa: F811 get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - reboot_type, - multidut_port_info): + tbinfo, # noqa: F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts): # noqa: F811 """ Test if PFC will impact multiple lossy priorities after various kinds of reboots @@ -294,7 +210,7 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 snappi_api (pytest fixture): SNAPPI session snappi_testbed_config (pytest fixture): testbed configuration information conn_graph_facts (pytest fixture): connection graph - fanout_graph_facts (pytest fixture): fanout graph + fanout_graph_facts_multidut (pytest fixture): fanout graph localhost (pytest fixture): localhost handle duthosts (pytest fixture): list of DUTs prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). @@ -306,44 +222,12 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut pause_prio_list = lossy_prio_list test_prio_list = lossy_prio_list bg_prio_list = lossless_prio_list - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - wait_until(180, 20, 0, duthost.critical_services_fully_started) - snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -359,4 +243,3 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 prio_dscp_map=prio_dscp_map, test_traffic_pause=False, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) diff --git a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py index 01b5a64a899..126a5fe14dd 100644 --- a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py +++ b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py @@ -11,28 +11,36 @@ get_snappi_ports_multi_dut, is_snappi_multidut, \ snappi_api, snappi_dut_base_config, get_snappi_ports, get_snappi_ports_for_rdma, cleanup_config # noqa: F401 from 
tests.common.snappi_tests.qos_fixtures import prio_dscp_map, lossless_prio_list # noqa F401 -from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED +from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED # noqa: F401 from tests.common.reboot import reboot # noqa: F401 from tests.common.utilities import wait_until # noqa: F401 from tests.snappi_tests.multidut.pfcwd.files.pfcwd_multidut_basic_helper import run_pfcwd_basic_test from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.files.helper import skip_warm_reboot, skip_pfcwd_test # noqa: F401 +from tests.snappi_tests.files.helper import skip_pfcwd_test, reboot_duts, \ + setup_ports_and_dut, multidut_port_info # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] +WAIT_TIME = 600 +INTERVAL = 40 + + +@pytest.fixture(autouse=True) +def number_of_tx_rx_ports(): + yield (1, 1) + @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - trigger_pfcwd): + tbinfo, # noqa: F811 + prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 + trigger_pfcwd, # noqa: F811 + ): """ Run PFC watchdog basic test on a single lossless priority @@ -47,33 +55,7 @@ def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv 
file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_pfcwd_test(duthost=snappi_ports[0]['duthost'], trigger_pfcwd=trigger_pfcwd) - skip_pfcwd_test(duthost=snappi_ports[1]['duthost'], trigger_pfcwd=trigger_pfcwd) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut lossless_prio = random.sample(lossless_prio_list, 1) lossless_prio = int(lossless_prio[0]) @@ -92,20 +74,16 @@ def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 trigger_pfcwd): """ Run PFC watchdog basic test on multiple lossless priorities @@ -122,31 +100,7 @@ def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 
Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -162,25 +116,21 @@ def test_pfcwd_basic_multi_lossless_prio(snappi_api, # noqa F811 trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - enum_dut_lossless_prio_with_completeness_level, # noqa: F811 - 
get_snappi_ports, # noqa: F811 + lossless_prio_list, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - reboot_type, - trigger_pfcwd): + prio_dscp_map, # noqa: F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts, # noqa: F811 + trigger_pfcwd # noqa: F811 + ): """ Verify PFC watchdog basic test works on a single lossless priority after various types of reboot @@ -190,7 +140,6 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no fanout_graph_facts_multidut (pytest fixture): fanout graph localhost (pytest fixture): localhost handle duthosts (pytest fixture): list of DUTs - enum_dut_lossless_prio_with_completeness_level (str): lossless priority to test, e.g., 's6100-1|3' prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority) reboot_type (str): reboot type to be issued on the DUT trigger_pfcwd (bool): if PFC watchdog is expected to be triggered @@ -199,46 +148,13 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - _, lossless_prio = enum_dut_lossless_prio_with_completeness_level.split('|') - lossless_prio = int(lossless_prio) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut + + lossless_prio = random.sample(lossless_prio_list, 1) + lossless_prio = int(lossless_prio[0]) snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") - run_pfcwd_basic_test(api=snappi_api, testbed_config=testbed_config, port_config_list=port_config_list, @@ -250,24 +166,19 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer -@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # noqa F811 
conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 - tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 - reboot_type, + lossless_prio_list, # noqa: F811 + tbinfo, # noqa: F811 + prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 + reboot_duts, # noqa: F811 trigger_pfcwd): """ Verify PFC watchdog basic test works on multiple lossless priorities after various kinds of reboots @@ -286,41 +197,7 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) - - skip_warm_reboot(snappi_ports[0]['duthost'], reboot_type) - skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) - - for duthost in set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]): - logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname)) - reboot(duthost, localhost, reboot_type=reboot_type, safe_reboot=True) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -336,24 +213,20 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer @pytest.mark.parametrize('restart_service', ['swss']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, - prio_dscp_map, # noqa F811 + prio_dscp_map, # noqa: F811 restart_service, - trigger_pfcwd): + trigger_pfcwd, + setup_ports_and_dut): # 
noqa: F811 """ Verify PFC watchdog basic test works on a single lossless priority after various service restarts @@ -369,31 +242,7 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut lossless_prio = random.sample(lossless_prio_list, 1) lossless_prio = int(lossless_prio[0]) @@ -406,24 +255,27 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, ports_dict[k] = list(set(ports_dict[k])) logger.info('Port dictionary:{}'.format(ports_dict)) - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + # Record current state of critical services. 
+ duthost.critical_services_fully_started() + asic_list = ports_dict[duthost.hostname] - for asic in asic_list: - asic_id = re.match(r"(asic)(\d+)", asic).group(2) - proc = 'swss@' + asic_id - logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) - duthost.command("sudo systemctl reset-failed {}".format(proc)) - duthost.command("sudo systemctl restart {}".format(proc)) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") + asic = random.sample(asic_list, 1)[0] + asic_id = re.match(r"(asic)(\d+)", asic).group(2) + proc = 'swss@' + asic_id + logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) + duthost.command("sudo systemctl reset-failed {}".format(proc)) + duthost.command("sudo systemctl restart {}".format(proc)) + logger.info("Wait until the system is stable") + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), + "Not all critical services are fully started") else: - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") snappi_extra_params = SnappiTestParams() @@ -440,23 +292,19 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - 
cleanup_config(duthosts, snappi_ports) - @pytest.mark.disable_loganalyzer @pytest.mark.parametrize('restart_service', ['swss']) @pytest.mark.parametrize("trigger_pfcwd", [True, False]) -@pytest.mark.parametrize("multidut_port_info", MULTIDUT_PORT_INFO[MULTIDUT_TESTBED]) def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, # noqa F811 conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 duthosts, lossless_prio_list, # noqa: F811 - get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - multidut_port_info, prio_dscp_map, # noqa F811 restart_service, + setup_ports_and_dut, # noqa: F811 trigger_pfcwd): """ Verify PFC watchdog basic test works on multiple lossless priorities after various service restarts @@ -474,31 +322,8 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, Returns: N/A """ - for testbed_subtype, rdma_ports in multidut_port_info.items(): - tx_port_count = 1 - rx_port_count = 1 - snappi_port_list = get_snappi_ports - pytest_assert(len(snappi_port_list) >= tx_port_count + rx_port_count, - "Need Minimum of 2 ports defined in ansible/files/*links.csv file") - - pytest_assert(len(rdma_ports['tx_ports']) >= tx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Tx ports defined for \ - testbed {}, subtype {} in variables.py'. - format(MULTIDUT_TESTBED, testbed_subtype)) - - pytest_assert(len(rdma_ports['rx_ports']) >= rx_port_count, - 'MULTIDUT_PORT_INFO doesn\'t have the required Rx ports defined for \ - testbed {}, subtype {} in variables.py'. 
- format(MULTIDUT_TESTBED, testbed_subtype)) - logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - if is_snappi_multidut(duthosts): - snappi_ports = get_snappi_ports_for_rdma(snappi_port_list, rdma_ports, - tx_port_count, rx_port_count, MULTIDUT_TESTBED) - else: - snappi_ports = get_snappi_ports - testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(duthosts, - snappi_ports, - snappi_api) + + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut if (snappi_ports[0]['duthost'].is_multi_asic): ports_dict = defaultdict(list) @@ -509,7 +334,7 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, ports_dict[k] = list(set(ports_dict[k])) logger.info('Port dictionary:{}'.format(ports_dict)) - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): asic_list = ports_dict[duthost.hostname] for asic in asic_list: asic_id = re.match(r"(asic)(\d+)", asic).group(2) @@ -518,15 +343,15 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, duthost.command("sudo systemctl reset-failed {}".format(proc)) duthost.command("sudo systemctl restart {}".format(proc)) logger.info("Wait until the system is stable") - pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") else: - for duthost in [snappi_ports[0]['duthost'], snappi_ports[1]['duthost']]: + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) duthost.command("systemctl reset-failed {}".format(restart_service)) duthost.command("systemctl restart {}".format(restart_service)) logger.info("Wait until the system is stable") - 
pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started), + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") snappi_extra_params = SnappiTestParams() @@ -541,5 +366,3 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, prio_dscp_map=prio_dscp_map, trigger_pfcwd=trigger_pfcwd, snappi_extra_params=snappi_extra_params) - - cleanup_config(duthosts, snappi_ports) From ed98bddc49faa9f6b3fa9ad3bde5d2d11f612b45 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Fri, 8 Nov 2024 19:16:47 +0800 Subject: [PATCH 051/175] Skip dualtor check_nexthops_single_downlink on VS platform (#15470) Approach What is the motivation for this PR? In PR test, check_nexthops_single_downlink have chance to take more than 1 hour because it needs to run several rounds of traffic test, each round would take more than 20 mins, it would easily cause PR test timeout How did you do it? 
Skip earlier in check_nexthops_single_downlink for VS platform, needn't go deep into traffic test Co-authored-by: xwjiang2021 <96218837+xwjiang2021@users.noreply.github.com> --- tests/common/dualtor/dual_tor_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index ee76923be5d..27b4819d447 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -1184,6 +1184,10 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo)) port_packet_count = dict() + + if asic_type == "vs": + logging.info("Skipping validation on VS platform") + return packets_to_send = generate_hashed_packet_to_server(ptfadapter, rand_selected_dut, HASH_KEYS, dst_server_addr, expect_packet_num) @@ -1197,10 +1201,6 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add for ptf_idx, pkt_count in ptf_port_count.items(): port_packet_count[ptf_idx] = port_packet_count.get(ptf_idx, 0) + pkt_count - if asic_type == "vs": - logging.info("Skipping validation on VS platform") - return - logging.info("Received packets in ports: {}".format(str(port_packet_count))) for downlink_int in expected_downlink_ports: # packets should be either 0 or expect_packet_num: From 151b07c9b9c5c6743a0ce04e16b5d4fc9f0b1a61 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Fri, 8 Nov 2024 20:01:12 +0800 Subject: [PATCH 052/175] [GCU] Extend timeout to 10min (#15434) What is the motivation for this PR? update timeout introduced in #14881 How did you do it? increase How did you verify/test it? 
E2E --- tests/common/gu_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index 3bf7da0d5bf..cb5b0f96a37 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -14,7 +14,7 @@ DEFAULT_CHECKPOINT_NAME = "test" GCU_FIELD_OPERATION_CONF_FILE = "gcu_field_operation_validators.conf.json" GET_HWSKU_CMD = "sonic-cfggen -d -v DEVICE_METADATA.localhost.hwsku" -GCUTIMEOUT = 240 +GCUTIMEOUT = 600 BASE_DIR = os.path.dirname(os.path.realpath(__file__)) FILES_DIR = os.path.join(BASE_DIR, "files") From 0a4e40b04a1ff41cbc5f8ef63199e68734ff0d81 Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Mon, 11 Nov 2024 13:42:49 +1100 Subject: [PATCH 053/175] feat: add support for clearing counter for ingress (#15469) Description of PR Summary: Fixes # (issue) Az 30112879 Approach What is the motivation for this PR? Ingress port does not get cleared before running the test. This change will make sure both ingress and egress are cleared Signed-off-by: Austin Pham --- tests/common/snappi_tests/multi_dut_params.py | 2 ++ tests/common/snappi_tests/traffic_generation.py | 7 ++++--- .../lossless_response_to_external_pause_storms_helper.py | 8 ++++++++ ...lossless_response_to_throttling_pause_storms_helper.py | 8 ++++++++ .../multidut/pfc/files/m2o_fluctuating_lossless_helper.py | 8 ++++++++ .../pfc/files/m2o_oversubscribe_lossless_helper.py | 7 +++++++ .../pfc/files/m2o_oversubscribe_lossless_lossy_helper.py | 8 ++++++++ .../multidut/pfc/files/m2o_oversubscribe_lossy_helper.py | 8 ++++++++ 8 files changed, 53 insertions(+), 3 deletions(-) diff --git a/tests/common/snappi_tests/multi_dut_params.py b/tests/common/snappi_tests/multi_dut_params.py index b8e5ba4d332..20f7bd5b90d 100644 --- a/tests/common/snappi_tests/multi_dut_params.py +++ b/tests/common/snappi_tests/multi_dut_params.py @@ -16,3 +16,5 @@ def __init__(self): self.duthost1 = None self.duthost2 = None self.multi_dut_ports = None + 
self.ingress_duthosts = [] + self.egress_duthosts = [] diff --git a/tests/common/snappi_tests/traffic_generation.py b/tests/common/snappi_tests/traffic_generation.py index 5acf21c90fd..005f53c0a00 100644 --- a/tests/common/snappi_tests/traffic_generation.py +++ b/tests/common/snappi_tests/traffic_generation.py @@ -344,9 +344,10 @@ def run_traffic(duthost, cs.state = cs.START api.set_capture_state(cs) - clear_dut_interface_counters(duthost) - - clear_dut_que_counters(duthost) + for host in set([*snappi_extra_params.multi_dut_params.ingress_duthosts, + *snappi_extra_params.multi_dut_params.egress_duthosts, duthost]): + clear_dut_interface_counters(host) + clear_dut_que_counters(host) logger.info("Starting transmit on all flows ...") ts = api.transmit_state() diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py index 88ec47e38b9..56fc66aabf5 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py @@ -74,11 +74,19 @@ def run_lossless_response_to_external_pause_storms_test(api, rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id_list = [rx_port["port_id"]] egress_duthost = rx_port['duthost'] + + # Append the egress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], 
tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py index a07222e5bcd..216a22ece41 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py @@ -80,11 +80,19 @@ def run_lossless_response_to_throttling_pause_storms_test(api, rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id_list = [rx_port["port_id"]] egress_duthost = rx_port['duthost'] + + # Append the egress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py index 2561831ec24..ad1a5bb7f81 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py @@ -67,11 +67,19 @@ def run_m2o_fluctuating_lossless_test(api, rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] 
rx_port_id_list = [rx_port["port_id"]] egress_duthost = rx_port['duthost'] + + # Append the egress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py index 948a370ba49..550f94a0236 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py @@ -71,9 +71,16 @@ def run_m2o_oversubscribe_lossless_test(api, egress_duthost = rx_port['duthost'] dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) + # Append the egress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) diff --git 
a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py index 5d8f740044f..8efd129cff3 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py @@ -74,11 +74,19 @@ def run_pfc_m2o_oversubscribe_lossless_lossy_test(api, rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id_list = [rx_port["port_id"]] egress_duthost = rx_port['duthost'] + + # Append the egress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py index 261ab507f75..ac5defb9bdf 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py @@ -73,11 +73,19 @@ def run_pfc_m2o_oversubscribe_lossy_test(api, rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id_list = [rx_port["port_id"]] egress_duthost = rx_port['duthost'] + + # Append the egress here for run_traffic to clear its counters + 
snappi_extra_params.multi_dut_params.egress_duthosts.append(egress_duthost) + dut_asics_to_be_configured.add((egress_duthost, rx_port['asic_value'])) tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] ingress_duthost = tx_port[0]['duthost'] + + # Append the ingress here for run_traffic to clear its counters + snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set dut_asics_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) From 91ea307b26ff2a4f94db2b114de7cd78360416ff Mon Sep 17 00:00:00 2001 From: wenyiz2021 <91497961+wenyiz2021@users.noreply.github.com> Date: Mon, 11 Nov 2024 11:33:23 -0800 Subject: [PATCH 054/175] [multi-asic fix] fix new change in test_route_perf.py to support multi-asic (#15452) Description of PR Summary: Fixes # (issue) for multi-asic devices, when asic is broadcom, using "bcmcmd" needs to specify asic id. 
--- .azure-pipelines/pr_test_scripts.yaml | 1 + tests/route/test_route_perf.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index af74bf44aa0..290ce1d9a7d 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -428,6 +428,7 @@ multi-asic-t1-lag: - bgp/test_bgp_bbr.py - bgp/test_bgp_fact.py - bgp/test_bgp_update_timer.py + - route/test_route_perf.py - snmp/test_snmp_default_route.py - snmp/test_snmp_link_local.py - snmp/test_snmp_loopback.py diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py index 2a7fccd3167..3488792e9d8 100644 --- a/tests/route/test_route_perf.py +++ b/tests/route/test_route_perf.py @@ -55,15 +55,18 @@ def get_route_scale_per_role(tbinfo, ip_version): @pytest.fixture -def check_config(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo): +def check_config(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_rand_one_frontend_asic_index, tbinfo): if tbinfo["topo"]["type"] in ["m0", "mx"]: return duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asic = duthost.facts["asic_type"] + asic_id = enum_rand_one_frontend_asic_index if (asic == "broadcom"): - alpm_enable = duthost.command('bcmcmd "conf show l3_alpm_enable"')["stdout_lines"][2].strip() + broadcom_cmd = "bcmcmd -n " + str(asic_id) if duthost.is_multi_asic else "bcmcmd" + alpm_cmd = "{} {}".format(broadcom_cmd, "conf show l3_alpm_enable") + alpm_enable = duthost.command(alpm_cmd)["stdout_lines"][2].strip() logger.info("Checking config: {}".format(alpm_enable)) pytest_assert(alpm_enable == "l3_alpm_enable=2", "l3_alpm_enable is not set for route scaling") From 55ff70e99ef07e33f82df13896abb874da4db46c Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Mon, 11 Nov 2024 16:40:44 -0800 Subject: [PATCH 055/175] Address test gap to verify portchannel member packet 
forwarding w.r.t SAI_LAG_MEMBER_ATTR_EGRESS/INGRESS enable/disable state (#15191) Verifies: Added new test case to validate LAG MEMBER SAI attribute SAI_LAG_MEMBER_ATTR_EGRESS and SAI_LAG_MEMBER_ATTR_INGRESS value of enable/disable On disable: 1. Control packets to/from should not be processed (BGP down) 2. Data packets from/to CPU not be processed (ping to IP interface should faild) 3. Data packets across portchannel not to be processed (ping to another peer ip should fail) On enable: Reverse of all the above 3 scenarios, --- tests/common/devices/sonic.py | 30 +++- tests/common/devices/sonic_asic.py | 26 +++- tests/pc/test_lag_member_forwarding.py | 181 +++++++++++++++++++++++++ 3 files changed, 233 insertions(+), 4 deletions(-) create mode 100644 tests/pc/test_lag_member_forwarding.py diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 83a6d52ed31..b19ee0fb873 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -2229,12 +2229,38 @@ def ping_v4(self, ipv4, count=1, ns_arg=""): netns_arg = "sudo ip netns exec {} ".format(ns_arg) try: - self.shell("{}ping -q -c{} {} > /dev/null".format( + rc = self.shell("{}ping -q -c{} {} > /dev/null".format( netns_arg, count, ipv4 )) except RunAnsibleModuleFail: return False - return True + return not rc['failed'] + + def ping_v6(self, ipv6, count=1, ns_arg=""): + """ + Returns 'True' if ping to IP address works, else 'False' + Args: + IPv6 address + + Returns: + True or False + """ + try: + socket.inet_pton(socket.AF_INET6, ipv6) + except socket.error: + raise Exception("Invalid IPv6 address {}".format(ipv6)) + + netns_arg = "" + if ns_arg is not DEFAULT_NAMESPACE: + netns_arg = "sudo ip netns exec {} ".format(ns_arg) + + try: + rc = self.shell("{}ping -6 -q -c{} {} > /dev/null".format( + netns_arg, count, ipv6 + )) + except RunAnsibleModuleFail: + return False + return not rc['failed'] def is_backend_portchannel(self, port_channel, mg_facts): ports = 
mg_facts["minigraph_portchannels"].get(port_channel) diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py index 7f8ff9c1190..89e1b33f8b7 100644 --- a/tests/common/devices/sonic_asic.py +++ b/tests/common/devices/sonic_asic.py @@ -267,12 +267,34 @@ def ping_v4(self, ipv4, count=1): raise Exception("Invalid IPv4 address {}".format(ipv4)) try: - self.sonichost.shell("{}ping -q -c{} {} > /dev/null".format( + rc = self.sonichost.shell("{}ping -q -c{} {} > /dev/null".format( self.ns_arg, count, ipv4 )) except RunAnsibleModuleFail: return False - return True + return not rc['failed'] + + def ping_v6(self, ipv6, count=1): + """ + Returns 'True' if ping to IP address works, else 'False' + Args: + IPv6 address + + Returns: + True or False + """ + try: + socket.inet_pton(socket.AF_INET6, ipv6) + except socket.error: + raise Exception("Invalid IPv6 address {}".format(ipv6)) + + try: + rc = self.sonichost.shell("{}ping -6 -q -c{} {} > /dev/null".format( + self.ns_arg, count, ipv6 + )) + except RunAnsibleModuleFail: + return False + return not rc['failed'] def is_backend_portchannel(self, port_channel): mg_facts = self.sonichost.minigraph_facts(host=self.sonichost.hostname)['ansible_facts'] diff --git a/tests/pc/test_lag_member_forwarding.py b/tests/pc/test_lag_member_forwarding.py new file mode 100644 index 00000000000..d6dea34e233 --- /dev/null +++ b/tests/pc/test_lag_member_forwarding.py @@ -0,0 +1,181 @@ +import ipaddr as ipaddress +import json +import pytest +import time +from tests.common import config_reload +from ptf.mask import Mask +import ptf.packet as scapy +import ptf.testutils as testutils +from tests.common.helpers.assertions import pytest_assert + +pytestmark = [ + pytest.mark.topology('any') +] + + +def build_pkt(dest_mac, ip_addr, ttl): + pkt = testutils.simple_tcp_packet( + eth_dst=dest_mac, + eth_src="00:11:22:33:44:55", + pktlen=100, + ip_src="19.0.0.100", + ip_dst=ip_addr, + ip_ttl=ttl, + tcp_dport=200, + tcp_sport=100 + ) + 
exp_packet = Mask(pkt) + exp_packet.set_do_not_care_scapy(scapy.Ether, "dst") + exp_packet.set_do_not_care_scapy(scapy.Ether, "src") + + exp_packet.set_do_not_care_scapy(scapy.IP, "version") + exp_packet.set_do_not_care_scapy(scapy.IP, "ihl") + exp_packet.set_do_not_care_scapy(scapy.IP, "tos") + exp_packet.set_do_not_care_scapy(scapy.IP, "len") + exp_packet.set_do_not_care_scapy(scapy.IP, "flags") + exp_packet.set_do_not_care_scapy(scapy.IP, "id") + exp_packet.set_do_not_care_scapy(scapy.IP, "frag") + exp_packet.set_do_not_care_scapy(scapy.IP, "ttl") + exp_packet.set_do_not_care_scapy(scapy.IP, "chksum") + exp_packet.set_do_not_care_scapy(scapy.IP, "options") + + exp_packet.set_do_not_care_scapy(scapy.TCP, "seq") + exp_packet.set_do_not_care_scapy(scapy.TCP, "ack") + exp_packet.set_do_not_care_scapy(scapy.TCP, "reserved") + exp_packet.set_do_not_care_scapy(scapy.TCP, "dataofs") + exp_packet.set_do_not_care_scapy(scapy.TCP, "window") + exp_packet.set_do_not_care_scapy(scapy.TCP, "chksum") + exp_packet.set_do_not_care_scapy(scapy.TCP, "urgptr") + + exp_packet.set_ignore_extra_bytes() + return pkt, exp_packet + + +def test_lag_member_forwarding_packets(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, ptfadapter): + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + lag_facts = duthost.lag_facts(host=duthost.hostname)['ansible_facts']['lag_facts'] + if not len(lag_facts['lags'].keys()): + pytest.skip("No Lag found in this topology") + portchannel_name = list(lag_facts['lags'].keys())[0] + portchannel_dest_name = None + recv_port = [] + if len(lag_facts['lags'].keys()) > 1: + portchannel_dest_name = list(lag_facts['lags'].keys())[1] + portchannel_dest_members = list(lag_facts['lags'][portchannel_dest_name]['po_stats']['ports'].keys()) + assert len(portchannel_dest_members) > 0 + for member in portchannel_dest_members: + recv_port.append(mg_facts['minigraph_ptf_indices'][member]) + + 
portchannel_members = list(lag_facts['lags'][portchannel_name]['po_stats']['ports'].keys()) + assert len(portchannel_members) > 0 + asic_name = lag_facts['names'][portchannel_name] + dest_asic_name = lag_facts['names'][portchannel_dest_name] + asic_idx = duthost.get_asic_id_from_namespace(asic_name) + asichost = duthost.asic_instance_from_namespace(asic_name) + dest_asichost = duthost.asic_instance_from_namespace(dest_asic_name) + + config_facts = asichost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + dest_config_facts = dest_asichost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + send_port = mg_facts['minigraph_ptf_indices'][portchannel_members[0]] + holdtime = 0 + + peer_device_ip_set = set() + peer_device_dest_ip = None + + # Find test (1st) port channel and fetch it's BGP neighbors + # ipv4 and ipv6 ip address to verify case of ping to neighbor + for peer_device_ip, peer_device_bgp_data in config_facts['BGP_NEIGHBOR'].items(): + if peer_device_bgp_data["name"] == config_facts['DEVICE_NEIGHBOR'][portchannel_members[0]]['name']: + peer_device_ip_set.add(peer_device_ip) + # holdtime to wait for BGP session to go down when lag member is marked as disable state. 
+ if not holdtime: + holdtime = duthost.get_bgp_neighbor_info(peer_device_ip, asic_idx)["bgpTimerHoldTimeMsecs"] + # Find test (2nd) port channel and fetch it's BGP neighbors + # ipv4 and ipv6 ip address to verify data forwarding across port-channel + elif (portchannel_dest_name and not peer_device_dest_ip + and peer_device_bgp_data["name"] == + dest_config_facts['DEVICE_NEIGHBOR'][portchannel_dest_members[0]]['name'] and + ipaddress.IPNetwork(peer_device_ip).version == 4): + peer_device_dest_ip = peer_device_ip + + # we should have v4 and v6 peer neighbors + assert len(peer_device_ip_set) == 2 + assert holdtime > 0 + + bgp_fact_info = asichost.bgp_facts() + + for ip in peer_device_ip_set: + assert bgp_fact_info['ansible_facts']['bgp_neighbors'][ip]['state'] == 'established' + + for ip in peer_device_ip_set: + if ipaddress.IPNetwork(ip).version == 4: + rc = asichost.ping_v4(ip) + else: + rc = asichost.ping_v6(ip) + assert rc + + rtr_mac = asichost.get_router_mac() + ip_ttl = 121 + ip_route = peer_device_dest_ip + + def built_and_send_tcp_ip_packet(expected): + pkt, exp_pkt = build_pkt(rtr_mac, ip_route, ip_ttl) + testutils.send(ptfadapter, send_port, pkt, 10) + if expected: + (_, recv_pkt) = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, + ports=recv_port) + assert recv_pkt + # Make sure routing is done + pytest_assert(scapy.Ether(recv_pkt).ttl == (ip_ttl - 1), "Routed Packet TTL not decremented") + else: + testutils.verify_no_packet_any(test=ptfadapter, pkt=exp_pkt, + ports=recv_port) + + if peer_device_dest_ip: + ptfadapter.dataplane.flush() + built_and_send_tcp_ip_packet(True) + + lag_member_file_dir = duthost.shell('mktemp')['stdout'] + lag_member_config = [] + for portchannel_member_name in portchannel_members: + lag_member_config.append({ + "LAG_MEMBER_TABLE:{}:{}".format(portchannel_name, portchannel_member_name): { + "status": "disabled" + }, + "OP": "SET" + }) + try: + # Copy json file to DUT + 
duthost.copy(content=json.dumps(lag_member_config, indent=4), dest=lag_member_file_dir, verbose=False) + json_set = "/dev/stdin < {}".format(lag_member_file_dir) + result = duthost.docker_exec_swssconfig(json_set, "swss", asic_idx) + if result["rc"] != 0: + pytest.fail( + "Failed to apply lag member configuration file: {}".format(result["stderr"]) + ) + + # Make sure data forwarding starts to fail + if peer_device_dest_ip: + ptfadapter.dataplane.flush() + built_and_send_tcp_ip_packet(False) + + # make sure ping should fail + for ip in peer_device_ip_set: + if ipaddress.IPNetwork(ip).version == 4: + rc = asichost.ping_v4(ip) + else: + rc = asichost.ping_v6(ip) + + if rc: + pytest.fail("Ping is still working on lag disable member for neighbor {}", ip) + + time.sleep(holdtime/1000) + # Make sure BGP goes down + bgp_fact_info = asichost.bgp_facts() + for ip in peer_device_ip_set: + if bgp_fact_info['ansible_facts']['bgp_neighbors'][ip]['state'] == 'established': + pytest.fail("BGP is still enable on lag disable member for neighbor {}", ip) + finally: + duthost.shell('rm -f {}'.format(lag_member_file_dir)) + config_reload(duthost, config_source='config_db') From 47b734a0919acac7cb1645306a34c8a0cdc0fd2c Mon Sep 17 00:00:00 2001 From: ryanzhu706 Date: Mon, 11 Nov 2024 16:51:49 -0800 Subject: [PATCH 056/175] Add missed import fixture (#15369) What is the motivation for this PR? test_advanced_reboot test failed due to fixture 'advanceboot_neighbor_restore' not found How did you do it? 
Add missed fixture "advanceboot_neighbor_restore" --- tests/platform_tests/test_advanced_reboot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py index d036f275156..52246c4405a 100644 --- a/tests/platform_tests/test_advanced_reboot.py +++ b/tests/platform_tests/test_advanced_reboot.py @@ -8,7 +8,7 @@ from tests.common.fixtures.advanced_reboot import get_advanced_reboot # noqa F401 from tests.platform_tests.verify_dut_health import add_fail_step_to_reboot # noqa F401 from tests.common.platform.warmboot_sad_cases import get_sad_case_list, SAD_CASE_LIST -from tests.common.platform.device_utils import advanceboot_loganalyzer, verify_dut_health # noqa F401 +from tests.common.platform.device_utils import advanceboot_loganalyzer, verify_dut_health, advanceboot_neighbor_restore # noqa F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service # noqa F401 from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip, show_muxcable_status From 1f9072790ea9f1fd7a93d4f166a64ca17ed6dd9a Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:05:52 -0800 Subject: [PATCH 057/175] Adding minor misc fixes for pfcwd scripts. (#15286) Description of PR This PR addresses the following issues: The dut-counter check on pfcwd scripts is using a wrong logic, it attempts to read the incoming drops and incoming packets and compares. Unfortunately, the packets are not being dropped in ingress, but they are dropped in egress. So this PR changes the logic to read the outgoing port's drops, and incoming ports' total packets. The flow-completion-check in the common code is for 20 seconds. But some of the traffic streams is configured for 26 seconds, so we need to check for atleast 26 seconds. So I have increased the time to 30 seconds. 
Along with the above, the pfcwd_basic script is updated to use the common code as implemented in the PR:15099. Summary: Pls see above. Approach What is the motivation for this PR? Fixing some of the issues that we came across during the testing. move the repeated code to the common code. How did you do it? Updated the counter reading mechanism. Updated the max_attempts for flow-completion to 30 seconds. Updated the pfcwd_basic to use the new lib code from Move the common code from testcases to the fixtures. #15099. How did you verify/test it? Ran it on my TB: Any platform specific information? Changes were tested on cisco-8000 only. The one fail below is not related to this change. My run result: =========================================================================================================================== PASSES =========================================================================================================================== ______________________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio[multidut_port_info0-True] _______________________________________________________________________________________________ ______________________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio[multidut_port_info1-True] _______________________________________________________________________________________________ ______________________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio[multidut_port_info1-False] ______________________________________________________________________________________________ ----------------------------------------------------------------------------- generated xml file: /run_logs/ixia/fixturize-reboot/2024-10-30-21-43-12/tr_2024-10-30-21-43-12.xml ----------------------------------------------------------------------------- INFO:root:Can not 
get Allure report URL. Please check logs ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 21:54:15 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. Please check logs ================================================================================================================== short test summary info =================================================================================================================== PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio[multidut_port_info0-True] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio[multidut_port_info1-True] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio[multidut_port_info1-False] FAILED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio[multidut_port_info0-False] - Failed: Loss rate of Data Flow 1 (0.17994320225890004) should be in [0, 0] ==================================================================================================== 1 failed, 3 passed, 7 warnings in 660.78s (0:11:00) ===================================================================================================== sonic@ixia-sonic-mgmt-whitebox:/data/tests$ co-authrized by: jianquanye@microsoft.com --- tests/common/snappi_tests/common_helpers.py | 2 +- ...esponse_to_external_pause_storms_helper.py | 14 ++++----- ...ponse_to_throttling_pause_storms_helper.py | 14 ++++----- .../files/m2o_fluctuating_lossless_helper.py | 16 +++++----- .../m2o_oversubscribe_lossless_helper.py | 14 ++++----- ...m2o_oversubscribe_lossless_lossy_helper.py | 17 +++++------ 
.../files/m2o_oversubscribe_lossy_helper.py | 15 +++++----- .../pfcwd_multidut_burst_storm_helper.py | 2 +- .../test_multidut_pfcwd_basic_with_snappi.py | 29 ++++++++----------- 9 files changed, 54 insertions(+), 69 deletions(-) diff --git a/tests/common/snappi_tests/common_helpers.py b/tests/common/snappi_tests/common_helpers.py index d7f79642b4a..5521b6c2c97 100644 --- a/tests/common/snappi_tests/common_helpers.py +++ b/tests/common/snappi_tests/common_helpers.py @@ -1134,9 +1134,9 @@ def get_interface_stats(duthost, port): """ # Initializing nested dictionary i_stats i_stats = defaultdict(dict) - i_stats[duthost.hostname][port] = {} n_out = parse_portstat(duthost.command('portstat -i {}'.format(port))['stdout_lines'])[port] + i_stats[duthost.hostname][port] = n_out # rx_err, rx_ovr and rx_drp are counted in single counter rx_fail # tx_err, tx_ovr and tx_drp are counted in single counter tx_fail rx_err = ['rx_err', 'rx_ovr', 'rx_drp'] diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py index 56fc66aabf5..fb139f3f255 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py @@ -137,15 +137,13 @@ def run_lossless_response_to_external_pause_storms_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] + dut_tx_port = rx_port['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] 
- rx_pkts_1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py index 216a22ece41..b177fd58282 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py @@ -144,15 +144,13 @@ def run_lossless_response_to_throttling_pause_storms_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] + dut_tx_port = rx_port['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] - rx_pkts_1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, 
tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py index ad1a5bb7f81..5da4ec7d6bf 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py @@ -1,5 +1,6 @@ import logging # noqa: F401 import random +from math import ceil from tests.common.helpers.assertions import pytest_assert, pytest_require # noqa: F401 from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts # noqa: F401 from tests.common.snappi_tests.snappi_helpers import get_dut_port_id # noqa: F401 @@ -10,7 +11,6 @@ from tests.common.snappi_tests.traffic_generation import run_traffic, \ setup_base_traffic_config # noqa: F401 from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict -from math import ceil logger = logging.getLogger(__name__) PAUSE_FLOW_NAME = 'Pause Storm' @@ -126,15 +126,13 @@ def run_m2o_fluctuating_lossless_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] + dut_tx_port = rx_port['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop1 = 
get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] - rx_pkts_1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py index 550f94a0236..3f34d6a341b 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py @@ -129,15 +129,13 @@ def run_m2o_oversubscribe_lossless_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] + dut_tx_port = rx_port['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] - rx_pkts_1 = get_interface_stats(ingress_duthost, 
tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py index 8efd129cff3..302ea6b852a 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py @@ -10,12 +10,13 @@ from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts # noqa: F401 from tests.common.snappi_tests.snappi_helpers import get_dut_port_id # noqa: F401 from tests.common.snappi_tests.common_helpers import pfc_class_enable_vector, \ - stop_pfcwd, disable_packet_aging, sec_to_nanosec, get_interface_stats # noqa: F401 + stop_pfcwd, disable_packet_aging, sec_to_nanosec, get_interface_stats # noqa: F401 from tests.common.snappi_tests.port import select_ports # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import run_traffic, \ setup_base_traffic_config # noqa: F401 from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.portstat_utilities import parse_portstat # noqa: F401 logger = logging.getLogger(__name__) @@ -133,15 +134,13 @@ def 
run_pfc_m2o_oversubscribe_lossless_lossy_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] + dut_tx_port = rx_port['peer_port'] # Fetch relevant statistics - pkt_drop1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] - rx_pkts_1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py index ac5defb9bdf..9bacdc7ade5 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py @@ -135,14 +135,13 @@ def run_pfc_m2o_oversubscribe_lossy_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - tx_port1 = tx_port[0]['peer_port'] - tx_port2 = tx_port[1]['peer_port'] - pkt_drop1 = get_interface_stats(ingress_duthost, 
tx_port1)[ingress_duthost.hostname][tx_port1]['rx_fail'] - pkt_drop2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_fail'] - rx_pkts_1 = get_interface_stats(ingress_duthost, tx_port1)[ingress_duthost.hostname][tx_port1]['rx_pkts'] - rx_pkts_2 = get_interface_stats(ingress_duthost, tx_port2)[ingress_duthost.hostname][tx_port2]['rx_pkts'] - # Calculate the total packet drop - pkt_drop = pkt_drop1 + pkt_drop2 + dut_tx_port = rx_port['peer_port'] + dut_rx_port1 = tx_port[0]['peer_port'] + dut_rx_port2 = tx_port[1]['peer_port'] + + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py index 9b8b02e9257..daa2048a2af 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py @@ -285,7 +285,7 @@ def __run_traffic(api, config, all_flow_names, exp_dur_sec): time.sleep(exp_dur_sec) attempts = 0 - max_attempts = 20 + max_attempts = 30 while attempts < max_attempts: request = api.metrics_request() diff --git a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py index 126a5fe14dd..daf00e18751 100644 --- a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py +++ b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py @@ -11,13 +11,12 @@ 
get_snappi_ports_multi_dut, is_snappi_multidut, \ snappi_api, snappi_dut_base_config, get_snappi_ports, get_snappi_ports_for_rdma, cleanup_config # noqa: F401 from tests.common.snappi_tests.qos_fixtures import prio_dscp_map, lossless_prio_list # noqa F401 -from tests.snappi_tests.variables import MULTIDUT_PORT_INFO, MULTIDUT_TESTBED # noqa: F401 from tests.common.reboot import reboot # noqa: F401 from tests.common.utilities import wait_until # noqa: F401 from tests.snappi_tests.multidut.pfcwd.files.pfcwd_multidut_basic_helper import run_pfcwd_basic_test from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.snappi_tests.files.helper import skip_pfcwd_test, reboot_duts, \ - setup_ports_and_dut, multidut_port_info # noqa: F401 + setup_ports_and_dut # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] @@ -124,13 +123,12 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - lossless_prio_list, # noqa: F811 - tbinfo, # noqa: F811 - prio_dscp_map, # noqa: F811 - setup_ports_and_dut, # noqa: F811 + enum_dut_lossless_prio_with_completeness_level, # noqa: F811 + get_snappi_ports, # noqa: F811 + prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 reboot_duts, # noqa: F811 - trigger_pfcwd # noqa: F811 - ): + trigger_pfcwd): """ Verify PFC watchdog basic test works on a single lossless priority after various types of reboot @@ -141,7 +139,6 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no localhost (pytest fixture): localhost handle duthosts (pytest fixture): list of DUTs prio_dscp_map (pytest fixture): priority vs. 
DSCP map (key = priority) - reboot_type (str): reboot type to be issued on the DUT trigger_pfcwd (bool): if PFC watchdog is expected to be triggered Returns: @@ -150,8 +147,8 @@ def test_pfcwd_basic_single_lossless_prio_reboot(snappi_api, # no testbed_config, port_config_list, snappi_ports = setup_ports_and_dut - lossless_prio = random.sample(lossless_prio_list, 1) - lossless_prio = int(lossless_prio[0]) + _, lossless_prio = enum_dut_lossless_prio_with_completeness_level.split('|') + lossless_prio = int(lossless_prio) snappi_extra_params = SnappiTestParams() snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports @@ -174,11 +171,10 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no fanout_graph_facts_multidut, # noqa F811 localhost, duthosts, - lossless_prio_list, # noqa: F811 - tbinfo, # noqa: F811 - prio_dscp_map, # noqa F811 - setup_ports_and_dut, # noqa: F811 - reboot_duts, # noqa: F811 + enum_dut_lossless_prio_with_completeness_level, # noqa: F811 + tbinfo, # noqa: F811 + prio_dscp_map, # noqa F811 + reboot_duts, # noqa: F811 trigger_pfcwd): """ Verify PFC watchdog basic test works on multiple lossless priorities after various kinds of reboots @@ -191,7 +187,6 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no duthosts (pytest fixture): list of DUTs lossless_prio_list (pytest fixture): list of all the lossless priorities prio_dscp_map (pytest fixture): priority vs. 
DSCP map (key = priority) - reboot_type (str): reboot type to be issued on the DUT trigger_pfcwd (bool): if PFC watchdog is expected to be triggered Returns: From f7a4584d8811abe9d1eed16d01c98d88d1a72277 Mon Sep 17 00:00:00 2001 From: Rajendra Kumar Thirumurthi Date: Mon, 11 Nov 2024 17:38:07 -0800 Subject: [PATCH 058/175] Skipping test_dip_sip for Cisco 8122 platforms (#15479) --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index d91c871a263..a4737b99dc2 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1103,8 +1103,10 @@ ip/test_mgmt_ipv6_only.py: ####################################### ipfwd/test_dip_sip.py: skip: - reason: "Unsupported topology." + reason: "Unsupported topology or unsupported in specific Cisco platforms." + conditions_logical_operator: or conditions: + - "platform in ['x86_64-8122_64eh_o-r0', 'x86_64-8122_64ehf_o-r0']" - "topo_type not in ['t0', 't1', 't2', 'm0', 'mx']" ipfwd/test_dir_bcast.py: From 979b9a2c1ee40c25c55ad1451c0c9be310f11fd2 Mon Sep 17 00:00:00 2001 From: zitingguo-ms Date: Mon, 11 Nov 2024 18:55:35 -0800 Subject: [PATCH 059/175] Add a new case to check if config exist (#15080) Description of PR Summary: Fixes # (issue) There has been an issue that missing exit in the FRR template that caused nht config is missed in the FRR config when vrf is configurated. After fixing the issue in sonic-net/sonic-buildimage#19587, add a new case to verify the 'ip nht resolve-via-default' should be present in FRR config whether vrf is configurated. 
--- tests/bgp/test_bgpmon.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/bgp/test_bgpmon.py b/tests/bgp/test_bgpmon.py index a2b413252ed..8954753d9b8 100644 --- a/tests/bgp/test_bgpmon.py +++ b/tests/bgp/test_bgpmon.py @@ -109,6 +109,17 @@ def build_syn_pkt(local_addr, peer_addr): return exp_packet +def test_resolve_via_default_exist(duthost): + """ + Test to verify if 'ip nht resolve-via-default' and 'ipv6 nht resolve-via-default' are present in global FRR config. + """ + frr_global_config = duthost.shell("vtysh -c 'show running-config'")['stdout'] + pytest_assert("ip nht resolve-via-default" in frr_global_config, + "ip nht resolve-via-default not present in global FRR config") + pytest_assert("ipv6 nht resolve-via-default" in frr_global_config, + "ipv6 nht resolve-via-default not present in global FRR config") + + def test_bgpmon(dut_with_default_route, localhost, enum_rand_one_frontend_asic_index, common_setup_teardown, set_timeout_for_bgpmon, ptfadapter, ptfhost): """ From 953710b13f8245c4750425bc8d9b2fa909f528d3 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:35:30 +0800 Subject: [PATCH 060/175] Remove skip_traffic_test fixture in sub_port_interfaces tests (#15457) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in sub_port_interfaces tests How did you verify/test it? 
--- .../sub_port_interfaces/sub_ports_helpers.py | 21 +++++++++++-------- .../test_sub_port_interfaces.py | 21 +++++++------------ 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/tests/sub_port_interfaces/sub_ports_helpers.py b/tests/sub_port_interfaces/sub_ports_helpers.py index f92730b54b4..f509b6a8abc 100644 --- a/tests/sub_port_interfaces/sub_ports_helpers.py +++ b/tests/sub_port_interfaces/sub_ports_helpers.py @@ -86,7 +86,7 @@ def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, tr_type, ttl, dl_v def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ptfhost=None, ip_src='', ip_dst='', pkt_action=None, type_of_traffic='ICMP', ttl=64, pktlen=100, ip_tunnel=None, - skip_traffic_test=False, **kwargs): + **kwargs): """ Send packet from PTF to DUT and verify that DUT sends/doesn't packet to PTF. @@ -105,9 +105,6 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ptfhost pktlen: packet length ip_tunnel: Tunnel IP address of DUT """ - if skip_traffic_test is True: - logger.info("Skipping traffic test") - return type_of_traffic = [type_of_traffic] if not isinstance(type_of_traffic, list) else type_of_traffic for tr_type in type_of_traffic: @@ -150,6 +147,7 @@ def generate_and_verify_tcp_udp_traffic(duthost, ptfadapter, src_port, dst_port, src_dl_vlan_enable = False dst_dl_vlan_enable = False router_mac = duthost.facts['router_mac'] + asic_type = duthost.facts['asic_type'] src_port_number = int(get_port_number(src_port)) dst_port_number = int(get_port_number(dst_port)) src_mac = ptfadapter.dataplane.get_mac(0, src_port_number).decode() @@ -198,7 +196,8 @@ def generate_and_verify_tcp_udp_traffic(duthost, ptfadapter, src_port, dst_port, pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() - pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) + if asic_type != 'vs': + pytest_assert(pkt_in_buffer is True, "Expected packet not 
available:\n{}".format(pkt_in_buffer)) def generate_and_verify_icmp_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action, tr_type, @@ -288,6 +287,7 @@ def generate_and_verify_decap_traffic(duthost, ptfadapter, src_port, dst_port, i ip_tunnel: Tunnel IP address of DUT """ router_mac = duthost.facts['router_mac'] + asic_type = duthost.facts['asic_type'] src_port_number = int(get_port_number(src_port)) dst_port_number = int(get_port_number(dst_port)) @@ -327,7 +327,8 @@ def generate_and_verify_decap_traffic(duthost, ptfadapter, src_port, dst_port, i pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() - pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) + if asic_type != 'vs': + pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) def generate_and_verify_balancing_traffic(duthost, ptfhost, ptfadapter, src_port, dst_port, ip_src, ip_dst, @@ -347,6 +348,7 @@ def generate_and_verify_balancing_traffic(duthost, ptfhost, ptfadapter, src_port ttl: Time to live """ router_mac = duthost.facts['router_mac'] + asic_type = duthost.facts['asic_type'] src_port_number = int(get_port_number(src_port)) src_mac = ptfadapter.dataplane.get_mac(0, src_port_number) ip_src = '10.0.0.1' @@ -403,9 +405,10 @@ def generate_and_verify_balancing_traffic(duthost, ptfhost, ptfadapter, src_port pkt_in_buffer = pkt_filter.filter_pkt_in_buffer() - pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) - pytest_assert(check_balancing(pkt_filter.matched_index), - "Balancing error:\n{}".format(pkt_filter.matched_index)) + if asic_type != 'vs': + pytest_assert(pkt_in_buffer is True, "Expected packet not available:\n{}".format(pkt_in_buffer)) + pytest_assert(check_balancing(pkt_filter.matched_index), + "Balancing error:\n{}".format(pkt_filter.matched_index)) def shutdown_port(duthost, interface): diff --git 
a/tests/sub_port_interfaces/test_sub_port_interfaces.py b/tests/sub_port_interfaces/test_sub_port_interfaces.py index c3bba0e73d5..bde70b883f9 100644 --- a/tests/sub_port_interfaces/test_sub_port_interfaces.py +++ b/tests/sub_port_interfaces/test_sub_port_interfaces.py @@ -15,7 +15,6 @@ from sub_ports_helpers import check_sub_port from sub_ports_helpers import remove_sub_port from sub_ports_helpers import create_sub_port_on_dut -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 pytestmark = [ pytest.mark.topology('t0', 't1') @@ -347,7 +346,7 @@ def test_routing_between_sub_ports_and_port(self, request, type_of_traffic, duth pktlen=pktlen) def test_tunneling_between_sub_ports(self, duthost, ptfadapter, apply_tunnel_table_to_dut, - apply_route_config, skip_traffic_test): # noqa F811 + apply_route_config): """ Validates that packets are routed between sub-ports. @@ -380,11 +379,10 @@ def test_tunneling_between_sub_ports(self, duthost, ptfadapter, apply_tunnel_tab ip_tunnel=sub_ports[src_port]['ip'], pkt_action='fwd', type_of_traffic='decap', - ttl=63, - skip_traffic_test=skip_traffic_test) + ttl=63) def test_balancing_sub_ports(self, duthost, ptfhost, ptfadapter, - apply_balancing_config, skip_traffic_test): # noqa F811 + apply_balancing_config): """ Validates load-balancing when sub-port is part of ECMP Test steps: @@ -417,13 +415,12 @@ def test_balancing_sub_ports(self, duthost, ptfhost, ptfadapter, dst_port=dst_ports, ip_dst=ip_dst, type_of_traffic='balancing', - ttl=63, - skip_traffic_test=skip_traffic_test) + ttl=63) class TestSubPortsNegative(object): def test_packet_routed_with_invalid_vlan(self, duthost, ptfadapter, apply_config_on_the_dut, - apply_config_on_the_ptf, skip_traffic_test): # noqa F811 + apply_config_on_the_ptf): """ Validates that packet aren't routed if sub-ports have invalid VLAN ID. 
@@ -447,13 +444,12 @@ def test_packet_routed_with_invalid_vlan(self, duthost, ptfadapter, apply_config ip_src=value['neighbor_ip'], dst_port=sub_port, ip_dst=value['ip'], - pkt_action='drop', - skip_traffic_test=skip_traffic_test) + pkt_action='drop') class TestSubPortStress(object): def test_max_numbers_of_sub_ports(self, duthost, ptfadapter, apply_config_on_the_dut, - apply_config_on_the_ptf, skip_traffic_test): # noqa F811 + apply_config_on_the_ptf): """ Validates that 256 sub-ports can be created per port or LAG @@ -486,5 +482,4 @@ def test_max_numbers_of_sub_ports(self, duthost, ptfadapter, apply_config_on_the ip_src=value['neighbor_ip'], dst_port=sub_port, ip_dst=value['ip'], - pkt_action='fwd', - skip_traffic_test=skip_traffic_test) + pkt_action='fwd') From 1bff8b830142ba2c1673641e3b1dc60cfea7cf8e Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:35:45 +0800 Subject: [PATCH 061/175] Remove skip_traffic_test fixture in ip tests (#15455) What is the motivation for this PR? Currently we are using conditional mark to add marker, then use pytest hook to redirect testutils.verify function to a function always return True to skip traffic test. With this change, the skip_traffic_test fixture is no longer needed in test cases, streamlining the test code and improving clarity. How did you do it? Remove skip_traffic_test fixture in ip tests How did you verify/test it? 
--- tests/ip/test_ip_packet.py | 50 ++++++++++++++++++++++------------- tests/ipfwd/test_dir_bcast.py | 6 ++--- 2 files changed, 34 insertions(+), 22 deletions(-) diff --git a/tests/ip/test_ip_packet.py b/tests/ip/test_ip_packet.py index ac0ad5ef303..9d12aa3ee79 100644 --- a/tests/ip/test_ip_packet.py +++ b/tests/ip/test_ip_packet.py @@ -12,7 +12,6 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.portstat_utilities import parse_column_positions from tests.common.portstat_utilities import parse_portstat -from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.dut_utils import is_mellanox_fanout @@ -195,12 +194,13 @@ def common_param(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbin .format(prefix, selected_peer_ip_ifaces_pairs[1][0]), ptf_port_idx_namespace)) def test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN send the packet to DUT # THEN DUT should forward it as normal ip packet duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -251,7 +251,8 @@ def test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_ tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -266,12 +267,13 @@ def 
test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_ .format(tx_ok, match_cnt)) def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN manually set checksum as 0xffff and send the packet to DUT # THEN DUT should tolerant packet with 0xffff, forward it as normal packet duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -322,7 +324,8 @@ def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -338,13 +341,14 @@ def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, ptfadapter, - common_param, tbinfo, skip_traffic_test): # noqa F811 + common_param, tbinfo): # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN manually set checksum as 0xffff and send the packet to DUT # THEN DUT should drop packet with 0xffff and add drop count duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] if is_mellanox_fanout(duthost, localhost): pytest.skip("Not supported at Mellanox fanout") 
(peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, @@ -404,7 +408,8 @@ def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, logger.info("Setting PKT_NUM_ZERO for t2 max topology with 0.2 tolerance") self.PKT_NUM_ZERO = self.PKT_NUM * 0.2 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -419,7 +424,7 @@ def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, .format(match_cnt)) def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a ip packet, after forwarded(ttl-1) by DUT, # it's checksum will be 0xffff after wrongly incrementally recomputed # ref to https://datatracker.ietf.org/doc/html/rfc1624 @@ -428,6 +433,7 @@ def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_on # THEN DUT recompute new checksum correctly and forward packet as expected. 
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -477,7 +483,8 @@ def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_on tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -492,12 +499,13 @@ def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_on .format(tx_ok, match_cnt)) def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a ip packet, after forwarded(ttl-1) by DUT, it's checksum will be 0x0000 after recompute from scratch # WHEN send the packet to DUT # THEN DUT recompute new checksum as 0x0000 and forward packet as expected. 
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -547,7 +555,8 @@ def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_on tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -562,11 +571,12 @@ def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_on .format(tx_ok, match_cnt)) def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a random normal ip packet # WHEN send the packet to DUT # THEN DUT should forward it as normal ip packet, nothing change but ttl-1 duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -610,7 +620,8 @@ def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_fronte tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected 
range".format(rx_ok)) @@ -625,11 +636,12 @@ def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_fronte .format(tx_ok, match_cnt)) def test_drop_ip_packet_with_wrong_0xffff_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a random normal ip packet, and manually modify checksum to 0xffff # WHEN send the packet to DUT # THEN DUT should drop it and add drop count duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, ptf_indices, ingress_router_mac) = common_param pkt = testutils.simple_ip_packet( @@ -665,8 +677,8 @@ def test_drop_ip_packet_with_wrong_0xffff_chksum(self, duthosts, enum_rand_one_p tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - asic_type = duthost.facts['asic_type'] - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) @@ -678,11 +690,12 @@ def test_drop_ip_packet_with_wrong_0xffff_chksum(self, duthosts, enum_rand_one_p "Dropped {} packets in tx, not in expected range".format(tx_err)) def test_drop_l3_ip_packet_non_dut_mac(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param, skip_traffic_test): # noqa F811 + ptfadapter, common_param): # GIVEN a random normal ip packet, and random dest mac address # WHEN send the packet to DUT with dst_mac != ingress_router_mac to a layer 3 interface # THEN DUT should drop it and add drop count duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asic_type = duthost.facts["asic_type"] (peer_ip_ifaces_pair, 
rif_rx_ifaces, rif_support, ptf_port_idx, pc_ports_map, _, ingress_router_mac) = common_param @@ -721,7 +734,8 @@ def test_drop_l3_ip_packet_non_dut_mac(self, duthosts, enum_rand_one_per_hwsku_f tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_rif_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 - if skip_traffic_test is True: + if asic_type == "vs": + logger.info("Skipping packet count check on VS platform") return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py index 0e07ce18111..1c462271bb5 100644 --- a/tests/ipfwd/test_dir_bcast.py +++ b/tests/ipfwd/test_dir_bcast.py @@ -2,7 +2,7 @@ import json import logging -from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.ptf_runner import ptf_runner from datetime import datetime from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -65,7 +65,7 @@ def ptf_test_port_map(duthost, ptfhost, mg_facts, testbed_type, tbinfo): def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 duthost = duthosts[rand_one_dut_hostname] testbed_type = tbinfo['topo']['name'] @@ -81,8 +81,6 @@ def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, 'ptf_test_port_map': PTF_TEST_PORT_MAP } log_file = "/tmp/dir_bcast.BcastTest.{}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) - if skip_traffic_test is True: - return ptf_runner( ptfhost, 'ptftests', From 9b81013c3bbcfa9d680ff36be813882a6d3fa5ca Mon Sep 17 00:00:00 2001 From: Chun'ang Li 
<39114813+lerry-lee@users.noreply.github.com> Date: Tue, 12 Nov 2024 14:31:35 +0800 Subject: [PATCH 062/175] [CI] Enhance elastictest template and test_plan.py, fix az token issue (#15497) * enhance elastictest template, use bash script instead of azcli task, improve and fix azlogin and get token when requesting APIs * Directly specify the value of MGMT_BRANCH as master. Because dynamic assignment does not take effect immediately for the conditional statement of pipeline yaml, the expected value of MGMT_BRANCH cannot be obtained, and the locally updated testplan.py cannot be used. Signed-off-by: Chun'ang Li --- .../run-test-elastictest-template.yml | 343 ++++++++---------- .azure-pipelines/test_plan.py | 334 ++++++++--------- azure-pipelines.yml | 20 +- 3 files changed, 331 insertions(+), 366 deletions(-) diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 595a6cb3136..882ab9ce6b9 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -1,3 +1,10 @@ +# Description: +# - This template manages the entire life cycle of the Elastictest test plan in test pipelines. +# +# Important!!!: +# - This template is referenced in multiple pipelines. +# - Any updates to this file must be tested on all dependent pipelines to ensure compatibility and prevent disruptions. 
+ parameters: - name: TOPOLOGY type: string @@ -184,206 +191,176 @@ steps: fi displayName: "Install azure-cli" - - task: AzureCLI@2 - inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -e - - pip install PyYAML - - rm -f new_test_plan_id.txt - - python ./.azure-pipelines/test_plan.py create \ - -t ${{ parameters.TOPOLOGY }} \ - -o new_test_plan_id.txt \ - --min-worker ${{ parameters.MIN_WORKER }} \ - --max-worker ${{ parameters.MAX_WORKER }} \ - --lock-wait-timeout-seconds ${{ parameters.LOCK_WAIT_TIMEOUT_SECONDS }} \ - --test-set ${{ parameters.TEST_SET }} \ - --kvm-build-id $(KVM_BUILD_ID) \ - --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \ - --deploy-mg-extra-params="${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \ - --common-extra-params="${{ parameters.COMMON_EXTRA_PARAMS }}" \ - --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \ - --ptf_image_tag ${{ parameters.PTF_IMAGE_TAG }} \ - --image_url ${{ parameters.IMAGE_URL }} \ - --upgrade-image-param="${{ parameters.UPGRADE_IMAGE_PARAM }}" \ - --hwsku ${{ parameters.HWSKU }} \ - --test-plan-type ${{ parameters.TEST_PLAN_TYPE }} \ - --platform ${{ parameters.PLATFORM }} \ - --testbed-name "${{ parameters.TESTBED_NAME }}" \ - --scripts "${{ parameters.SCRIPTS }}" \ - --features "${{ parameters.FEATURES }}" \ - --scripts-exclude "${{ parameters.SCRIPTS_EXCLUDE }}" \ - --features-exclude "${{ parameters.FEATURES_EXCLUDE }}" \ - --specific-param='${{ parameters.SPECIFIC_PARAM }}' \ - --affinity='${{ parameters.AFFINITY }}' \ - --build-reason ${{ parameters.BUILD_REASON }} \ - --repo-name ${{ parameters.REPO_NAME }} \ - --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ - --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ - --retry-times ${{ parameters.RETRY_TIMES }} \ - --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ - --requester "${{ parameters.REQUESTER }}" \ - --max-execute-seconds $((${{ 
parameters.MAX_RUN_TEST_MINUTES }} * 60)) \ - --test-plan-num ${{ parameters.TEST_PLAN_NUM }} - - TEST_PLAN_ID_LIST=( $(cat new_test_plan_id.txt) ) - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo "Created test plan $TEST_PLAN_ID" - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - done - TEST_PLAN_ID_LIST_STRING=$(printf "%s," "${TEST_PLAN_ID_LIST[@]}") - TEST_PLAN_ID_LIST_STRING=${TEST_PLAN_ID_LIST_STRING%,} - echo "##vso[task.setvariable variable=TEST_PLAN_ID_LIST_STRING]$TEST_PLAN_ID_LIST_STRING" + - script: | + set -e + + pip install PyYAML + + rm -f new_test_plan_id.txt + + python ./.azure-pipelines/test_plan.py create \ + -t ${{ parameters.TOPOLOGY }} \ + -o new_test_plan_id.txt \ + --min-worker ${{ parameters.MIN_WORKER }} \ + --max-worker ${{ parameters.MAX_WORKER }} \ + --lock-wait-timeout-seconds ${{ parameters.LOCK_WAIT_TIMEOUT_SECONDS }} \ + --test-set ${{ parameters.TEST_SET }} \ + --kvm-build-id $(KVM_BUILD_ID) \ + --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \ + --deploy-mg-extra-params="${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \ + --common-extra-params="${{ parameters.COMMON_EXTRA_PARAMS }}" \ + --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \ + --ptf_image_tag ${{ parameters.PTF_IMAGE_TAG }} \ + --image_url ${{ parameters.IMAGE_URL }} \ + --upgrade-image-param="${{ parameters.UPGRADE_IMAGE_PARAM }}" \ + --hwsku ${{ parameters.HWSKU }} \ + --test-plan-type ${{ parameters.TEST_PLAN_TYPE }} \ + --platform ${{ parameters.PLATFORM }} \ + --testbed-name "${{ parameters.TESTBED_NAME }}" \ + --scripts "${{ parameters.SCRIPTS }}" \ + --features "${{ parameters.FEATURES }}" \ + --scripts-exclude "${{ 
parameters.SCRIPTS_EXCLUDE }}" \ + --features-exclude "${{ parameters.FEATURES_EXCLUDE }}" \ + --specific-param='${{ parameters.SPECIFIC_PARAM }}' \ + --affinity='${{ parameters.AFFINITY }}' \ + --build-reason ${{ parameters.BUILD_REASON }} \ + --repo-name ${{ parameters.REPO_NAME }} \ + --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ + --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ + --retry-times ${{ parameters.RETRY_TIMES }} \ + --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ + --requester "${{ parameters.REQUESTER }}" \ + --max-execute-seconds $((${{ parameters.MAX_RUN_TEST_MINUTES }} * 60)) \ + --test-plan-num ${{ parameters.TEST_PLAN_NUM }} + + TEST_PLAN_ID_LIST=( $(cat new_test_plan_id.txt) ) + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo "Created test plan $TEST_PLAN_ID" + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + done + TEST_PLAN_ID_LIST_STRING=$(printf "%s," "${TEST_PLAN_ID_LIST[@]}") + TEST_PLAN_ID_LIST_STRING=${TEST_PLAN_ID_LIST_STRING%,} + echo "##vso[task.setvariable variable=TEST_PLAN_ID_LIST_STRING]$TEST_PLAN_ID_LIST_STRING" displayName: "Trigger test" - - task: AzureCLI@2 - inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -o - echo "Lock testbed" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n 
"$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED" - echo "[test_plan.py] poll LOCK_TESTBED status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state LOCK_TESTBED - RET=$? - if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - script: | + set -o + echo "Lock testbed" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED" + echo "[test_plan.py] poll LOCK_TESTBED status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state LOCK_TESTBED + RET=$? + if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Lock testbed" - - task: AzureCLI@2 - inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -o - echo "Prepare testbed" - echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. 
Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "PREPARE_TESTBED" finish, it changes into "EXECUTING" - echo "[test_plan.py] poll PREPARE_TESTBED status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state PREPARE_TESTBED - RET=$? - if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ "$failure_count" -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - script: | + set -o + echo "Prepare testbed" + echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "PREPARE_TESTBED" finish, it changes into "EXECUTING" + echo "[test_plan.py] poll PREPARE_TESTBED status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state PREPARE_TESTBED + RET=$? 
+ if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ "$failure_count" -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Prepare testbed" - - task: AzureCLI@2 - inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -o - echo "Run test" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED" - echo "[test_plan.py] poll EXECUTING status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state EXECUTING --expected-result ${{ parameters.EXPECTED_RESULT }} - RET=$? 
- if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - script: | + set -o + echo "Run test" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED" + echo "[test_plan.py] poll EXECUTING status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state EXECUTING --expected-result ${{ parameters.EXPECTED_RESULT }} + RET=$? 
+ if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Run test" timeoutInMinutes: ${{ parameters.MAX_RUN_TEST_MINUTES }} - ${{ if eq(parameters.DUMP_KVM_IF_FAIL, 'True') }}: - - task: AzureCLI@2 - inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -e - echo "KVM dump" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" - echo "##[group][test_plan.py] poll KVMDUMP status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state KVMDUMP - done + - script: | + set -e + echo "KVM dump" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" + echo "##[group][test_plan.py] poll KVMDUMP status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state KVMDUMP + done condition: succeededOrFailed() displayName: "KVM dump" - - task: AzureCLI@2 
- inputs: - azureSubscription: "SONiC-Automation" - scriptType: 'bash' - scriptLocation: 'inlineScript' - inlineScript: | - set -e - echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect." - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - python ./.azure-pipelines/test_plan.py cancel -i $TEST_PLAN_ID - done + - script: | + set -e + echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect." + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + python ./.azure-pipelines/test_plan.py cancel -i $TEST_PLAN_ID + done condition: always() displayName: "Finalize running test plan" diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index f4b07bb2d18..1cc48fdbd31 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -1,3 +1,12 @@ +""" +Description: +- This script provides access to Elastictest test plan API, including creating, canceling, and polling status. + +Important!!!: +- This script is downloaded in multiple pipelines. +- Any updates to this file must be tested on all dependent pipelines to ensure compatibility and prevent disruptions. 
+""" + from __future__ import print_function, division import argparse @@ -8,7 +17,7 @@ import subprocess import copy import time -from datetime import datetime, timedelta +from datetime import datetime, timezone import requests import yaml @@ -22,8 +31,7 @@ INTERNAL_SONIC_MGMT_REPO = "https://dev.azure.com/mssonic/internal/_git/sonic-mgmt-int" PR_TEST_SCRIPTS_FILE = "pr_test_scripts.yaml" SPECIFIC_PARAM_KEYWORD = "specific_param" -TOLERATE_HTTP_EXCEPTION_TIMES = 20 -TOKEN_EXPIRE_HOURS = 1 +MAX_POLL_RETRY_TIMES = 10 MAX_GET_TOKEN_RETRY_TIMES = 3 TEST_PLAN_STATUS_UNSUCCESSFUL_FINISHED = ["FAILED", "CANCELLED"] TEST_PLAN_STEP_STATUS_UNFINISHED = ["EXECUTING", None] @@ -83,13 +91,15 @@ def __init__(self, status): def get_status(self): return self.status.value - def print_logs(self, test_plan_id, resp_data, start_time): + def print_logs(self, test_plan_id, resp_data, expected_status, start_time): status = resp_data.get("status", None) current_status = test_plan_status_factory(status).get_status() if current_status == self.get_status(): - print("Test plan id: {}, status: {}, elapsed: {:.0f} seconds" - .format(test_plan_id, resp_data.get("status", None), time.time() - start_time)) + print( + f"Test plan id: {test_plan_id}, status: {resp_data.get('status', None)}, " + f"expected_status: {expected_status}, elapsed: {time.time() - start_time:.0f} seconds" + ) class InitStatus(AbstractStatus): @@ -111,10 +121,12 @@ class ExecutingStatus(AbstractStatus): def __init__(self): super(ExecutingStatus, self).__init__(TestPlanStatus.EXECUTING) - def print_logs(self, test_plan_id, resp_data, start_time): - print("Test plan id: {}, status: {}, progress: {:.2f}%, elapsed: {:.0f} seconds" - .format(test_plan_id, resp_data.get("status", None), - resp_data.get("progress", 0) * 100, time.time() - start_time)) + def print_logs(self, test_plan_id, resp_data, expected_status, start_time): + print( + f"Test plan id: {test_plan_id}, status: {resp_data.get('status', None)}, " + 
f"expected_status: {expected_status}, progress: {resp_data.get('progress', 0) * 100:.2f}%, " + f"elapsed: {time.time() - start_time:.0f} seconds" + ) class KvmDumpStatus(AbstractStatus): @@ -150,74 +162,81 @@ def parse_list_from_str(s): if single_str.strip()] +def run_cmd(cmd): + process = subprocess.Popen( + cmd.split(), + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = process.communicate() + return_code = process.returncode + + if return_code != 0: + raise Exception(f'Command {cmd} execution failed, rc={return_code}, error={stderr}') + return stdout, stderr, return_code + + class TestPlanManager(object): - def __init__(self, scheduler_url, community_url, frontend_url, client_id=None): + def __init__(self, scheduler_url, frontend_url, client_id, managed_identity_id): self.scheduler_url = scheduler_url - self.community_url = community_url self.frontend_url = frontend_url self.client_id = client_id - self.with_auth = False - self._token = None - self._token_expires_on = None - if self.client_id: - self.with_auth = True - self.get_token() - - def cmd(self, cmds): - process = subprocess.Popen( - cmds, - shell=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = process.communicate() - return_code = process.returncode - - return stdout, stderr, return_code - - def az_run(self, cmd): - stdout, stderr, retcode = self.cmd(cmd.split()) - if retcode != 0: - raise Exception(f'Command {cmd} execution failed, rc={retcode}, error={stderr}') - return stdout, stderr, retcode + self.managed_identity_id = managed_identity_id def get_token(self): - token_is_valid = \ - self._token_expires_on is not None and \ - (self._token_expires_on - datetime.now()) > timedelta(hours=TOKEN_EXPIRE_HOURS) + # 1. 
Run az login with re-try + az_login_cmd = f"az login --identity --username {self.managed_identity_id}" + az_login_attempts = 0 + while az_login_attempts < MAX_GET_TOKEN_RETRY_TIMES: + try: + stdout, _, _ = run_cmd(az_login_cmd) + print(f"Az login successfully. Login time: {datetime.now(timezone.utc)}") + break + except Exception as exception: + az_login_attempts += 1 + print( + f"Failed to az login with exception: {repr(exception)}. " + f"Retry {MAX_GET_TOKEN_RETRY_TIMES - az_login_attempts} times to login." + ) - if self._token is not None and token_is_valid: - return self._token + # If az login failed, return with exception + if az_login_attempts >= MAX_GET_TOKEN_RETRY_TIMES: + raise Exception(f"Failed to az login after {MAX_GET_TOKEN_RETRY_TIMES} attempts.") - cmd = 'az account get-access-token --resource {}'.format(self.client_id) - attempt = 0 - while attempt < MAX_GET_TOKEN_RETRY_TIMES: + # 2. Get access token with re-try + get_token_cmd = f"az account get-access-token --resource {self.client_id}" + get_token_attempts = 0 + while get_token_attempts < MAX_GET_TOKEN_RETRY_TIMES: try: - stdout, _, _ = self.az_run(cmd) + stdout, _, _ = run_cmd(get_token_cmd) token = json.loads(stdout.decode("utf-8")) - self._token = token.get("accessToken", None) - if not self._token: - raise Exception("Parse token from stdout failed") + access_token = token.get("accessToken", None) + if not access_token: + raise Exception("Parse token from stdout failed, accessToken is None.") # Parse token expires time from string token_expires_on = token.get("expiresOn", "") - self._token_expires_on = datetime.strptime(token_expires_on, "%Y-%m-%d %H:%M:%S.%f") - print("Get token successfully.") - return self._token + if token_expires_on: + print(f"Get token successfully. 
Token will expire on {token_expires_on}.") + + return access_token except Exception as exception: - attempt += 1 - print("Failed to get token with exception: {}".format(repr(exception))) + get_token_attempts += 1 + print(f"Failed to get token with exception: {repr(exception)}.") - raise Exception("Failed to get token after {} attempts".format(MAX_GET_TOKEN_RETRY_TIMES)) + # If az get token failed, return with exception + if get_token_attempts >= MAX_GET_TOKEN_RETRY_TIMES: + raise Exception(f"Failed to get token after {MAX_GET_TOKEN_RETRY_TIMES} attempts") def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params="", kvm_build_id="", min_worker=None, max_worker=None, pr_id="unknown", output=None, common_extra_params="", **kwargs): - tp_url = "{}/test_plan".format(self.scheduler_url) + tp_url = f"{self.scheduler_url}/test_plan" testbed_name = parse_list_from_str(kwargs.get("testbed_name", None)) image_url = kwargs.get("image_url", None) hwsku = kwargs.get("hwsku", None) @@ -229,8 +248,10 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params features_exclude = parse_list_from_str(kwargs.get("features_exclude", None)) ptf_image_tag = kwargs.get("ptf_image_tag", None) - print("Creating test plan, topology: {}, name: {}, build info:{} {} {}".format(topology, test_plan_name, - repo_name, pr_id, build_id)) + print( + f"Creating test plan, topology: {topology}, name: {test_plan_name}, " + f"build info:{repo_name} {pr_id} {build_id}" + ) print("Test scripts to be covered in this test plan:") print(json.dumps(scripts, indent=4)) @@ -320,10 +341,9 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params "extra_params": {}, "priority": 10 } - print('Creating test plan with payload:\n{}'.format(json.dumps(payload, indent=4))) + print(f"Creating test plan with payload:\n{json.dumps(payload, indent=4)}") headers = { - "Authorization": "Bearer {}".format(self.get_token()), - "scheduler-site": "PRTest", 
+ "Authorization": f"Bearer {self.get_token()}", "Content-Type": "application/json" } raw_resp = {} @@ -331,17 +351,16 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params raw_resp = requests.post(tp_url, headers=headers, data=json.dumps(payload), timeout=10) resp = raw_resp.json() except Exception as exception: - raise Exception("HTTP execute failure, url: {}, raw_resp: {}, exception: {}" - .format(tp_url, str(raw_resp), str(exception))) + raise Exception(f"HTTP execute failure, url: {tp_url}, raw_resp: {raw_resp}, exception: {str(exception)}") if not resp["data"]: - raise Exception("Pre deploy action failed with error: {}".format(resp["errmsg"])) + raise Exception(f"Create test plan failed with error: {resp['errmsg']}") if not resp["success"]: - raise Exception("Create test plan failed with error: {}".format(resp["errmsg"])) + raise Exception(f"Create test plan failed with error: {resp['errmsg']}") - print("Result of creating test plan: {}".format(str(resp["data"]))) + print(f"Result of creating test plan: {str(resp['data'])}") if output: - print("Store new test plan id to file {}".format(output)) + print(f"Store new test plan id to file {output}") with open(output, "a") as f: f.write(str(resp["data"]) + "\n") @@ -349,15 +368,14 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params def cancel(self, test_plan_id): - tp_url = "{}/test_plan/{}".format(self.scheduler_url, test_plan_id) - cancel_url = "{}/cancel".format(tp_url) + tp_url = f"{self.scheduler_url}/test_plan/{test_plan_id}" + cancel_url = f"{tp_url}/cancel" - print("Cancelling test plan at {}".format(cancel_url)) + print(f"Cancelling test plan at {cancel_url}") payload = json.dumps({}) headers = { - "Authorization": "Bearer {}".format(self.get_token()), - "scheduler-site": "PRTest", + "Authorization": f"Bearer {self.get_token()}", "Content-Type": "application/json" } @@ -366,73 +384,57 @@ def cancel(self, test_plan_id): raw_resp = 
requests.post(cancel_url, headers=headers, data=payload, timeout=10) resp = raw_resp.json() except Exception as exception: - raise Exception("HTTP execute failure, url: {}, raw_resp: {}, exception: {}" - .format(cancel_url, str(raw_resp), str(exception))) + raise Exception(f"HTTP execute failure, url: {cancel_url}, raw_resp: {str(raw_resp)}, " + f"exception: {str(exception)}") if not resp["success"]: - raise Exception("Cancel test plan failed with error: {}".format(resp["errmsg"])) + raise Exception(f"Cancel test plan failed with error: {resp['errmsg']}") - print("Result of cancelling test plan at {}:".format(tp_url)) + print(f"Result of cancelling test plan at {tp_url}:") print(str(resp["data"])) def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expected_result=None): - print("Polling progress and status of test plan at {}/scheduler/testplan/{}" - .format(self.frontend_url, test_plan_id)) - print("Polling interval: {} seconds".format(interval)) + print(f"Polling progress and status of test plan at {self.frontend_url}/scheduler/testplan/{test_plan_id}") + print(f"Polling interval: {interval} seconds") - poll_url = "{}/test_plan/{}/get_test_plan_status".format(self.scheduler_url, test_plan_id) - poll_url_no_auth = "{}/get_test_plan_status/{}".format(self.community_url, test_plan_id) + poll_url = f"{self.scheduler_url}/test_plan/{test_plan_id}/get_test_plan_status" + # In current polling task, initialize headers one time to avoid frequent token accessing + # For some tasks running over 24h, then token may expire, need a fresh headers = { + "Authorization": f"Bearer {self.get_token()}", "Content-Type": "application/json" } start_time = time.time() - http_exception_times = 0 - http_exception_times_no_auth = 0 - failed_poll_auth_url = False + poll_retry_times = 0 while timeout < 0 or (time.time() - start_time) < timeout: resp = None - # To make the transition smoother, first try to access the original API - if not failed_poll_auth_url: - try: - if 
self.with_auth: - headers["Authorization"] = "Bearer {}".format(self.get_token()) - resp = requests.get(poll_url, headers=headers, timeout=10).json() - except Exception as exception: - print("HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url, resp, - str(exception))) - http_exception_times = http_exception_times + 1 - if http_exception_times >= TOLERATE_HTTP_EXCEPTION_TIMES: - failed_poll_auth_url = True - else: - time.sleep(interval) - continue - - # If failed on poll auth url(most likely token has expired), try with no-auth url - else: - print("Polling test plan status failed with auth url, try with no-auth url.") - try: - resp = requests.get(poll_url_no_auth, headers={"Content-Type": "application/json"}, - timeout=10).json() - except Exception as e: - print("HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url_no_auth, resp, - repr(e))) - http_exception_times_no_auth = http_exception_times_no_auth + 1 - if http_exception_times_no_auth >= TOLERATE_HTTP_EXCEPTION_TIMES: - raise Exception( - "HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url_no_auth, resp, - repr(e))) - else: - time.sleep(interval) - continue + try: + resp = requests.get(poll_url, headers=headers, timeout=10).json() - if not resp: - raise Exception("Poll test plan status failed with request error, no response!") + if not resp: + raise Exception("Poll test plan status failed with request error, no response!") - if not resp["success"]: - raise Exception("Query test plan at {} failed with error: {}".format(poll_url, resp["errmsg"])) + if not resp["success"]: + raise Exception(f"Get test plan status failed with error: {resp['errmsg']}") + + resp_data = resp.get("data", None) + if not resp_data: + raise Exception("No valid data in response.") + + except Exception as exception: + print(f"Failed to get valid response, url: {poll_url}, raw_resp: {resp}, exception: {str(exception)}") - resp_data = resp.get("data", None) - if not 
resp_data: - raise Exception("No valid data in response: {}".format(str(resp))) + # Refresh headers token to address token expiration issue + headers = { + "Authorization": f"Bearer {self.get_token()}", + "Content-Type": "application/json" + } + + poll_retry_times = poll_retry_times + 1 + if poll_retry_times >= MAX_POLL_RETRY_TIMES: + raise Exception("Poll test plan status failed, exceeded the maximum number of retries.") + else: + time.sleep(interval) + continue current_tp_status = resp_data.get("status", None) current_tp_result = resp_data.get("result", None) @@ -441,11 +443,10 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte current_status = test_plan_status_factory(current_tp_status) expected_status = test_plan_status_factory(expected_state) - print("current test plan status: {}, expected status: {}".format(current_tp_status, expected_state)) + current_status.print_logs(test_plan_id, resp_data, expected_state, start_time) - if expected_status.get_status() == current_status.get_status(): - current_status.print_logs(test_plan_id, resp_data, start_time) - elif expected_status.get_status() < current_status.get_status(): + # If test plan has finished current step, its now status will behind the expected status + if expected_status.get_status() < current_status.get_status(): steps = None step_status = None runtime = resp_data.get("runtime", None) @@ -460,7 +461,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # Print test summary test_summary = resp_data.get("runtime", {}).get("test_summary", None) if test_summary: - print("Test summary:\n{}".format(json.dumps(test_summary, indent=4))) + print(f"Test summary:\n{json.dumps(test_summary, indent=4)}") """ In below scenarios, need to return false to pipeline. 
@@ -477,38 +478,34 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # Print error type and message err_code = resp_data.get("runtime", {}).get("err_code", None) if err_code: - print("Error type: {}".format(err_code)) + print(f"Error type: {err_code}") err_msg = resp_data.get("runtime", {}).get("message", None) if err_msg: - print("Error message: {}".format(err_msg)) + print(f"Error message: {err_msg}") - raise Exception("Test plan id: {}, status: {}, result: {}, Elapsed {:.0f} seconds. " - "Check {}/scheduler/testplan/{} for test plan status" - .format(test_plan_id, step_status, current_tp_result, time.time() - start_time, - self.frontend_url, - test_plan_id)) + raise Exception( + f"Test plan id: {test_plan_id}, status: {step_status}, " + f"result: {current_tp_result}, Elapsed {time.time() - start_time:.0f} seconds. " + f"Check {self.frontend_url}/scheduler/testplan/{test_plan_id} for test plan status" + ) if expected_result: if current_tp_result != expected_result: - raise Exception("Test plan id: {}, status: {}, result: {} not match expected result: {}, " - "Elapsed {:.0f} seconds. " - "Check {}/scheduler/testplan/{} for test plan status" - .format(test_plan_id, step_status, current_tp_result, - expected_result, time.time() - start_time, - self.frontend_url, - test_plan_id)) - - print("Current step status is {}".format(step_status)) + raise Exception( + f"Test plan id: {test_plan_id}, status: {step_status}, " + f"result: {current_tp_result} not match expected result: {expected_result}, " + f"Elapsed {time.time() - start_time:.0f} seconds. 
" + f"Check {self.frontend_url}/scheduler/testplan/{test_plan_id} for test plan status" + ) + + print(f"Current step status is {step_status}.") return - else: - print("Current test plan state is {}, waiting for the expected state {}".format(current_tp_status, - expected_state)) time.sleep(interval) else: raise PollTimeoutException( - "Max polling time reached, test plan at {} is not successfully finished or cancelled".format(poll_url) + f"Max polling time reached, test plan at {poll_url} is not successfully finished or cancelled" ) @@ -930,30 +927,28 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # https://github.com/microsoft/azure-pipelines-tasks/issues/10331 args.test_plan_id = args.test_plan_id.replace("'", "") - print("Test plan utils parameters: {}".format(args)) - auth_env = ["CLIENT_ID"] - required_env = ["ELASTICTEST_SCHEDULER_BACKEND_URL"] + print(f"Test plan utils parameters: {args}") - if args.action in ["create", "cancel"]: - required_env.extend(auth_env) + required_env = ["ELASTICTEST_SCHEDULER_BACKEND_URL", "CLIENT_ID", "SONIC_AUTOMATION_UMI"] env = { - "elastictest_scheduler_backend_url": os.environ.get("ELASTICTEST_SCHEDULER_BACKEND_URL"), - "elastictest_community_url": os.environ.get("ELASTICTEST_COMMUNITY_URL"), - "client_id": os.environ.get("ELASTICTEST_MSAL_CLIENT_ID"), - "frontend_url": os.environ.get("ELASTICTEST_FRONTEND_URL", "https://elastictest.org"), + "ELASTICTEST_SCHEDULER_BACKEND_URL": os.environ.get("ELASTICTEST_SCHEDULER_BACKEND_URL"), + "CLIENT_ID": os.environ.get("ELASTICTEST_MSAL_CLIENT_ID"), + "FRONTEND_URL": os.environ.get("ELASTICTEST_FRONTEND_URL", "https://elastictest.org"), + "SONIC_AUTOMATION_UMI": os.environ.get("SONIC_AUTOMATION_UMI"), } env_missing = [k.upper() for k, v in env.items() if k.upper() in required_env and not v] if env_missing: - print("Missing required environment variables: {}".format(env_missing)) + print(f"Missing required environment variables: {env_missing}.") 
sys.exit(1) try: tp = TestPlanManager( - env["elastictest_scheduler_backend_url"], - env["elastictest_community_url"], - env["frontend_url"], - env["client_id"]) + env["ELASTICTEST_SCHEDULER_BACKEND_URL"], + env["FRONTEND_URL"], + env["CLIENT_ID"], + env["SONIC_AUTOMATION_UMI"] + ) if args.action == "create": pr_id = os.environ.get("SYSTEM_PULLREQUEST_PULLREQUESTNUMBER") or os.environ.get( @@ -964,14 +959,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte job_name = os.environ.get("SYSTEM_JOBDISPLAYNAME") repo_name = args.repo_name if args.repo_name else os.environ.get("BUILD_REPOSITORY_NAME") - test_plan_prefix = "{repo}_{reason}_PR_{pr_id}_BUILD_{build_id}_JOB_{job_name}" \ - .format( - repo=repo, - reason=reason, - pr_id=pr_id, - build_id=build_id, - job_name=job_name - ).replace(' ', '_') + test_plan_prefix = f"{repo}_{reason}_PR_{pr_id}_BUILD_{build_id}_JOB_{job_name}".replace(' ', '_') scripts = args.scripts specific_param = json.loads(args.specific_param) @@ -989,7 +977,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte for num in range(args.test_plan_num): test_plan_name = copy.copy(test_plan_prefix) if args.test_plan_num > 1: - test_plan_name = "{}_{}".format(test_plan_name, num + 1) + test_plan_name = f"{test_plan_name}_{num + 1}" tp.create( args.topology, @@ -1033,8 +1021,8 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte tp.cancel(args.test_plan_id) sys.exit(0) except PollTimeoutException as e: - print("Polling test plan failed with exception: {}".format(repr(e))) + print(f"Polling test plan failed with exception: {repr(e)}") sys.exit(2) except Exception as e: - print("Operation failed with exception: {}".format(repr(e))) + print(f"Operation failed with exception: {repr(e)}") sys.exit(3) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 1256f817404..d268873c065 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -71,7 +71,7 @@ 
stages: MIN_WORKER: $(T0_INSTANCE_NUM) MAX_WORKER: $(T0_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: t0_2vlans_elastictest displayName: "kvmtest-t0-2vlans by Elastictest" @@ -87,7 +87,7 @@ stages: MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM) DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a" KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: t1_lag_elastictest displayName: "kvmtest-t1-lag by Elastictest" @@ -101,7 +101,7 @@ stages: MIN_WORKER: $(T1_LAG_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: dualtor_elastictest displayName: "kvmtest-dualtor-t0 by Elastictest" @@ -116,7 +116,7 @@ stages: MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) COMMON_EXTRA_PARAMS: "--disable_loganalyzer " KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: multi_asic_elastictest displayName: "kvmtest-multi-asic-t1-lag by Elastictest" @@ -132,7 +132,7 @@ stages: MAX_WORKER: $(MULTI_ASIC_INSTANCE_NUM) NUM_ASIC: 4 KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: sonic_t0_elastictest displayName: "kvmtest-t0-sonic by Elastictest" @@ -149,7 +149,7 @@ stages: COMMON_EXTRA_PARAMS: "--neighbor_type=sonic " VM_TYPE: vsonic KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: dpu_elastictest displayName: "kvmtest-dpu by Elastictest" @@ -163,7 +163,7 @@ stages: MIN_WORKER: $(T0_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: onboarding_elastictest_t0 displayName: "onboarding t0 testcases by Elastictest - optional" @@ -179,7 +179,7 @@ stages: MIN_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - 
MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" TEST_SET: onboarding_t0 - job: onboarding_elastictest_t1 @@ -196,7 +196,7 @@ stages: MIN_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" TEST_SET: onboarding_t1 # - job: onboarding_elastictest_dualtor @@ -213,7 +213,7 @@ stages: # MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # KVM_IMAGE_BRANCH: $(BUILD_BRANCH) -# MGMT_BRANCH: $(BUILD_BRANCH) +# MGMT_BRANCH: "master" # TEST_SET: onboarding_dualtor # - job: wan_elastictest From 1690f53aaa547c52c7ec868e6d7e682724b4c329 Mon Sep 17 00:00:00 2001 From: Chun'ang Li <39114813+lerry-lee@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:00:01 +0800 Subject: [PATCH 063/175] =?UTF-8?q?Revert=20"[CI]=20Enhance=20elastictest?= =?UTF-8?q?=20template=20and=20test=5Fplan.py,=20fix=20az=20token=20issu?= =?UTF-8?q?=E2=80=A6"=20(#15502)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9b81013c3bbcfa9d680ff36be813882a6d3fa5ca. --- .../run-test-elastictest-template.yml | 343 ++++++++++-------- .azure-pipelines/test_plan.py | 334 +++++++++-------- azure-pipelines.yml | 20 +- 3 files changed, 366 insertions(+), 331 deletions(-) diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 882ab9ce6b9..595a6cb3136 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -1,10 +1,3 @@ -# Description: -# - This template manages the entire life cycle of the Elastictest test plan in test pipelines. -# -# Important!!!: -# - This template is referenced in multiple pipelines. -# - Any updates to this file must be tested on all dependent pipelines to ensure compatibility and prevent disruptions. 
- parameters: - name: TOPOLOGY type: string @@ -191,176 +184,206 @@ steps: fi displayName: "Install azure-cli" - - script: | - set -e - - pip install PyYAML - - rm -f new_test_plan_id.txt - - python ./.azure-pipelines/test_plan.py create \ - -t ${{ parameters.TOPOLOGY }} \ - -o new_test_plan_id.txt \ - --min-worker ${{ parameters.MIN_WORKER }} \ - --max-worker ${{ parameters.MAX_WORKER }} \ - --lock-wait-timeout-seconds ${{ parameters.LOCK_WAIT_TIMEOUT_SECONDS }} \ - --test-set ${{ parameters.TEST_SET }} \ - --kvm-build-id $(KVM_BUILD_ID) \ - --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \ - --deploy-mg-extra-params="${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \ - --common-extra-params="${{ parameters.COMMON_EXTRA_PARAMS }}" \ - --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \ - --ptf_image_tag ${{ parameters.PTF_IMAGE_TAG }} \ - --image_url ${{ parameters.IMAGE_URL }} \ - --upgrade-image-param="${{ parameters.UPGRADE_IMAGE_PARAM }}" \ - --hwsku ${{ parameters.HWSKU }} \ - --test-plan-type ${{ parameters.TEST_PLAN_TYPE }} \ - --platform ${{ parameters.PLATFORM }} \ - --testbed-name "${{ parameters.TESTBED_NAME }}" \ - --scripts "${{ parameters.SCRIPTS }}" \ - --features "${{ parameters.FEATURES }}" \ - --scripts-exclude "${{ parameters.SCRIPTS_EXCLUDE }}" \ - --features-exclude "${{ parameters.FEATURES_EXCLUDE }}" \ - --specific-param='${{ parameters.SPECIFIC_PARAM }}' \ - --affinity='${{ parameters.AFFINITY }}' \ - --build-reason ${{ parameters.BUILD_REASON }} \ - --repo-name ${{ parameters.REPO_NAME }} \ - --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ - --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ - --retry-times ${{ parameters.RETRY_TIMES }} \ - --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ - --requester "${{ parameters.REQUESTER }}" \ - --max-execute-seconds $((${{ parameters.MAX_RUN_TEST_MINUTES }} * 60)) \ - --test-plan-num ${{ parameters.TEST_PLAN_NUM }} - - TEST_PLAN_ID_LIST=( $(cat new_test_plan_id.txt) 
) - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo "Created test plan $TEST_PLAN_ID" - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - done - TEST_PLAN_ID_LIST_STRING=$(printf "%s," "${TEST_PLAN_ID_LIST[@]}") - TEST_PLAN_ID_LIST_STRING=${TEST_PLAN_ID_LIST_STRING%,} - echo "##vso[task.setvariable variable=TEST_PLAN_ID_LIST_STRING]$TEST_PLAN_ID_LIST_STRING" + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -e + + pip install PyYAML + + rm -f new_test_plan_id.txt + + python ./.azure-pipelines/test_plan.py create \ + -t ${{ parameters.TOPOLOGY }} \ + -o new_test_plan_id.txt \ + --min-worker ${{ parameters.MIN_WORKER }} \ + --max-worker ${{ parameters.MAX_WORKER }} \ + --lock-wait-timeout-seconds ${{ parameters.LOCK_WAIT_TIMEOUT_SECONDS }} \ + --test-set ${{ parameters.TEST_SET }} \ + --kvm-build-id $(KVM_BUILD_ID) \ + --kvm-image-branch "${{ parameters.KVM_IMAGE_BRANCH }}" \ + --deploy-mg-extra-params="${{ parameters.DEPLOY_MG_EXTRA_PARAMS }}" \ + --common-extra-params="${{ parameters.COMMON_EXTRA_PARAMS }}" \ + --vm-type ${{ parameters.VM_TYPE }} --num-asic ${{ parameters.NUM_ASIC }} \ + --ptf_image_tag ${{ parameters.PTF_IMAGE_TAG }} \ + --image_url ${{ parameters.IMAGE_URL }} \ + --upgrade-image-param="${{ parameters.UPGRADE_IMAGE_PARAM }}" \ + --hwsku ${{ parameters.HWSKU }} \ + --test-plan-type ${{ parameters.TEST_PLAN_TYPE }} \ + --platform ${{ parameters.PLATFORM }} \ + --testbed-name "${{ parameters.TESTBED_NAME }}" \ + --scripts "${{ parameters.SCRIPTS }}" \ + --features "${{ parameters.FEATURES }}" \ + --scripts-exclude "${{ parameters.SCRIPTS_EXCLUDE }}" 
\ + --features-exclude "${{ parameters.FEATURES_EXCLUDE }}" \ + --specific-param='${{ parameters.SPECIFIC_PARAM }}' \ + --affinity='${{ parameters.AFFINITY }}' \ + --build-reason ${{ parameters.BUILD_REASON }} \ + --repo-name ${{ parameters.REPO_NAME }} \ + --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ + --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ + --retry-times ${{ parameters.RETRY_TIMES }} \ + --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ + --requester "${{ parameters.REQUESTER }}" \ + --max-execute-seconds $((${{ parameters.MAX_RUN_TEST_MINUTES }} * 60)) \ + --test-plan-num ${{ parameters.TEST_PLAN_NUM }} + + TEST_PLAN_ID_LIST=( $(cat new_test_plan_id.txt) ) + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo "Created test plan $TEST_PLAN_ID" + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + done + TEST_PLAN_ID_LIST_STRING=$(printf "%s," "${TEST_PLAN_ID_LIST[@]}") + TEST_PLAN_ID_LIST_STRING=${TEST_PLAN_ID_LIST_STRING%,} + echo "##vso[task.setvariable variable=TEST_PLAN_ID_LIST_STRING]$TEST_PLAN_ID_LIST_STRING" displayName: "Trigger test" - - script: | - set -o - echo "Lock testbed" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED" - echo 
"[test_plan.py] poll LOCK_TESTBED status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state LOCK_TESTBED - RET=$? - if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -o + echo "Lock testbed" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "LOCK_TESTBED" finish, it changes into "PREPARE_TESTBED" + echo "[test_plan.py] poll LOCK_TESTBED status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state LOCK_TESTBED + RET=$? + if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Lock testbed" - - script: | - set -o - echo "Prepare testbed" - echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. 
Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "PREPARE_TESTBED" finish, it changes into "EXECUTING" - echo "[test_plan.py] poll PREPARE_TESTBED status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state PREPARE_TESTBED - RET=$? - if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ "$failure_count" -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -o + echo "Prepare testbed" + echo "Preparing the testbed(add-topo, deploy-mg) may take 15-30 minutes. 
Before the testbed is ready, the progress of the test plan keeps displayed as 0, please be patient" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "PREPARE_TESTBED" finish, it changes into "EXECUTING" + echo "[test_plan.py] poll PREPARE_TESTBED status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state PREPARE_TESTBED + RET=$? + if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ "$failure_count" -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Prepare testbed" - - script: | - set -o - echo "Run test" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - failure_count=0 - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED" - echo "[test_plan.py] poll EXECUTING status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state EXECUTING --expected-result ${{ parameters.EXPECTED_RESULT }} - RET=$? 
- if [ $RET -ne 0 ]; then - ((failure_count++)) - fi - done - - if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then - echo "All testplan failed, cancel following steps" - exit 3 - fi + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -o + echo "Run test" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + failure_count=0 + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "EXECUTING" finish, it changes into "KVMDUMP", "FAILED", "CANCELLED" or "FINISHED" + echo "[test_plan.py] poll EXECUTING status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state EXECUTING --expected-result ${{ parameters.EXPECTED_RESULT }} + RET=$? 
+ if [ $RET -ne 0 ]; then + ((failure_count++)) + fi + done + + if [ $failure_count -eq ${#TEST_PLAN_ID_LIST[@]} ]; then + echo "All testplan failed, cancel following steps" + exit 3 + fi displayName: "Run test" timeoutInMinutes: ${{ parameters.MAX_RUN_TEST_MINUTES }} - ${{ if eq(parameters.DUMP_KVM_IF_FAIL, 'True') }}: - - script: | - set -e - echo "KVM dump" - - echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - echo -e -n "\033[33mPlease visit Elastictest page \033[0m" - echo -n "$(ELASTICTEST_FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " - echo -e "\033[33mfor detailed test plan progress \033[0m" - # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" - echo "##[group][test_plan.py] poll KVMDUMP status" - python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state KVMDUMP - done + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -e + echo "KVM dump" + + echo -e "\033[33mSONiC PR system-level test is powered by SONiC Elastictest, for any issue, please send email to sonicelastictest@microsoft.com \033[0m" + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + echo -e -n "\033[33mPlease visit Elastictest page \033[0m" + echo -n "$(FRONTEND_URL)/scheduler/testplan/$TEST_PLAN_ID " + echo -e "\033[33mfor detailed test plan progress \033[0m" + # When "KVMDUMP" finish, it changes into "FAILED", "CANCELLED" or "FINISHED" + echo "##[group][test_plan.py] poll KVMDUMP status" + python ./.azure-pipelines/test_plan.py poll -i $TEST_PLAN_ID --expected-state KVMDUMP + done condition: succeededOrFailed() displayName: "KVM dump" - - script: | - set 
-e - echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect." - IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" - for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" - do - python ./.azure-pipelines/test_plan.py cancel -i $TEST_PLAN_ID - done + - task: AzureCLI@2 + inputs: + azureSubscription: "SONiC-Automation" + scriptType: 'bash' + scriptLocation: 'inlineScript' + inlineScript: | + set -e + echo "Try to cancel test plan $TEST_PLAN_ID, cancelling finished test plan has no effect." + IFS=',' read -ra TEST_PLAN_ID_LIST <<< "$TEST_PLAN_ID_LIST_STRING" + for TEST_PLAN_ID in "${TEST_PLAN_ID_LIST[@]}" + do + python ./.azure-pipelines/test_plan.py cancel -i $TEST_PLAN_ID + done condition: always() displayName: "Finalize running test plan" diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index 1cc48fdbd31..f4b07bb2d18 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -1,12 +1,3 @@ -""" -Description: -- This script provides access to Elastictest test plan API, including creating, canceling, and polling status. - -Important!!!: -- This script is downloaded in multiple pipelines. -- Any updates to this file must be tested on all dependent pipelines to ensure compatibility and prevent disruptions. 
-""" - from __future__ import print_function, division import argparse @@ -17,7 +8,7 @@ import subprocess import copy import time -from datetime import datetime, timezone +from datetime import datetime, timedelta import requests import yaml @@ -31,7 +22,8 @@ INTERNAL_SONIC_MGMT_REPO = "https://dev.azure.com/mssonic/internal/_git/sonic-mgmt-int" PR_TEST_SCRIPTS_FILE = "pr_test_scripts.yaml" SPECIFIC_PARAM_KEYWORD = "specific_param" -MAX_POLL_RETRY_TIMES = 10 +TOLERATE_HTTP_EXCEPTION_TIMES = 20 +TOKEN_EXPIRE_HOURS = 1 MAX_GET_TOKEN_RETRY_TIMES = 3 TEST_PLAN_STATUS_UNSUCCESSFUL_FINISHED = ["FAILED", "CANCELLED"] TEST_PLAN_STEP_STATUS_UNFINISHED = ["EXECUTING", None] @@ -91,15 +83,13 @@ def __init__(self, status): def get_status(self): return self.status.value - def print_logs(self, test_plan_id, resp_data, expected_status, start_time): + def print_logs(self, test_plan_id, resp_data, start_time): status = resp_data.get("status", None) current_status = test_plan_status_factory(status).get_status() if current_status == self.get_status(): - print( - f"Test plan id: {test_plan_id}, status: {resp_data.get('status', None)}, " - f"expected_status: {expected_status}, elapsed: {time.time() - start_time:.0f} seconds" - ) + print("Test plan id: {}, status: {}, elapsed: {:.0f} seconds" + .format(test_plan_id, resp_data.get("status", None), time.time() - start_time)) class InitStatus(AbstractStatus): @@ -121,12 +111,10 @@ class ExecutingStatus(AbstractStatus): def __init__(self): super(ExecutingStatus, self).__init__(TestPlanStatus.EXECUTING) - def print_logs(self, test_plan_id, resp_data, expected_status, start_time): - print( - f"Test plan id: {test_plan_id}, status: {resp_data.get('status', None)}, " - f"expected_status: {expected_status}, progress: {resp_data.get('progress', 0) * 100:.2f}%, " - f"elapsed: {time.time() - start_time:.0f} seconds" - ) + def print_logs(self, test_plan_id, resp_data, start_time): + print("Test plan id: {}, status: {}, progress: {:.2f}%, elapsed: 
{:.0f} seconds" + .format(test_plan_id, resp_data.get("status", None), + resp_data.get("progress", 0) * 100, time.time() - start_time)) class KvmDumpStatus(AbstractStatus): @@ -162,81 +150,74 @@ def parse_list_from_str(s): if single_str.strip()] -def run_cmd(cmd): - process = subprocess.Popen( - cmd.split(), - shell=False, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = process.communicate() - return_code = process.returncode - - if return_code != 0: - raise Exception(f'Command {cmd} execution failed, rc={return_code}, error={stderr}') - return stdout, stderr, return_code - - class TestPlanManager(object): - def __init__(self, scheduler_url, frontend_url, client_id, managed_identity_id): + def __init__(self, scheduler_url, community_url, frontend_url, client_id=None): self.scheduler_url = scheduler_url + self.community_url = community_url self.frontend_url = frontend_url self.client_id = client_id - self.managed_identity_id = managed_identity_id + self.with_auth = False + self._token = None + self._token_expires_on = None + if self.client_id: + self.with_auth = True + self.get_token() + + def cmd(self, cmds): + process = subprocess.Popen( + cmds, + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = process.communicate() + return_code = process.returncode + + return stdout, stderr, return_code + + def az_run(self, cmd): + stdout, stderr, retcode = self.cmd(cmd.split()) + if retcode != 0: + raise Exception(f'Command {cmd} execution failed, rc={retcode}, error={stderr}') + return stdout, stderr, retcode def get_token(self): - # 1. Run az login with re-try - az_login_cmd = f"az login --identity --username {self.managed_identity_id}" - az_login_attempts = 0 - while az_login_attempts < MAX_GET_TOKEN_RETRY_TIMES: - try: - stdout, _, _ = run_cmd(az_login_cmd) - print(f"Az login successfully. 
Login time: {datetime.now(timezone.utc)}") - break - except Exception as exception: - az_login_attempts += 1 - print( - f"Failed to az login with exception: {repr(exception)}. " - f"Retry {MAX_GET_TOKEN_RETRY_TIMES - az_login_attempts} times to login." - ) + token_is_valid = \ + self._token_expires_on is not None and \ + (self._token_expires_on - datetime.now()) > timedelta(hours=TOKEN_EXPIRE_HOURS) - # If az login failed, return with exception - if az_login_attempts >= MAX_GET_TOKEN_RETRY_TIMES: - raise Exception(f"Failed to az login after {MAX_GET_TOKEN_RETRY_TIMES} attempts.") + if self._token is not None and token_is_valid: + return self._token - # 2. Get access token with re-try - get_token_cmd = f"az account get-access-token --resource {self.client_id}" - get_token_attempts = 0 - while get_token_attempts < MAX_GET_TOKEN_RETRY_TIMES: + cmd = 'az account get-access-token --resource {}'.format(self.client_id) + attempt = 0 + while attempt < MAX_GET_TOKEN_RETRY_TIMES: try: - stdout, _, _ = run_cmd(get_token_cmd) + stdout, _, _ = self.az_run(cmd) token = json.loads(stdout.decode("utf-8")) - access_token = token.get("accessToken", None) - if not access_token: - raise Exception("Parse token from stdout failed, accessToken is None.") + self._token = token.get("accessToken", None) + if not self._token: + raise Exception("Parse token from stdout failed") # Parse token expires time from string token_expires_on = token.get("expiresOn", "") - if token_expires_on: - print(f"Get token successfully. 
Token will expire on {token_expires_on}.") - - return access_token + self._token_expires_on = datetime.strptime(token_expires_on, "%Y-%m-%d %H:%M:%S.%f") + print("Get token successfully.") + return self._token except Exception as exception: - get_token_attempts += 1 - print(f"Failed to get token with exception: {repr(exception)}.") + attempt += 1 + print("Failed to get token with exception: {}".format(repr(exception))) - # If az get token failed, return with exception - if get_token_attempts >= MAX_GET_TOKEN_RETRY_TIMES: - raise Exception(f"Failed to get token after {MAX_GET_TOKEN_RETRY_TIMES} attempts") + raise Exception("Failed to get token after {} attempts".format(MAX_GET_TOKEN_RETRY_TIMES)) def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params="", kvm_build_id="", min_worker=None, max_worker=None, pr_id="unknown", output=None, common_extra_params="", **kwargs): - tp_url = f"{self.scheduler_url}/test_plan" + tp_url = "{}/test_plan".format(self.scheduler_url) testbed_name = parse_list_from_str(kwargs.get("testbed_name", None)) image_url = kwargs.get("image_url", None) hwsku = kwargs.get("hwsku", None) @@ -248,10 +229,8 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params features_exclude = parse_list_from_str(kwargs.get("features_exclude", None)) ptf_image_tag = kwargs.get("ptf_image_tag", None) - print( - f"Creating test plan, topology: {topology}, name: {test_plan_name}, " - f"build info:{repo_name} {pr_id} {build_id}" - ) + print("Creating test plan, topology: {}, name: {}, build info:{} {} {}".format(topology, test_plan_name, + repo_name, pr_id, build_id)) print("Test scripts to be covered in this test plan:") print(json.dumps(scripts, indent=4)) @@ -341,9 +320,10 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params "extra_params": {}, "priority": 10 } - print(f"Creating test plan with payload:\n{json.dumps(payload, indent=4)}") + print('Creating test plan with 
payload:\n{}'.format(json.dumps(payload, indent=4))) headers = { - "Authorization": f"Bearer {self.get_token()}", + "Authorization": "Bearer {}".format(self.get_token()), + "scheduler-site": "PRTest", "Content-Type": "application/json" } raw_resp = {} @@ -351,16 +331,17 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params raw_resp = requests.post(tp_url, headers=headers, data=json.dumps(payload), timeout=10) resp = raw_resp.json() except Exception as exception: - raise Exception(f"HTTP execute failure, url: {tp_url}, raw_resp: {raw_resp}, exception: {str(exception)}") + raise Exception("HTTP execute failure, url: {}, raw_resp: {}, exception: {}" + .format(tp_url, str(raw_resp), str(exception))) if not resp["data"]: - raise Exception(f"Create test plan failed with error: {resp['errmsg']}") + raise Exception("Pre deploy action failed with error: {}".format(resp["errmsg"])) if not resp["success"]: - raise Exception(f"Create test plan failed with error: {resp['errmsg']}") + raise Exception("Create test plan failed with error: {}".format(resp["errmsg"])) - print(f"Result of creating test plan: {str(resp['data'])}") + print("Result of creating test plan: {}".format(str(resp["data"]))) if output: - print(f"Store new test plan id to file {output}") + print("Store new test plan id to file {}".format(output)) with open(output, "a") as f: f.write(str(resp["data"]) + "\n") @@ -368,14 +349,15 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params def cancel(self, test_plan_id): - tp_url = f"{self.scheduler_url}/test_plan/{test_plan_id}" - cancel_url = f"{tp_url}/cancel" + tp_url = "{}/test_plan/{}".format(self.scheduler_url, test_plan_id) + cancel_url = "{}/cancel".format(tp_url) - print(f"Cancelling test plan at {cancel_url}") + print("Cancelling test plan at {}".format(cancel_url)) payload = json.dumps({}) headers = { - "Authorization": f"Bearer {self.get_token()}", + "Authorization": "Bearer 
{}".format(self.get_token()), + "scheduler-site": "PRTest", "Content-Type": "application/json" } @@ -384,57 +366,73 @@ def cancel(self, test_plan_id): raw_resp = requests.post(cancel_url, headers=headers, data=payload, timeout=10) resp = raw_resp.json() except Exception as exception: - raise Exception(f"HTTP execute failure, url: {cancel_url}, raw_resp: {str(raw_resp)}, " - f"exception: {str(exception)}") + raise Exception("HTTP execute failure, url: {}, raw_resp: {}, exception: {}" + .format(cancel_url, str(raw_resp), str(exception))) if not resp["success"]: - raise Exception(f"Cancel test plan failed with error: {resp['errmsg']}") + raise Exception("Cancel test plan failed with error: {}".format(resp["errmsg"])) - print(f"Result of cancelling test plan at {tp_url}:") + print("Result of cancelling test plan at {}:".format(tp_url)) print(str(resp["data"])) def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expected_result=None): - print(f"Polling progress and status of test plan at {self.frontend_url}/scheduler/testplan/{test_plan_id}") - print(f"Polling interval: {interval} seconds") + print("Polling progress and status of test plan at {}/scheduler/testplan/{}" + .format(self.frontend_url, test_plan_id)) + print("Polling interval: {} seconds".format(interval)) - poll_url = f"{self.scheduler_url}/test_plan/{test_plan_id}/get_test_plan_status" - # In current polling task, initialize headers one time to avoid frequent token accessing - # For some tasks running over 24h, then token may expire, need a fresh + poll_url = "{}/test_plan/{}/get_test_plan_status".format(self.scheduler_url, test_plan_id) + poll_url_no_auth = "{}/get_test_plan_status/{}".format(self.community_url, test_plan_id) headers = { - "Authorization": f"Bearer {self.get_token()}", "Content-Type": "application/json" } start_time = time.time() - poll_retry_times = 0 + http_exception_times = 0 + http_exception_times_no_auth = 0 + failed_poll_auth_url = False while timeout < 0 or 
(time.time() - start_time) < timeout: resp = None - try: - resp = requests.get(poll_url, headers=headers, timeout=10).json() - - if not resp: - raise Exception("Poll test plan status failed with request error, no response!") - - if not resp["success"]: - raise Exception(f"Get test plan status failed with error: {resp['errmsg']}") + # To make the transition smoother, first try to access the original API + if not failed_poll_auth_url: + try: + if self.with_auth: + headers["Authorization"] = "Bearer {}".format(self.get_token()) + resp = requests.get(poll_url, headers=headers, timeout=10).json() + except Exception as exception: + print("HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url, resp, + str(exception))) + http_exception_times = http_exception_times + 1 + if http_exception_times >= TOLERATE_HTTP_EXCEPTION_TIMES: + failed_poll_auth_url = True + else: + time.sleep(interval) + continue + + # If failed on poll auth url(most likely token has expired), try with no-auth url + else: + print("Polling test plan status failed with auth url, try with no-auth url.") + try: + resp = requests.get(poll_url_no_auth, headers={"Content-Type": "application/json"}, + timeout=10).json() + except Exception as e: + print("HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url_no_auth, resp, + repr(e))) + http_exception_times_no_auth = http_exception_times_no_auth + 1 + if http_exception_times_no_auth >= TOLERATE_HTTP_EXCEPTION_TIMES: + raise Exception( + "HTTP execute failure, url: {}, raw_resp: {}, exception: {}".format(poll_url_no_auth, resp, + repr(e))) + else: + time.sleep(interval) + continue - resp_data = resp.get("data", None) - if not resp_data: - raise Exception("No valid data in response.") + if not resp: + raise Exception("Poll test plan status failed with request error, no response!") - except Exception as exception: - print(f"Failed to get valid response, url: {poll_url}, raw_resp: {resp}, exception: {str(exception)}") + if 
not resp["success"]: + raise Exception("Query test plan at {} failed with error: {}".format(poll_url, resp["errmsg"])) - # Refresh headers token to address token expiration issue - headers = { - "Authorization": f"Bearer {self.get_token()}", - "Content-Type": "application/json" - } - - poll_retry_times = poll_retry_times + 1 - if poll_retry_times >= MAX_POLL_RETRY_TIMES: - raise Exception("Poll test plan status failed, exceeded the maximum number of retries.") - else: - time.sleep(interval) - continue + resp_data = resp.get("data", None) + if not resp_data: + raise Exception("No valid data in response: {}".format(str(resp))) current_tp_status = resp_data.get("status", None) current_tp_result = resp_data.get("result", None) @@ -443,10 +441,11 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte current_status = test_plan_status_factory(current_tp_status) expected_status = test_plan_status_factory(expected_state) - current_status.print_logs(test_plan_id, resp_data, expected_state, start_time) + print("current test plan status: {}, expected status: {}".format(current_tp_status, expected_state)) - # If test plan has finished current step, its now status will behind the expected status - if expected_status.get_status() < current_status.get_status(): + if expected_status.get_status() == current_status.get_status(): + current_status.print_logs(test_plan_id, resp_data, start_time) + elif expected_status.get_status() < current_status.get_status(): steps = None step_status = None runtime = resp_data.get("runtime", None) @@ -461,7 +460,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # Print test summary test_summary = resp_data.get("runtime", {}).get("test_summary", None) if test_summary: - print(f"Test summary:\n{json.dumps(test_summary, indent=4)}") + print("Test summary:\n{}".format(json.dumps(test_summary, indent=4))) """ In below scenarios, need to return false to pipeline. 
@@ -478,34 +477,38 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # Print error type and message err_code = resp_data.get("runtime", {}).get("err_code", None) if err_code: - print(f"Error type: {err_code}") + print("Error type: {}".format(err_code)) err_msg = resp_data.get("runtime", {}).get("message", None) if err_msg: - print(f"Error message: {err_msg}") + print("Error message: {}".format(err_msg)) - raise Exception( - f"Test plan id: {test_plan_id}, status: {step_status}, " - f"result: {current_tp_result}, Elapsed {time.time() - start_time:.0f} seconds. " - f"Check {self.frontend_url}/scheduler/testplan/{test_plan_id} for test plan status" - ) + raise Exception("Test plan id: {}, status: {}, result: {}, Elapsed {:.0f} seconds. " + "Check {}/scheduler/testplan/{} for test plan status" + .format(test_plan_id, step_status, current_tp_result, time.time() - start_time, + self.frontend_url, + test_plan_id)) if expected_result: if current_tp_result != expected_result: - raise Exception( - f"Test plan id: {test_plan_id}, status: {step_status}, " - f"result: {current_tp_result} not match expected result: {expected_result}, " - f"Elapsed {time.time() - start_time:.0f} seconds. " - f"Check {self.frontend_url}/scheduler/testplan/{test_plan_id} for test plan status" - ) - - print(f"Current step status is {step_status}.") + raise Exception("Test plan id: {}, status: {}, result: {} not match expected result: {}, " + "Elapsed {:.0f} seconds. 
" + "Check {}/scheduler/testplan/{} for test plan status" + .format(test_plan_id, step_status, current_tp_result, + expected_result, time.time() - start_time, + self.frontend_url, + test_plan_id)) + + print("Current step status is {}".format(step_status)) return + else: + print("Current test plan state is {}, waiting for the expected state {}".format(current_tp_status, + expected_state)) time.sleep(interval) else: raise PollTimeoutException( - f"Max polling time reached, test plan at {poll_url} is not successfully finished or cancelled" + "Max polling time reached, test plan at {} is not successfully finished or cancelled".format(poll_url) ) @@ -927,28 +930,30 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte # https://github.com/microsoft/azure-pipelines-tasks/issues/10331 args.test_plan_id = args.test_plan_id.replace("'", "") - print(f"Test plan utils parameters: {args}") + print("Test plan utils parameters: {}".format(args)) + auth_env = ["CLIENT_ID"] + required_env = ["ELASTICTEST_SCHEDULER_BACKEND_URL"] - required_env = ["ELASTICTEST_SCHEDULER_BACKEND_URL", "CLIENT_ID", "SONIC_AUTOMATION_UMI"] + if args.action in ["create", "cancel"]: + required_env.extend(auth_env) env = { - "ELASTICTEST_SCHEDULER_BACKEND_URL": os.environ.get("ELASTICTEST_SCHEDULER_BACKEND_URL"), - "CLIENT_ID": os.environ.get("ELASTICTEST_MSAL_CLIENT_ID"), - "FRONTEND_URL": os.environ.get("ELASTICTEST_FRONTEND_URL", "https://elastictest.org"), - "SONIC_AUTOMATION_UMI": os.environ.get("SONIC_AUTOMATION_UMI"), + "elastictest_scheduler_backend_url": os.environ.get("ELASTICTEST_SCHEDULER_BACKEND_URL"), + "elastictest_community_url": os.environ.get("ELASTICTEST_COMMUNITY_URL"), + "client_id": os.environ.get("ELASTICTEST_MSAL_CLIENT_ID"), + "frontend_url": os.environ.get("ELASTICTEST_FRONTEND_URL", "https://elastictest.org"), } env_missing = [k.upper() for k, v in env.items() if k.upper() in required_env and not v] if env_missing: - print(f"Missing required 
environment variables: {env_missing}.") + print("Missing required environment variables: {}".format(env_missing)) sys.exit(1) try: tp = TestPlanManager( - env["ELASTICTEST_SCHEDULER_BACKEND_URL"], - env["FRONTEND_URL"], - env["CLIENT_ID"], - env["SONIC_AUTOMATION_UMI"] - ) + env["elastictest_scheduler_backend_url"], + env["elastictest_community_url"], + env["frontend_url"], + env["client_id"]) if args.action == "create": pr_id = os.environ.get("SYSTEM_PULLREQUEST_PULLREQUESTNUMBER") or os.environ.get( @@ -959,7 +964,14 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte job_name = os.environ.get("SYSTEM_JOBDISPLAYNAME") repo_name = args.repo_name if args.repo_name else os.environ.get("BUILD_REPOSITORY_NAME") - test_plan_prefix = f"{repo}_{reason}_PR_{pr_id}_BUILD_{build_id}_JOB_{job_name}".replace(' ', '_') + test_plan_prefix = "{repo}_{reason}_PR_{pr_id}_BUILD_{build_id}_JOB_{job_name}" \ + .format( + repo=repo, + reason=reason, + pr_id=pr_id, + build_id=build_id, + job_name=job_name + ).replace(' ', '_') scripts = args.scripts specific_param = json.loads(args.specific_param) @@ -977,7 +989,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte for num in range(args.test_plan_num): test_plan_name = copy.copy(test_plan_prefix) if args.test_plan_num > 1: - test_plan_name = f"{test_plan_name}_{num + 1}" + test_plan_name = "{}_{}".format(test_plan_name, num + 1) tp.create( args.topology, @@ -1021,8 +1033,8 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte tp.cancel(args.test_plan_id) sys.exit(0) except PollTimeoutException as e: - print(f"Polling test plan failed with exception: {repr(e)}") + print("Polling test plan failed with exception: {}".format(repr(e))) sys.exit(2) except Exception as e: - print(f"Operation failed with exception: {repr(e)}") + print("Operation failed with exception: {}".format(repr(e))) sys.exit(3) diff --git a/azure-pipelines.yml 
b/azure-pipelines.yml index d268873c065..1256f817404 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -71,7 +71,7 @@ stages: MIN_WORKER: $(T0_INSTANCE_NUM) MAX_WORKER: $(T0_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: t0_2vlans_elastictest displayName: "kvmtest-t0-2vlans by Elastictest" @@ -87,7 +87,7 @@ stages: MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM) DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a" KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: t1_lag_elastictest displayName: "kvmtest-t1-lag by Elastictest" @@ -101,7 +101,7 @@ stages: MIN_WORKER: $(T1_LAG_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: dualtor_elastictest displayName: "kvmtest-dualtor-t0 by Elastictest" @@ -116,7 +116,7 @@ stages: MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) COMMON_EXTRA_PARAMS: "--disable_loganalyzer " KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: multi_asic_elastictest displayName: "kvmtest-multi-asic-t1-lag by Elastictest" @@ -132,7 +132,7 @@ stages: MAX_WORKER: $(MULTI_ASIC_INSTANCE_NUM) NUM_ASIC: 4 KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: sonic_t0_elastictest displayName: "kvmtest-t0-sonic by Elastictest" @@ -149,7 +149,7 @@ stages: COMMON_EXTRA_PARAMS: "--neighbor_type=sonic " VM_TYPE: vsonic KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: dpu_elastictest displayName: "kvmtest-dpu by Elastictest" @@ -163,7 +163,7 @@ stages: MIN_WORKER: $(T0_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) - job: onboarding_elastictest_t0 displayName: "onboarding t0 testcases by Elastictest - optional" @@ -179,7 +179,7 @@ stages: 
MIN_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) TEST_SET: onboarding_t0 - job: onboarding_elastictest_t1 @@ -196,7 +196,7 @@ stages: MIN_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: "master" + MGMT_BRANCH: $(BUILD_BRANCH) TEST_SET: onboarding_t1 # - job: onboarding_elastictest_dualtor @@ -213,7 +213,7 @@ stages: # MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # KVM_IMAGE_BRANCH: $(BUILD_BRANCH) -# MGMT_BRANCH: "master" +# MGMT_BRANCH: $(BUILD_BRANCH) # TEST_SET: onboarding_dualtor # - job: wan_elastictest From 3b2d216615e37ddcd5efbce747bc812b2cac698a Mon Sep 17 00:00:00 2001 From: Yawen Date: Tue, 12 Nov 2024 20:04:06 +1100 Subject: [PATCH 064/175] add Cisco-8122-O128 (#15384) --- ansible/module_utils/port_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index 8df35ca1d8c..8f195d1fe2b 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -358,6 +358,10 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): elif hwsku in ["Cisco-8122-O64"]: for i in range(0, 64): port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % (i * 8) + elif hwsku in ["Cisco-8122-O128"]: + for i in range(0, 64): + port_alias_to_name_map["etp%da" % i] = "Ethernet%d" % (i * 4 * 2) + port_alias_to_name_map["etp%db" % i] = "Ethernet%d" % ((i * 4 * 2) + 4) elif hwsku in ["Cisco-8800-LC-48H-C48"]: for i in range(0, 48, 1): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % (i * 4) From e734df6049d5b060c4c022c31c588669372cfd1b Mon Sep 17 00:00:00 2001 From: Charudatta Chitale Date: Tue, 12 Nov 2024 01:04:29 -0800 Subject: [PATCH 065/175] skipping test_route_flow_counter.py on Cisco 8122 platforms (#15017) * skip 
route_flow_counter TCs on 8122 platforms * correcting conditional mark sort for test_route_flow_counter.py --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index a4737b99dc2..baa5e89f28f 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1633,6 +1633,12 @@ route/test_route_flap.py: - "https://github.com/sonic-net/sonic-mgmt/issues/11324 and 'dualtor-64' in topo_name" - "'standalone' in topo_name" +route/test_route_flow_counter.py: + skip: + reason: "Test not supported for cisco-8122 platform" + conditions: + - "platform in ['x86_64-8122_64eh_o-r0', 'x86_64-8122_64ehf_o-r0']" + route/test_route_perf.py: skip: reason: "Does not apply to standalone topos." From ffcb7aa4781459cca599bb61b42df045e2d24b37 Mon Sep 17 00:00:00 2001 From: dypet Date: Tue, 12 Nov 2024 02:04:58 -0700 Subject: [PATCH 066/175] Use COUNTER_MARGIN in PFCXonTest check. 
(#15305) --- tests/saitests/py3/sai_qos_tests.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 2d649a4221e..9ec46133975 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -2710,7 +2710,8 @@ def runTest(self): # & may give inconsistent test results # Adding COUNTER_MARGIN to provide room to 2 pkt incase, extra traffic received for cntr in ingress_counters: - if platform_asic and platform_asic == "broadcom-dnx": + if (platform_asic and + platform_asic in ["broadcom-dnx", "cisco-8000"]): qos_test_assert( self, recv_counters[cntr] <= recv_counters_base[cntr] + COUNTER_MARGIN, 'unexpectedly ingress drop on recv port (counter: {}), at step {} {}'.format( From 2adfb21d4526a3398b4b7974f208a168e87a81cc Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Tue, 12 Nov 2024 12:58:13 -0800 Subject: [PATCH 067/175] Fix replace ingress_lossless_pool (#15332) * Fix replace ingress_lossless_pool * Add log * Fix log message. 
--- tests/common/gu_utils.py | 2 +- .../test_incremental_qos.py | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index cb5b0f96a37..e62ece315cf 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -107,7 +107,7 @@ def expect_res_success(duthost, output, expected_content_list, unexpected_conten def expect_op_failure(output): """Expected failure from apply-patch output """ - logger.info("return code {}".format(output['rc'])) + logger.info("Return code: {}, error: {}".format(output['rc'], output['stderr'])) pytest_assert( output['rc'], "The command should fail with non zero return code" diff --git a/tests/generic_config_updater/test_incremental_qos.py b/tests/generic_config_updater/test_incremental_qos.py index 7282f251b6f..7856320fe53 100644 --- a/tests/generic_config_updater/test_incremental_qos.py +++ b/tests/generic_config_updater/test_incremental_qos.py @@ -240,12 +240,17 @@ def test_incremental_qos_config_updates(duthost, tbinfo, ensure_dut_readiness, c try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) if op == "replace" and not field_value: + logger.info("{} expects failure when configdb_field: {} does not have value.".format(op, configdb_field)) expect_op_failure(output) - - if is_valid_platform_and_version(duthost, "BUFFER_POOL", "Shared/headroom pool size changes", op, field_value): - expect_op_success(duthost, output) - ensure_application_of_updated_config(duthost, configdb_field, value) else: - expect_op_failure(output) + if is_valid_platform_and_version(duthost, + "BUFFER_POOL", + "Shared/headroom pool size changes", + op, + field_value): + expect_op_success(duthost, output) + ensure_application_of_updated_config(duthost, configdb_field, value) + else: + expect_op_failure(output) finally: delete_tmpfile(duthost, tmpfile) From dbd6ac8b47d99da52e74bea809ec249d5718cd40 Mon Sep 17 00:00:00 2001 From: Riff Date: Tue, 12 Nov 2024 
15:09:51 -0800 Subject: [PATCH 068/175] Update SSH session gen to generate tmuxinator configurations (#15483) * Update ssh session gen. * Minor fix. --- ansible/devutil/device_inventory.py | 124 ++++++++++++++++- ansible/devutil/ssh_session_repo.py | 206 ++++++++++++++++++++-------- ansible/devutil/testbed.py | 75 ++++++++++ ansible/ssh_session_gen.py | 164 ++++++++++++---------- 4 files changed, 437 insertions(+), 132 deletions(-) mode change 100644 => 100755 ansible/ssh_session_gen.py diff --git a/ansible/devutil/device_inventory.py b/ansible/devutil/device_inventory.py index c890e86830f..78d26f34fce 100644 --- a/ansible/devutil/device_inventory.py +++ b/ansible/devutil/device_inventory.py @@ -1,3 +1,4 @@ +import copy import os import csv import glob @@ -22,7 +23,7 @@ def __init__( self.device_type = device_type self.protocol = protocol self.os = os - self.console_device = None + self.physical_hostname = None self.console_port = 0 @staticmethod @@ -50,6 +51,107 @@ def is_ssh_supported(self) -> bool: return True +class DeviceLinkInfo: + """Device link information.""" + + def __init__( + self, + start_device: str, + start_port: str, + end_device: str, + end_port: str, + bandwidth: int, + vlan_ranges: List[range], + vlan_mode: str, + auto_neg: str + ): + self.start_device = start_device + self.start_port = start_port + self.end_device = end_device + self.end_port = end_port + self.bandwidth = bandwidth + self.vlan_ranges = vlan_ranges + self.vlan_mode = vlan_mode + self.auto_neg = auto_neg + + @staticmethod + def from_csv_row(row: List[str]) -> "DeviceLinkInfo": + vlan_list = row[5] if row[5] else "" + vlan_ranges_str = vlan_list.split(",") if vlan_list != "" else [] + vlan_ranges = [] + for vlan_range_str in vlan_ranges_str: + vlan_range = vlan_range_str.split("-") + if len(vlan_range) == 1: + vlan_ranges.append(range(int(vlan_range[0]), int(vlan_range[0]) + 1)) + elif len(vlan_range) == 2: + vlan_ranges.append(range(int(vlan_range[0]), int(vlan_range[1]) + 1)) + 
else: + raise ValueError(f"Invalid vlan range: {vlan_range_str}") + + return DeviceLinkInfo( + start_device=row[0], + start_port=row[1], + end_device=row[2], + end_port=row[3], + bandwidth=int(row[4]), + vlan_ranges=vlan_ranges, + vlan_mode=row[6], + auto_neg=row[7] if len(row) > 7 else "" + ) + + def create_reverse_link(self) -> "DeviceLinkInfo": + return DeviceLinkInfo( + start_device=self.end_device, + start_port=self.end_port, + end_device=self.start_device, + end_port=self.start_port, + bandwidth=self.bandwidth, + vlan_ranges=self.vlan_ranges, + vlan_mode=self.vlan_mode, + auto_neg=self.auto_neg + ) + + +class DeviceLinkMap: + """Device link map.""" + + @staticmethod + def from_csv_file(file_path: str) -> "DeviceLinkMap": + links = DeviceLinkMap() + with open(file_path, newline="") as file: + reader = csv.reader(file) + + # Skip the header line + next(reader) + + for row in reader: + if row: + device_link = DeviceLinkInfo.from_csv_row(row) + links.add_link(device_link) + + return links + + def __init__(self): + self.links: Dict[str, Dict[str, DeviceLinkInfo]] = {} + + def add_link(self, link: DeviceLinkInfo): + if link.start_device not in self.links: + self.links[link.start_device] = {} + self.links[link.start_device][link.start_port] = link + + reverse_link = link.create_reverse_link() + if reverse_link.start_device not in self.links: + self.links[reverse_link.start_device] = {} + self.links[reverse_link.start_device][reverse_link.start_port] = reverse_link + + def get_links(self, device: str) -> Optional[Dict[str, DeviceLinkInfo]]: + return self.links.get(device) + + def get_link(self, device: str, port: str) -> Optional[DeviceLinkInfo]: + links = self.get_links(device) + return links.get(port) if links else None + + class DeviceInventory(object): """Device inventory from csv files.""" @@ -59,15 +161,22 @@ def __init__( self.inv_name = inv_name self.device_file_name = device_file_name self.devices = devices + self.links = DeviceLinkMap() @staticmethod def 
from_device_files(device_file_pattern: str) -> "List[DeviceInventory]": inv: List[DeviceInventory] = [] for file_path in glob.glob(device_file_pattern): device_inventory = DeviceInventory.from_device_file(file_path) + console_links_file_path = file_path.replace("_devices", "_console_links") if os.path.exists(console_links_file_path): device_inventory.load_console_links_info(console_links_file_path) + + device_links_file_path = file_path.replace("_devices", "_links") + if os.path.exists(device_links_file_path): + device_inventory.load_device_link_map(device_links_file_path) + inv.append(device_inventory) return inv @@ -116,8 +225,17 @@ def load_console_links_info(self, file_path: str): if not device_info: print(f"Unknown device hostname {device_hostname}, skipping") continue - device_info.console_device = console_device_info - device_info.console_port = console_port + + device_console_device = copy.deepcopy(console_device_info) + device_console_device.hostname = f"{device_hostname}-console" + device_console_device.device_type = "Console" # Make it different from ConsoleServer + device_console_device.physical_hostname = console_hostname + device_console_device.console_port = console_port + self.devices[device_console_device.hostname] = device_console_device + + def load_device_link_map(self, file_path: str): + print(f"Loading device links inventory: {file_path}") + self.links = DeviceLinkMap.from_csv_file(file_path) def get_device(self, hostname: str) -> Optional[DeviceInfo]: return self.devices.get(hostname) diff --git a/ansible/devutil/ssh_session_repo.py b/ansible/devutil/ssh_session_repo.py index 8324c660c3c..3daf5382b54 100644 --- a/ansible/devutil/ssh_session_repo.py +++ b/ansible/devutil/ssh_session_repo.py @@ -5,15 +5,28 @@ """ import os +from typing import Dict, List, Optional import sshconf from Crypto.Hash import SHA256 from Crypto.Cipher import AES +from devutil.device_inventory import DeviceInfo +import jinja2 + + +class DeviceSSHInfo(object): + """SSH 
info for devices.""" + + def __init__(self, ip: Optional[str], ipv6: Optional[str], user: Optional[str], password: Optional[str]): + self.ip = ip + self.ipv6 = ipv6 + self.user = user + self.password = password class SshSessionRepoGenerator(object): """Base class for ssh session repo generator.""" - def __init__(self, target, template_file): + def __init__(self, target: str, template_file: str): """Store all parameters as attributes. Args: @@ -23,7 +36,7 @@ def __init__(self, target, template_file): self.target = target self.template = self._load_template(template_file) - def _load_template(self, template_file): + def _load_template(self, template_file: str): """Load SSH session template file. Args: @@ -34,17 +47,11 @@ def _load_template(self, template_file): """ raise NotImplementedError - def generate(self, session_path, ssh_ip, ssh_ipv6, ssh_user, ssh_pass, - console_ssh_ip, console_ssh_port, console_ssh_user, console_ssh_pass): + def generate(self, repo_type: str, inv_name: str, testbed_name: str, + device: DeviceInfo, device_name: str, ssh_info: DeviceSSHInfo): """Generate SSH session for a node. This is a virtual method that should be implemented by child class. - - Args: - session_path(str): SSH session path. - ssh_ip (str): SSH IP address. - ssh_user (str): SSH username. - ssh_pass (str): SSH password. """ raise NotImplementedError @@ -53,7 +60,31 @@ def finish(self): This is a virtual method that should be implemented by child class. """ - raise NotImplementedError + pass + + def _get_device_type_short_name(self, device: DeviceInfo) -> str: + """Get the short name of the device type. + + Args: + device_type (str): Device type. + + Returns: + str: Short name of the device type. 
+ """ + device_type = "dut" + + if device.device_type == "PTF": + device_type = "ptf" + elif "Root" in device.device_type: + device_type = "root" + elif "Fanout" in device.device_type: + device_type = "fan" + elif "Console" in device.device_type: + device_type = "console" + elif "Server" in device.device_type: + device_type = "server" + + return device_type class SecureCRTSshSessionRepoGenerator(SshSessionRepoGenerator): @@ -103,22 +134,23 @@ def _load_template(self, template_file): return template - def generate(self, session_path, ssh_ip, ssh_ipv6, ssh_user, ssh_pass, - console_ssh_ip, console_ssh_port, console_ssh_user, console_ssh_pass): + def generate(self, repo_type: str, inv_name: str, testbed_name: str, + device: DeviceInfo, ssh_info: DeviceSSHInfo): """Generate SSH session for a testbed node.""" + device_name = f"{self._get_device_type_short_name(device)}-{device.hostname}" + session_file_matrix = [ - (session_path, ssh_ip, ssh_user, ssh_pass), - (session_path + "-v6", ssh_ipv6, ssh_user, ssh_pass), - (session_path + "-console", console_ssh_ip, f"{console_ssh_user}:{console_ssh_port}", console_ssh_pass), + (device_name, ssh_info.ip, ssh_info), + (device_name + "-v6", ssh_info.ipv6, ssh_info), ] - for (session_name, ip, user, password) in session_file_matrix: - if not ip or not user: + for (device_name, ip, ssh_info) in session_file_matrix: + if not ip or not ssh_info.user: continue # In SecureCRT, every SSH session is stored in a ini file separately, # hence we add .ini extension to the session path in order to generate individual SSH session file. 
- ssh_session_file_path = os.path.join(self.target, session_name + ".ini") + ssh_session_file_path = os.path.join(self.target, repo_type, inv_name, testbed_name, device_name + ".ini") # Recursively create SSH session file directory ssh_session_folder = os.path.dirname(ssh_session_file_path) @@ -126,7 +158,7 @@ def generate(self, session_path, ssh_ip, ssh_ipv6, ssh_user, ssh_pass, # Generate SSH session file ssh_session_file_content = self._generate_ssh_session_file_content( - session_name, ip, user, password + device_name, ip, ssh_info ) with open(ssh_session_file_path, "w") as ssh_session_file: ssh_session_file.write(ssh_session_file_content) @@ -135,10 +167,10 @@ def generate(self, session_path, ssh_ip, ssh_ipv6, ssh_user, ssh_pass, ssh_session_folder_data = SecureCRTRepoFolderData.from_folder( ssh_session_folder, create_if_not_exist=True ) - ssh_session_folder_data.add_session(session_name) + ssh_session_folder_data.add_session(device_name) ssh_session_folder_data.save() - def _create_ssh_session_folder(self, ssh_session_file_dir): + def _create_ssh_session_folder(self, ssh_session_file_dir: str): """Recursively create SSH session file directory level by level if it does not exist, and init the folder with a folder data ini file. @@ -164,7 +196,7 @@ def _create_ssh_session_folder(self, ssh_session_file_dir): parent_folder_data.save() def _generate_ssh_session_file_content( - self, session_name, ssh_ip, ssh_user, ssh_pass + self, session_name: str, ssh_ip: str, ssh_info: DeviceSSHInfo ): """Generate SSH session file content: @@ -177,17 +209,13 @@ def _generate_ssh_session_file_content( Returns: str: SSH session file content. 
+ logging.info("Force {} (physical interface {}) to go through the sequence of lpmode on/off".format( + logical_intf, phy_intf)) + set_lpmode(duthost, logical_intf, "on") + time.sleep(WAIT_TIME_AFTER_LPMODE_SET) + set_lpmode(duthost, logical_intf, "off") + time.sleep(WAIT_TIME_AFTER_LPMODE_SET) + + logging.info("Check sfp presence again after reset") + sfp_presence = duthost.command(cmd_sfp_presence_per_intf, module_ignore_errors=True) + + # For vs testbed, we will get expected Error code `ERROR_CHASSIS_LOAD = 2` here. + if duthost.facts["asic_type"] == "vs" and sfp_presence['rc'] == 2: + pass + else: + assert sfp_presence['rc'] == 0, \ + "Run command '{}' failed".format(cmd_sfp_presence_per_intf) + + parsed_presence = parse_output(sfp_presence["stdout_lines"][2:]) + assert logical_intf in parsed_presence, \ + "Interface is not in output of '{}'".format(cmd_sfp_presence_per_intf) + assert parsed_presence[logical_intf] == "Present", \ + "Interface presence is not 'Present' for {}".format(logical_intf) + + # Check interface status for all interfaces in the end just in case + assert check_interface_status(duthost, + [logical_intf + for logical_intfs_dict in phy_intfs_to_test_per_asic.values() + for logical_intf, is_admin_up in logical_intfs_dict.items() if is_admin_up], + expect_up=True) def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, From b27f64f43ffe39819903bc555fa91bc3e098681d Mon Sep 17 00:00:00 2001 From: Dayou Liu <113053330+dayouliu1@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:32:41 -0800 Subject: [PATCH 073/175] fix sample_golden_config_db.j2 portchannel undefined (#15311) Add portchannel check to sample_golden_config_db.j2 to resolve errors for topos (specifically mx) that do not have portchannels for golden_config_infra/test_config_reload_with_rendered_golden_config.py when running `sonic-cfggen -d -t /tmp/golden_config_db.j2 > /etc/sonic/golden_config_db.json` --- .../templates/sample_golden_config_db.j2 | 
8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/golden_config_infra/templates/sample_golden_config_db.j2 b/tests/golden_config_infra/templates/sample_golden_config_db.j2 index 07a119dd2e5..634711e4225 100644 --- a/tests/golden_config_infra/templates/sample_golden_config_db.j2 +++ b/tests/golden_config_infra/templates/sample_golden_config_db.j2 @@ -1,7 +1,9 @@ {% set portchannels= [] %} -{% for pc, value in PORTCHANNEL.items() %} - {% set _ = portchannels.append(pc) %} -{% endfor %} +{% if PORTCHANNEL is defined %} + {% for pc, value in PORTCHANNEL.items() %} + {% set _ = portchannels.append(pc) %} + {% endfor %} +{% endif %} { "NEW_FEATURE": { From 8f3e42ab1a56eb49f92636c5a843ce34228f42f5 Mon Sep 17 00:00:00 2001 From: Chris <156943338+ccroy-arista@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:34:32 -0800 Subject: [PATCH 074/175] Fix asic identification (#15297) * sonic-mgmt: improve asic identification Device ASIC identification is achieved by whole line matches from the output of lspci, which is excessive and subject to fail due to unforeseeable changes in such output. This change reduces the string matching to specific unique differentiators in the output from lspci, while also future-proofing against similar changes in the lspci that could foreseeably occur. * sonic-mgmt: add th4/th5 asic identification Add token matches for identifying the TH4 and TH5 ASICs from the output of lspci. * sonic-mgmt: fix pre-commit issue Fix pre-commit error introduced within the prior two commits. 
--- tests/common/devices/sonic.py | 37 +++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index b19ee0fb873..c4947d736c8 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -32,6 +32,7 @@ "orchagent": "swss", "syncd": "syncd" } +UNKNOWN_ASIC = "unknown" class SonicHost(AnsibleHostBase): @@ -1759,28 +1760,34 @@ def run_redis_cli_cmd(self, redis_cmd): cmd = "/usr/bin/redis-cli {}".format(redis_cmd) return self.command(cmd, verbose=False) + def _try_get_brcm_asic_name(self, output): + search_sets = { + "td2": {"b85", "BCM5685"}, + "td3": {"b87", "BCM5687"}, + "th": {"b96", "BCM5696"}, + "th2": {"b97", "BCM5697"}, + "th3": {"b98", "BCM5698"}, + "th4": {"b99", "BCM5699"}, + "th5": {"f90", "BCM7890"}, + } + for asic in search_sets.keys(): + for search_term in search_sets[asic]: + if search_term in output: + return asic + return UNKNOWN_ASIC + def get_asic_name(self): - asic = "unknown" + asic = UNKNOWN_ASIC output = self.shell("lspci", module_ignore_errors=True)["stdout"] - if ("Broadcom Limited Device b960" in output or - "Broadcom Limited Broadcom BCM56960" in output): - asic = "th" - elif "Device b971" in output: - asic = "th2" - elif ("Broadcom Limited Device b850" in output or - "Broadcom Limited Broadcom BCM56850" in output or - "Broadcom Inc. and subsidiaries Broadcom BCM56850" in output): - asic = "td2" - elif ("Broadcom Limited Device b870" in output or - "Broadcom Inc. 
and subsidiaries Device b870" in output): - asic = "td3" - elif "Broadcom Limited Device b980" in output: - asic = "th3" + if "Broadcom" in output: + asic = self._try_get_brcm_asic_name(output) elif "Cisco Systems Inc Device a001" in output: asic = "gb" elif "Mellanox Technologies" in output: asic = "spc" + logger.info("asic: {}".format(asic)) + return asic def is_nvidia_platform(self): From 9ff03875c6801b6996e1bb04a226ab22a5c54a1b Mon Sep 17 00:00:00 2001 From: Zhijian Li Date: Tue, 12 Nov 2024 21:19:15 -0800 Subject: [PATCH 075/175] [M0-2VLAN] Update test_vlan_ping for m0-2vlan topo (#15503) What is the motivation for this PR? Update test_vlan_ping for m0-2vlan topo. How did you do it? Use topo_type instead of topo_name. How did you verify/test it? Verified on Nokia-7215 M0-2VLAN testbed. --- tests/vlan/test_vlan_ping.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/vlan/test_vlan_ping.py b/tests/vlan/test_vlan_ping.py index fd19021c88f..3b02d493852 100644 --- a/tests/vlan/test_vlan_ping.py +++ b/tests/vlan/test_vlan_ping.py @@ -60,15 +60,15 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo, vm_host_info = {} vm_name, vm_info = None, None - topo_name = tbinfo["topo"]["name"] + topo_type = tbinfo["topo"]["type"] for nbr_name, nbr_info in list(nbrhosts.items()): - if topo_name != "m0" or (topo_name == "m0" and "M1" in nbr_name): + if topo_type != "m0" or (topo_type == "m0" and "M1" in nbr_name): vm_name = nbr_name vm_info = nbr_info break py_assert(vm_name is not None, "Can't get neighbor vm") - if topo_name == "mx": + if topo_type == "mx": vm_ip_with_prefix = six.ensure_text(vm_info['conf']['interfaces']['Ethernet1']['ipv4']) output = vm_info['host'].command("ip addr show dev eth1") else: @@ -104,7 +104,7 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo, # Get the bgp neighbor connected to the selected VM if a_bgp_nbr['name'] == vm_name and a_bgp_nbr['addr'] == 
str(vm_host_info['ipv4']): # Find the interface that connects to the selected VM - if topo_name == "mx": + if topo_type == "mx": for intf in mg_facts['minigraph_interfaces']: if intf['peer_addr'] == str(vm_host_info['ipv4']): vm_host_info['port_index_list'] = [mg_facts['minigraph_ptf_indices'][intf['attachto']]] From 48b6c08b7545ca5fd0ce89c19bc5be0b1eea6ea8 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:31:36 +0800 Subject: [PATCH 076/175] Skip traffic test in route perf test for multi-asic (#15515) What is the motivation for this PR? Currently, route/test_route_perf.py does not support traffic tests on multi-asic KVM testbeds and has a high failure rate How did you do it? Skipping traffic test in route perf test for multi-asic KVM platform How did you verify/test it? --- .../tests_mark_conditions_skip_traffic_test.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml index c9e35f27883..18371c03db6 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml @@ -279,6 +279,16 @@ ipfwd/test_dir_bcast.py: conditions: - "asic_type in ['vs']" +####################################### +##### route ##### +####################################### +route/test_route_perf.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + - "is_multi_asic==True" + ####################################### ##### span ##### ####################################### From b24321ea3066843067565cfd36dedcae38076310 Mon Sep 17 00:00:00 2001 From: Kevin Wang <65380078+kevinskwang@users.noreply.github.com> Date: Wed, 13 Nov 2024 14:53:33 +0800 Subject: [PATCH 077/175] Increase the 
sleep time after change the interface status (#15517) Signed-off-by: Kevin Wang --- tests/fdb/test_fdb_mac_learning.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fdb/test_fdb_mac_learning.py b/tests/fdb/test_fdb_mac_learning.py index e8f192243b4..c11590f5ced 100644 --- a/tests/fdb/test_fdb_mac_learning.py +++ b/tests/fdb/test_fdb_mac_learning.py @@ -200,7 +200,7 @@ def testFdbMacLearning(self, ptfadapter, duthosts, rand_one_dut_hostname, ptfhos # unshut 1 port and populate fdb for that port. make sure fdb entry is populated in mac table duthost = duthosts[rand_one_dut_hostname] duthost.shell("sudo config interface startup {}".format(target_ports_to_ptf_mapping[0][0])) - time.sleep(10) + time.sleep(30) self.dynamic_fdb_oper(duthost, tbinfo, ptfhost, [target_ports_to_ptf_mapping[0]]) pytest_assert(wait_until(300, 2, 1, fdb_table_has_dummy_mac_for_interface, duthost, target_ports_to_ptf_mapping[0][0], self.DUMMY_MAC_PREFIX), "After starting {}" @@ -210,7 +210,7 @@ def testFdbMacLearning(self, ptfadapter, duthosts, rand_one_dut_hostname, ptfhos # unshut 3 more ports and populate fdb for those ports duthost.shell("sudo config interface startup {}-{}".format(target_ports_to_ptf_mapping[1][0], target_ports_to_ptf_mapping[3][0][8:])) - time.sleep(10) + time.sleep(30) self.dynamic_fdb_oper(duthost, tbinfo, ptfhost, target_ports_to_ptf_mapping[1:]) for i in range(1, len(target_ports_to_ptf_mapping)): pytest_assert(wait_until(300, 2, 1, fdb_table_has_dummy_mac_for_interface, duthost, @@ -221,7 +221,7 @@ def testFdbMacLearning(self, ptfadapter, duthosts, rand_one_dut_hostname, ptfhos # shutdown last 3 ports and make sure corresponding entries are gone from MAC address table for i in range(1, len(target_ports_to_ptf_mapping)): duthost.shell("sudo config interface shutdown {}".format(target_ports_to_ptf_mapping[i][0])) - time.sleep(10) + time.sleep(30) for i in range(1, len(target_ports_to_ptf_mapping)): pytest_assert(not 
(fdb_table_has_dummy_mac_for_interface(duthost, target_ports_to_ptf_mapping[i][0])), "mac entry present when interface {} is down" From f3d2014a06e4114dc74182fefc01bba18413a6c6 Mon Sep 17 00:00:00 2001 From: veronica-arista <117375955+veronica-arista@users.noreply.github.com> Date: Wed, 13 Nov 2024 00:30:39 -0800 Subject: [PATCH 078/175] Fix intermittent issue on reboot in test_lldp_syncd (#15331) At the start of test_lldp_syncd.py::test_lldp_entry_table_after_reboot the test polls and checks that LLDP_ENTRY_TABLE keys match show lldp table output. The eth0 port is added last so sometimes the entry keys will have it but the lldp table output will not. From debugging, this is the case since the end of the previous test (I put a check on the keys vs lldp table output and observed the missing eth0 at the end of test_lldp_syncd.py::test_lldp_entry_table_after_lldp_restart) Added a wait_until at the start of test_lldp_entry_table_after_reboot to wait until the LLDP_ENTRY_TABLE keys match show lldp table output before the tests starts to reboot. 
--- tests/lldp/test_lldp_syncd.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/lldp/test_lldp_syncd.py b/tests/lldp/test_lldp_syncd.py index 361188f0dc1..75d1e03b090 100644 --- a/tests/lldp/test_lldp_syncd.py +++ b/tests/lldp/test_lldp_syncd.py @@ -90,6 +90,13 @@ def get_show_lldp_table_output(duthost): return interface_list +def check_lldp_table_keys(duthost, db_instance): + # Check if LLDP_ENTRY_TABLE keys match show lldp table output + lldp_entry_keys = get_lldp_entry_keys(db_instance) + show_lldp_table_int_list = get_show_lldp_table_output(duthost) + return sorted(lldp_entry_keys) == sorted(show_lldp_table_int_list) + + def assert_lldp_interfaces( lldp_entry_keys, show_lldp_table_int_list, lldpctl_interface ): @@ -322,6 +329,12 @@ def test_lldp_entry_table_after_reboot( localhost, duthosts, enum_rand_one_per_hwsku_frontend_hostname, db_instance ): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + + # Verify LLDP_ENTRY_TABLE keys match show lldp table output at the start of test + keys_match = wait_until(30, 5, 0, check_lldp_table_keys, duthost, db_instance) + if not keys_match: + assert keys_match, "LLDP_ENTRY_TABLE keys do not match 'show lldp table' output" + # reboot logging.info("Run cold reboot on DUT") reboot( From e3e1c66a37d61bf90e0ddba0e5415cade6ca7bf1 Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Wed, 13 Nov 2024 17:29:16 +0800 Subject: [PATCH 079/175] Add dualtor stanalone and swithover faulty ycable test to PR test (#15519) What is the motivation for this PR? Elastictest performs well in distribute running PR test in multiple KVMs, which support us to add more test scripts to PR checker. But some traffic test using ptfadapter can't be tested on KVM platform, we need to skip traffic test if needed How did you do it? Add dualtor stanalone and swithover faulty ycable test to PR test How did you verify/test it? 
--- .azure-pipelines/pr_test_scripts.yaml | 2 ++ tests/common/dualtor/tunnel_traffic_utils.py | 11 +++++------ tests/dualtor/test_switchover_faulty_ycable.py | 12 ++++++++++++ 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index cd44402cf13..b48fd5685b5 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -245,7 +245,9 @@ dualtor: - arp/test_arp_dualtor.py - arp/test_arp_extended.py - dualtor/test_ipinip.py + - dualtor/test_standalone_tunnel_route.py - dualtor/test_switchover_failure.py + - dualtor/test_switchover_faulty_ycable.py - dualtor/test_tor_ecn.py - dualtor/test_tunnel_memory_leak.py - dualtor_io/test_heartbeat_failure.py diff --git a/tests/common/dualtor/tunnel_traffic_utils.py b/tests/common/dualtor/tunnel_traffic_utils.py index 059ca8ad703..402061c7dc3 100644 --- a/tests/common/dualtor/tunnel_traffic_utils.py +++ b/tests/common/dualtor/tunnel_traffic_utils.py @@ -262,6 +262,7 @@ def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=Non self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo)) self.ptfadapter = ptfadapter self.packet_count = packet_count + self.asic_type = standby_tor.facts["asic_type"] standby_tor_cfg_facts = self.standby_tor.config_facts( host=self.standby_tor.hostname, source="running" @@ -292,17 +293,15 @@ def __enter__(self): def __exit__(self, *exc_info): if exc_info[0]: return + if self.asic_type == "vs": + logging.info("Skipping traffic check on VS platform.") + return try: - result = testutils.verify_packet_any_port( + port_index, rec_pkt = testutils.verify_packet_any_port( ptfadapter, self.exp_pkt, ports=self.listen_ports ) - if isinstance(result, tuple): - port_index, rec_pkt = result - elif isinstance(result, bool): - logging.info("Using dummy testutils to skip traffic test.") - return except AssertionError as detail: logging.debug("Error 
occurred in polling for tunnel traffic", exc_info=True) if "Did not receive expected packet on any of ports" in str(detail): diff --git a/tests/dualtor/test_switchover_faulty_ycable.py b/tests/dualtor/test_switchover_faulty_ycable.py index c5a47cf43ff..4e68aa14b66 100644 --- a/tests/dualtor/test_switchover_faulty_ycable.py +++ b/tests/dualtor/test_switchover_faulty_ycable.py @@ -21,6 +21,18 @@ ] +@pytest.fixture(autouse=True) +def ignore_expected_loganalyzer_exceptions(duthosts, rand_one_dut_hostname, loganalyzer): + # Ignore in KVM test + KVMIgnoreRegex = [ + ".*Could not establish the active side for Y cable port.*", + ] + duthost = duthosts[rand_one_dut_hostname] + if loganalyzer: # Skip if loganalyzer is disabled + if duthost.facts["asic_type"] == "vs": + loganalyzer[duthost.hostname].ignore_regex.extend(KVMIgnoreRegex) + + @pytest.fixture(scope="module") def simulated_faulty_side(rand_unselected_dut): return rand_unselected_dut From 11dd4b739d9d83339295c1672d494d6318685abb Mon Sep 17 00:00:00 2001 From: Dayou Liu <113053330+dayouliu1@users.noreply.github.com> Date: Wed, 13 Nov 2024 06:52:23 -0800 Subject: [PATCH 080/175] fix check_dut_asic_type fixture index error (#14830) Updated the rand_one_dut_hostname and selected_rand_dut fixtures to set rand_one_dut_hostname_var whenever needed, specifically in the case the fixture is dynamically loaded. Removed old method of setting rand_one_dut_hostname_var. 
--- tests/conftest.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index b9ba2c76de9..f0531bb5ba0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -493,6 +493,8 @@ def rand_one_dut_hostname(request): """ """ global rand_one_dut_hostname_var + if rand_one_dut_hostname_var is None: + set_rand_one_dut_hostname(request) return rand_one_dut_hostname_var @@ -507,6 +509,8 @@ def rand_selected_dut(duthosts, rand_one_dut_hostname): @pytest.fixture(scope="module") def selected_rand_dut(request): global rand_one_dut_hostname_var + if rand_one_dut_hostname_var is None: + set_rand_one_dut_hostname(request) return rand_one_dut_hostname_var @@ -1699,12 +1703,6 @@ def pytest_generate_tests(metafunc): # noqa E302 if dut_fixture_name and "selected_dut" in metafunc.fixturenames: metafunc.parametrize("selected_dut", duts_selected, scope="module", indirect=True) - # When rand_one_dut_hostname used and select a dut for test, initialize rand_one_dut_hostname_var - # rand_one_dut_hostname and rand_selected_dut will use this variable for setup test case - # selected_rand_dut will use this variable for setup TACACS - if "rand_one_dut_hostname" in metafunc.fixturenames: - set_rand_one_dut_hostname(metafunc) - if "enum_dut_portname" in metafunc.fixturenames: metafunc.parametrize("enum_dut_portname", generate_port_lists(metafunc, "all_ports")) From 17208c4700150f546b6e4b94bb132e6b55cb2573 Mon Sep 17 00:00:00 2001 From: Xu Chen <112069142+XuChen-MSFT@users.noreply.github.com> Date: Wed, 13 Nov 2024 23:21:35 +0800 Subject: [PATCH 081/175] fix 7260 headroom pool watermark test failure (#15536) What is the motivation for this PR? 
observed consistent headroom watermark test failure on 7260 and it's a known issue of the test script, as below RCA: summarize test steps first: PTF send lots of pkt to multiple src ports to fill multiple PG's share buffer PTF send one or a few pkts to multiple src ports to trigger pfc on multiple PG check watermark before test headroom's watermark PTF send pkt to multiple src port to consume headroom pool, and test if watermark changes as expected after step2, already send 20 pkts into headroom to trigger PFC on 10 src ports (20 PG) but, so far, "upper_bound" value is static hardcode "2 * margin + 1", didn't consider headroom pool consumption in step2. since we use dynamic threshold calculation, it can get accurate threshold value, we are pretty sure the headroom pool consumption equals "pgs_num" in step2. so I change "upper_bound" value to "2 * margin + self.pgs_num", and it passes the tests. How did you do it? change "upper_bound" value to "2 * margin + self.pgs_num" How did you verify/test it? this change already verified in MSFT nightly for 202305 and 202311 branch, just commit to github. Any platform specific information? this change is dedicated to below platform and topology: if (hwsku == 'Arista-7260CX3-D108C8' and self.testbed_type in ('t0-116', 'dualtor-120')) \ or (hwsku == 'Arista-7260CX3-C64' and self.testbed_type in ('dualtor-aa-56', 't1-64-lag')): upper_bound = 2 * margin + self.pgs_num if other platform and topology have hit a similar issue, can add the affected platform and topo to the above condition checking. Note: for a generic fix, the qos refactor project will cover it. 
--- tests/saitests/py3/sai_qos_tests.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 9ec46133975..30212910941 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -3158,6 +3158,9 @@ def runTest(self): sys.stderr.flush() upper_bound = 2 * margin + 1 + if (hwsku == 'Arista-7260CX3-D108C8' and self.testbed_type in ('t0-116', 'dualtor-120')) \ + or (hwsku == 'Arista-7260CX3-C64' and self.testbed_type in ('dualtor-aa-56', 't1-64-lag')): + upper_bound = 2 * margin + self.pgs_num if self.wm_multiplier: hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark( self.src_client, self.buf_pool_roid) From bc9aef9be7fc46be0dbd4911c5f03f2891afe001 Mon Sep 17 00:00:00 2001 From: veronica-arista <117375955+veronica-arista@users.noreply.github.com> Date: Wed, 13 Nov 2024 07:56:58 -0800 Subject: [PATCH 082/175] Fix qos node selection for single-asic (#15074) Fix single-asic issues from PR https://github.com/sonic-net/sonic-mgmt/pull/14925 The search for shortlink linecard logic added in that PR does not verify that a DUT is multi-asic for the single_dut_multi_asic tests and incorrectly tries to access the asics on a single-asic DUT. 
--- tests/qos/qos_sai_base.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 1b2c5d92a66..92e315d128f 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -634,12 +634,18 @@ def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo, lower_tor_host) dst_asic_index = 0 elif test_port_selection_criteria == "single_dut_multi_asic": + found_multi_asic_dut = False if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost): pytest.skip("single_dut_multi_asic is not supported on T0 topologies") if topo not in self.SUPPORTED_T1_TOPOS and shortlink_indices: - src_dut_index = random.choice(shortlink_indices) + random.shuffle(shortlink_indices) + for idx in shortlink_indices: + a_dut = duthosts.frontend_nodes[idx] + if a_dut.sonichost.is_multi_asic: + src_dut_index = idx + found_multi_asic_dut = True + break else: - found_multi_asic_dut = False for a_dut_index in range(len(duthosts.frontend_nodes)): a_dut = duthosts.frontend_nodes[a_dut_index] if a_dut.sonichost.is_multi_asic: @@ -647,9 +653,9 @@ def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo, lower_tor_host) found_multi_asic_dut = True logger.info("Using dut {} for single_dut_multi_asic testing".format(a_dut.hostname)) break - if not found_multi_asic_dut: - pytest.skip( - "Did not find any frontend node that is multi-asic - so can't run single_dut_multi_asic tests") + if not found_multi_asic_dut: + pytest.skip( + "Did not find any frontend node that is multi-asic - so can't run single_dut_multi_asic tests") dst_dut_index = src_dut_index src_asic_index = 0 dst_asic_index = 1 From 1155fb87fe7d7447335231135a3c21f4f8aab77c Mon Sep 17 00:00:00 2001 From: Sai <165318278+saiilla@users.noreply.github.com> Date: Wed, 13 Nov 2024 09:47:24 -0800 Subject: [PATCH 083/175] Watchport blackbox test plan (#15222) * Create Watchport_Blackbox_Test _Plan.md * Update Watchport_Blackbox_Test _Plan.md * 
Update Watchport_Blackbox_Test _Plan.md * Update Watchport_Blackbox_Test _Plan.md --- .../tests/Watchport_Blackbox_Test _Plan.md | 256 ++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 sdn_tests/tests/Watchport_Blackbox_Test _Plan.md diff --git a/sdn_tests/tests/Watchport_Blackbox_Test _Plan.md b/sdn_tests/tests/Watchport_Blackbox_Test _Plan.md new file mode 100644 index 00000000000..4c6c34a00a9 --- /dev/null +++ b/sdn_tests/tests/Watchport_Blackbox_Test _Plan.md @@ -0,0 +1,256 @@ +# Objective + +This document captures the tests that are intended to be covered in the blackbox test environment for Watchport feature. + +# Overview + +Watchport is a feature that aims to quickly remove a link (that went down) from the WCMP/ECMP group it participates in before the controller (used interchangeably with the external view) can detect the link down event and take the appropriate recovery action. This is mainly to shorten the duration of traffic black hole problems that may arise if a down member exists in a WCMP/ECMP group. + +The test-plan aims to verify the correctness of the feature by picking up certain triggers and common use-cases. The testing will not cover the following: + +- Reference or object dependencies like whether a nexthop member exists before being referenced in the WCMP/ECMP group action. +- Traffic loss/convergence related scenarios. + +# Testbed Requirements + +The testbed requirements are the existence of a basic blackbox setup that comprises a SUT and control switch which are connected to each other on multiple links. + +# Test Cases + +## Configured weights are realized + + + + + + + + + + + + + + + + + + +
TitleVerify basic WCMP/ECMP packet hashing works with watch port actions.
Procedure
    +
  • Create a WCMP/ECMP group (herein referred to as Action Profile Group APG) with multiple members (herein referred to as Action Profile Members APM) with an associated watch port for each member.
  • +
+
    +
  • Send different packets to the SUT from the control switch by varying a field in the packet header that will apply the hashing algorithm to select an APM from the APG.
  • +
+
Expected Results
    +
  • Verify the packets are distributed to all the members in the APG by comparing the actual number of packets received on each port vs the expected up members.
  • +
+
+ +## + +## Member down handling + + + + + + + + + + + + + + + + + + +
TitleVerify the watchport action when the watch port link is forced down.
Procedure
    +
  • Create a WCMP/ECMP APG with multiple APM.
  • +
+
    +
  • Bring down the watch port associated with one member of the APG.
  • +
+
Expected Results
    +
  • Verify that the member of the down port is excluded from the APG (via traffic tests) but the read request from P4RT (as in APP_DB) reflects the original set of Action Profile members.
  • +
+
    +
  • Send different packets as in the earlier step and verify traffic is distributed only to the members whose watch port link is up.
  • +
+
+ +## Member up handling + + + + + + + + + + + + + + + + + + +
TitleVerify the watchport action when the watch port link comes up
Procedure
    +
  • Disable link damping (if any) to ensure link up notifications are delivered instantly.
  • +
+
    +
  • Bring up the watch port of an excluded member of an APG.
  • +
+
    +
  • Resend packets with varying headers that will ensure all members are hashed.
  • +
+
Expected Results
    +
  • Verify that packets are distributed as per the new membership.
  • +
+
+ +## Watch port for a single member group + + + + + + + + + + + + + + + + + + +
TitleVerify watch port functionality for single member.
Procedure
    +
  • Disable link damping (if any) to ensure link up notifications are delivered instantly.
  • +
+
    +
  • Create a WCMP/ECMP APG with only one member
  • +
+
    +
  • Send different packets to the SUT from the control switch by varying a field in the packet header.
  • +
+
    +
  • Bring down the watch port associated with the member.
  • +
+
    +
  • Bring up the watch port associated with the member in the APG.
  • +
+
Expected Results
    +
  • Verify that all packets are sent out on the same member while the associated watch port is up, no traffic loss.
  • +
+
    +
  • Verify that all packets are dropped when the associated watch port is down.
  • +
+
+ +## Modify operation on a watchport member + + + + + + + + + + + + + + + + + + + + + + + + + + +
TitleVerify watch port action along with the controller updates.
Procedure
    +
  • Disable link damping (if any) to ensure link up notifications are delivered instantly.
  • +
+
    +
  • Create a WCMP/ECMP APG with multiple members and watch ports.
  • +
+
    +
  • Bring down one of the watch port associated with a member and verify the member is excluded from the selection process for this APG.
  • +
+
    +
  • Send a modify APG request that removes the member whose watch port was brought down.
  • +
+
    +
  • Bring the associated watch port up and verify that the deleted member does not get added back to the APG.
  • +
+
    +
  • Send traffic with varying packet headers.
  • +
+
Expected Results
    +
  • Verify APP_DB state always reflects the membership consistent with the external view and not the membership that the switch implementation modified when the associated watch port went down/up.
  • +
+
    +
  • Verify traffic is destined only to the members programmed by the controller and whose associated watch port is up.
  • +
+
Procedure
    +
  • Repeat the same steps as above but replace the modify APG with remove APG operation.
  • +
+
Expected Results
    +
  • Verify that bringing up the watch port does not result in any critical error reported by the switch. (No group exists since the group was removed)
  • +
+
+ +## Specifying a down-port as watch port + + + + + + + + + + + + + + + + + + +
TitleVerify the watch port action when the controller adds a member to the APG whose associated watch port is down.
Procedure
    +
  • Disable link damping (if any) to ensure link up notifications are delivered instantly.
  • +
+
    +
  • Create a WCMP/ECMP APG with some members whose watch ports are up and some down.
  • +
+
    +
  • Send traffic and ensure only non-excluded member ports receive it, no traffic loss.
  • +
+
    +
  • Bring up the watch port whose APM was excluded from the APG.
  • +
+
Expected Results
    +
  • Verify APP_STATE DB reads always reflect all members.
  • +
+
    +
  • Verify traffic is destined to only members in the APG whose associated watch ports are up and there is no overall traffic loss.
  • +
+
From 709ebdecad3c1b3d6e948578e00475ca4aaebdd2 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 13 Nov 2024 10:14:35 -0800 Subject: [PATCH 084/175] Skip RX_DRP check on Mellanox platform in test_drop_l3_ip_packet_non_dut_mac (#15248) --- tests/ip/test_ip_packet.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/ip/test_ip_packet.py b/tests/ip/test_ip_packet.py index 9d12aa3ee79..ab47b2cc1f7 100644 --- a/tests/ip/test_ip_packet.py +++ b/tests/ip/test_ip_packet.py @@ -739,8 +739,11 @@ def test_drop_l3_ip_packet_non_dut_mac(self, duthosts, enum_rand_one_per_hwsku_f return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) - pytest_assert(rx_drp >= self.PKT_NUM_MIN, - "Dropped {} packets in rx, not in expected range".format(rx_drp)) + asic_type = duthost.facts["asic_type"] + # Packet is dropped silently on Mellanox platform if the destination MAC address is not the router MAC + if asic_type not in ["mellanox"]: + pytest_assert(rx_drp >= self.PKT_NUM_MIN, + "Dropped {} packets in rx, not in expected range".format(rx_drp)) pytest_assert(tx_ok <= self.PKT_NUM_ZERO, "Forwarded {} packets in tx, not in expected range".format(tx_ok)) pytest_assert(max(tx_drp, tx_rif_err) <= self.PKT_NUM_ZERO, From 0f1148ee02c2edf71b79d259ebbbfaf934f9d20d Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:07:25 -0800 Subject: [PATCH 085/175] Ensure correct testing telemetry config after config reload (#15071) What is the motivation for this PR? After config reload, other telemetry cases fail since testing telemetry config is no longer present. This causes that telemetry config will not have client_auth set to false which results in cert errors when running telemetry query. 
Added disable_loganalyzer to test_telemetry_queue_buffer_cnt since we see teardown loganalyzer errors complaining about non telemetry related syncd SAI_API logs How did you do it? Ensure that telemetry present after each config reload call. How did you verify/test it? Manual/Pipeline --- tests/common/helpers/telemetry_helper.py | 26 ++++++++++++------------ tests/telemetry/test_telemetry.py | 4 ++++ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/common/helpers/telemetry_helper.py b/tests/common/helpers/telemetry_helper.py index 2ae26114513..4124a43f53a 100644 --- a/tests/common/helpers/telemetry_helper.py +++ b/tests/common/helpers/telemetry_helper.py @@ -52,6 +52,19 @@ def setup_telemetry_forpyclient(duthost): client_auth_out = duthost.shell('sonic-db-cli CONFIG_DB HGET "%s|gnmi" "client_auth"' % (env.gnmi_config_table), module_ignore_errors=False)['stdout_lines'] client_auth = str(client_auth_out[0]) + + if client_auth == "true": + duthost.shell('sonic-db-cli CONFIG_DB HSET "%s|gnmi" "client_auth" "false"' % (env.gnmi_config_table), + module_ignore_errors=False) + duthost.shell("systemctl reset-failed %s" % (env.gnmi_container)) + duthost.service(name=env.gnmi_container, state="restarted") + # Wait until telemetry was restarted + py_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, env.gnmi_container), + "%s not started." % (env.gnmi_container)) + logger.info("telemetry process restarted") + else: + logger.info('client auth is false. 
No need to restart telemetry') + return client_auth @@ -83,19 +96,6 @@ def _context_for_setup_streaming_telemetry(request, duthosts, enum_rand_one_per_ env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) default_client_auth = setup_telemetry_forpyclient(duthost) - if default_client_auth == "true": - duthost.shell('sonic-db-cli CONFIG_DB HSET "%s|gnmi" "client_auth" "false"' % (env.gnmi_config_table), - module_ignore_errors=False) - duthost.shell("systemctl reset-failed %s" % (env.gnmi_container)) - duthost.service(name=env.gnmi_container, state="restarted") - else: - logger.info('client auth is false. No need to restart telemetry') - - # Wait until telemetry was restarted - py_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, env.gnmi_container), - "%s not started." % (env.gnmi_container)) - logger.info("telemetry process restarted. Now run pyclient on ptfdocker") - # Wait until the TCP port was opened dut_ip = duthost.mgmt_ip if is_ipv6: diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index c975f532fd4..be487aac402 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -8,6 +8,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from tests.common.helpers.gnmi_utils import GNMIEnvironment +from tests.common.helpers.telemetry_helper import setup_telemetry_forpyclient from telemetry_utils import assert_equal, get_list_stdout, get_dict_stdout, skip_201911_and_older from telemetry_utils import generate_client_cli, parse_gnmi_output, check_gnmi_cli_running from tests.common import config_reload @@ -31,6 +32,8 @@ def load_new_cfg(duthost, data): duthost.copy(content=json.dumps(data, indent=4), dest=CFG_DB_PATH) config_reload(duthost, config_source='config_db', safe_reload=True) + # config reload overrides testing telemetry config, ensure testing config exists + setup_telemetry_forpyclient(duthost) def 
get_buffer_queues_cnt(ptfhost, gnxi_path, dut_ip, iface, gnmi_port): @@ -129,6 +132,7 @@ def test_telemetry_ouput(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, @pytest.mark.parametrize('setup_streaming_telemetry', [False], indirect=True) +@pytest.mark.disable_loganalyzer def test_telemetry_queue_buffer_cnt(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, setup_streaming_telemetry, gnxi_path): """ From 4f001242c965f03a626951e18f158b8c03b9083a Mon Sep 17 00:00:00 2001 From: prabhataravind <108555774+prabhataravind@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:27:36 -0800 Subject: [PATCH 086/175] [copp]: Add test cases to verify rate-limiting for the following cases (#14670) * [copp]: Add test cases to verify rate-limiting for the following cases * Neighbor miss (subnet hit) packets * Neighbor miss after decap for IPinIP encapsulated packets Signed-off-by: Prabhat Aravind * Skip traffic tests for kvm testbeds Signed-off-by: Prabhat Aravind * Address review comment * Use default rate of 600PPS for default trap group and associated tests Signed-off-by: Prabhat Aravind * remove skip_traffic_test fixture the fixture has been deprecated recently Signed-off-by: Prabhat Aravind --------- Signed-off-by: Prabhat Aravind --- .../test/files/ptftests/py3/copp_tests.py | 133 ++++++++++++++---- .../tests_mark_conditions.yaml | 5 + tests/copp/conftest.py | 16 +++ tests/copp/copp_utils.py | 40 ++++++ tests/copp/scripts/update_copp_config.py | 9 +- tests/copp/test_copp.py | 45 +++++- 6 files changed, 209 insertions(+), 39 deletions(-) diff --git a/ansible/roles/test/files/ptftests/py3/copp_tests.py b/ansible/roles/test/files/ptftests/py3/copp_tests.py index 42fd1435845..92211065432 100644 --- a/ansible/roles/test/files/ptftests/py3/copp_tests.py +++ b/ansible/roles/test/files/ptftests/py3/copp_tests.py @@ -26,6 +26,8 @@ # SSHTest # IP2METest # DefaultTest +# VlanSubnetTest +# VlanSubnetIPinIPTest import datetime import os @@ -34,6 +36,7 @@ import threading import time 
+import ptf.packet as scapy import ptf.testutils as testutils from ptf.base_tests import BaseTest @@ -45,9 +48,6 @@ class ControlPlaneBaseTest(BaseTest): PPS_LIMIT = 600 PPS_LIMIT_MIN = PPS_LIMIT * 0.9 PPS_LIMIT_MAX = PPS_LIMIT * 1.3 - DEFAULT_PPS_LIMIT = 300 - DEFAULT_PPS_LIMIT_MIN = DEFAULT_PPS_LIMIT * 0.9 - DEFAULT_PPS_LIMIT_MAX = DEFAULT_PPS_LIMIT * 1.3 NO_POLICER_LIMIT = PPS_LIMIT * 1.4 TARGET_PORT = "3" # Historically we have port 3 as a target port TASK_TIMEOUT = 600 # Wait up to 10 minutes for tasks to complete @@ -69,6 +69,8 @@ def __init__(self): self.myip = test_params.get('myip', None) self.peerip = test_params.get('peerip', None) + self.vlanip = test_params.get('vlanip', None) + self.loopbackip = test_params.get('loopbackip', None) self.default_server_send_rate_limit_pps = test_params.get( 'send_rate_limit', 2000) @@ -83,6 +85,7 @@ def __init__(self): self.asic_type = test_params.get('asic_type', None) self.platform = test_params.get('platform', None) self.topo_type = test_params.get('topo_type', None) + self.ip_version = test_params.get('ip_version', None) def log(self, message, debug=False): current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") @@ -219,14 +222,14 @@ def copp_test(self, packet, send_intf, recv_intf): return send_count, recv_count, time_delta, time_delta_ms, tx_pps, rx_pps - def contruct_packet(self, port_number): + def construct_packet(self, port_number): raise NotImplementedError def check_constraints(self, send_count, recv_count, time_delta_ms, rx_pps): raise NotImplementedError def one_port_test(self, port_number): - packet = self.contruct_packet(port_number) + packet = self.construct_packet(port_number) send_count, recv_count, time_delta, time_delta_ms, tx_pps, rx_pps = \ self.copp_test(bytes(packet), (0, port_number), (1, port_number)) @@ -289,7 +292,7 @@ def runTest(self): self.log("ARPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = 
self.my_mac[port_number] src_ip = self.myip dst_ip = self.peerip @@ -319,7 +322,7 @@ def runTest(self): self.log("DHCPTopoT1Test") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_udp_packet( @@ -368,7 +371,7 @@ def runTest(self): self.log("DHCPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_udp_packet( @@ -417,7 +420,7 @@ def runTest(self): self.log("DHCP6Test") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_udpv6_packet( @@ -445,7 +448,7 @@ def runTest(self): self.log("DHCP6TopoT1Test") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_udpv6_packet( @@ -485,7 +488,7 @@ def runTest(self): self.log("LLDPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_eth_packet( @@ -525,7 +528,7 @@ def runTest(self): # as its destination MAC address. eth_type is to indicate # the length of the data in Ethernet 802.3 frame. 
pktlen # = 117 = 103 (0x67) + 6 (dst MAC) + 6 (dst MAC) + 2 (len) - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] packet = testutils.simple_eth_packet( @@ -547,7 +550,7 @@ def runTest(self): self.log("BGPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): dst_mac = self.peer_mac[port_number] dst_ip = self.peerip @@ -586,15 +589,15 @@ def check_constraints(self, send_count, recv_count, time_delta_ms, rx_pps): else: self.log("Checking constraints (DefaultPolicyApplied):") self.log( - "DEFAULT_PPS_LIMIT_MIN (%d) <= rx_pps (%d) <= DEFAULT_PPS_LIMIT_MAX (%d): %s" % - (int(self.DEFAULT_PPS_LIMIT_MIN), + "PPS_LIMIT_MIN (%d) <= rx_pps (%d) <= PPS_LIMIT_MAX (%d): %s" % + (int(self.PPS_LIMIT_MIN), int(rx_pps), - int(self.DEFAULT_PPS_LIMIT_MAX), - str(self.DEFAULT_PPS_LIMIT_MIN <= rx_pps <= self.DEFAULT_PPS_LIMIT_MAX)) + int(self.PPS_LIMIT_MAX), + str(self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX)) ) - assert self.DEFAULT_PPS_LIMIT_MIN <= rx_pps <= self.DEFAULT_PPS_LIMIT_MAX, "Copp policer constraint " \ + assert self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX, "Copp policer constraint " \ "check failed, Actual PPS: {} Expected PPS range: {} - {}".format( - rx_pps, self.DEFAULT_PPS_LIMIT_MIN, self.DEFAULT_PPS_LIMIT_MAX) + rx_pps, self.PPS_LIMIT_MIN, self.PPS_LIMIT_MAX) # SONIC config contains policer CIR=6000 for LACP @@ -606,7 +609,7 @@ def runTest(self): self.log("LACPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): packet = testutils.simple_eth_packet( pktlen=14, eth_dst='01:80:c2:00:00:02', @@ -626,7 +629,7 @@ def runTest(self): self.log("SNMPTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] dst_mac = self.peer_mac[port_number] dst_ip = self.peerip @@ -650,7 +653,7 @@ def runTest(self): 
self.log("SSHTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): dst_mac = self.peer_mac[port_number] src_ip = self.myip dst_ip = self.peerip @@ -681,7 +684,7 @@ def one_port_test(self, port_number): if port[0] == 0: continue - packet = self.contruct_packet(port[1]) + packet = self.construct_packet(port[1]) send_count, recv_count, time_delta, time_delta_ms, tx_pps, rx_pps = \ self.copp_test(bytes(packet), (0, port_number), (1, port_number)) @@ -689,7 +692,7 @@ def one_port_test(self, port_number): self.check_constraints( send_count, recv_count, time_delta_ms, rx_pps) - def contruct_packet(self, port_number): + def construct_packet(self, port_number): src_mac = self.my_mac[port_number] dst_mac = self.peer_mac[port_number] dst_ip = self.peerip @@ -703,6 +706,7 @@ def contruct_packet(self, port_number): return packet +# Verify policer functionality for TTL 1 packets class DefaultTest(PolicyTest): def __init__(self): PolicyTest.__init__(self) @@ -711,7 +715,7 @@ def runTest(self): self.log("DefaultTest") self.run_suite() - def contruct_packet(self, port_number): + def construct_packet(self, port_number): dst_mac = self.peer_mac[port_number] src_ip = self.myip dst_ip = self.peerip @@ -726,3 +730,82 @@ def contruct_packet(self, port_number): ) return packet + + +# Verify policer functionality for Vlan subnet packets +class VlanSubnetTest(PolicyTest): + def __init__(self): + PolicyTest.__init__(self) + + def runTest(self): + self.log("VlanSubnetTest") + self.run_suite() + + def construct_packet(self, port_number): + dst_mac = self.peer_mac[port_number] + src_ip = self.myip + dst_ip = self.vlanip + + if self.ip_version == "4": + packet = testutils.simple_tcp_packet( + eth_dst=dst_mac, + ip_dst=dst_ip, + ip_src=src_ip, + ip_ttl=25, + tcp_sport=5000, + tcp_dport=8000 + ) + else: + packet = testutils.simple_tcpv6_packet( + eth_dst=dst_mac, + ipv6_dst=dst_ip, + ipv6_src=src_ip, + ipv6_hlim=25, + tcp_sport=5000, + 
tcp_dport=8000 + ) + + return packet + + +# Verify policer functionality for Vlan subnet IPinIP packets +class VlanSubnetIPinIPTest(PolicyTest): + def __init__(self): + PolicyTest.__init__(self) + + def runTest(self): + self.log("VlanSubnetIpinIPTest") + self.run_suite() + + def construct_packet(self, port_number): + dst_mac = self.peer_mac[port_number] + inner_src_ip = self.myip + inner_dst_ip = self.vlanip + outer_dst_ip = self.loopbackip + + if self.ip_version == "4": + inner_packet = testutils.simple_tcp_packet( + ip_dst=inner_dst_ip, + ip_src=inner_src_ip, + ip_ttl=25, + tcp_sport=5000, + tcp_dport=8000 + ).getlayer(scapy.IP) + else: + inner_packet = testutils.simple_tcpv6_packet( + ipv6_dst=inner_dst_ip, + ipv6_src=inner_src_ip, + ipv6_hlim=25, + tcp_sport=5000, + tcp_dport=8000 + ).getlayer(scapy.IPv6) + + packet = testutils.simple_ipv4ip_packet( + eth_dst=dst_mac, + ip_src='1.1.1.1', + ip_dst=outer_dst_ip, + ip_ttl=40, + inner_frame=inner_packet + ) + + return packet diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index baa5e89f28f..05eecad5dc5 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -262,6 +262,11 @@ copp/test_copp.py::TestCOPP::test_trap_config_save_after_reboot: - "build_version.split('.')[0].isdigit() and int(build_version.split('.')[0]) > 20220531 and hwsku in ['Arista-7050-QX-32S', 'Arista-7050QX32S-Q32', 'Arista-7050-QX32', 'Arista-7050QX-32S-S4Q31', 'Arista-7060CX-32S-D48C8', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32S-Q32', 'Arista-7060CX-32S-C32-T1']" - "(topo_name not in ['ptf32', 'ptf64', 't0', 't0-64', 't0-52', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 'm0', 'm0-2vlan', 'mx'] and 't2' not in topo_type)" +copp/test_copp.py::TestCOPP::test_trap_neighbor_miss: + skip: + reason: "Copp test_trap_neighbor_miss is not 
supported on this topology" + conditions: + - "(topo_name not in ['t0', 't0-64', 't0-52', 't0-116'])" ####################################### ##### crm ##### diff --git a/tests/copp/conftest.py b/tests/copp/conftest.py index e514e983e56..bc7a2cac3c6 100644 --- a/tests/copp/conftest.py +++ b/tests/copp/conftest.py @@ -32,6 +32,22 @@ def pytest_addoption(parser): ) +@pytest.fixture(params=["4", "6"]) +def ip_versions(request): + """ + Parameterized fixture for IP versions. + """ + yield request.param + + +@pytest.fixture(params=["VlanSubnet", "VlanSubnetIPinIP"]) +def packet_type(request): + """ + Parameterized fixture for packet types used for neighbor miss tests + """ + yield request.param + + @pytest.fixture(autouse=True, scope="module") def is_backend_topology(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo): """ diff --git a/tests/copp/copp_utils.py b/tests/copp/copp_utils.py index 0b44aa3bca2..3dad9acf8fb 100644 --- a/tests/copp/copp_utils.py +++ b/tests/copp/copp_utils.py @@ -7,6 +7,7 @@ import re import logging import json +import ipaddress from tests.common.config_reload import config_reload @@ -434,3 +435,42 @@ def install_trap(dut, feature_name): feature_name (str): feature name """ enable_feature_entry(dut, feature_name) + + +def get_vlan_ip(duthost, ip_version): + """ + @Summary: Get an IP on the Vlan subnet + @param duthost: Ansible host instance of the device + @return: Return a vlan IP, e.g., "192.168.0.2" + """ + + mg_facts = duthost.minigraph_facts( + host=duthost.hostname)['ansible_facts'] + mg_vlans = mg_facts['minigraph_vlans'] + + if not mg_vlans: + return None + + mg_vlan_intfs = mg_facts['minigraph_vlan_interfaces'] + + if ip_version == "4": + vlan_subnet = ipaddress.ip_network(mg_vlan_intfs[0]['subnet']) + else: + vlan_subnet = ipaddress.ip_network(mg_vlan_intfs[1]['subnet']) + + ip_addr = str(vlan_subnet[2]) + return ip_addr + + +def get_lo_ipv4(duthost): + + loopback_ip = None + mg_facts = duthost.minigraph_facts( + 
host=duthost.hostname)['ansible_facts'] + + for intf in mg_facts["minigraph_lo_interfaces"]: + if ipaddress.ip_address(intf["addr"]).version == 4: + loopback_ip = intf["addr"] + break + + return loopback_ip diff --git a/tests/copp/scripts/update_copp_config.py b/tests/copp/scripts/update_copp_config.py index 1322a2bce63..6336cfc9d52 100644 --- a/tests/copp/scripts/update_copp_config.py +++ b/tests/copp/scripts/update_copp_config.py @@ -64,7 +64,6 @@ def generate_limited_pps_config(pps_limit, input_config_file, output_config_file config_format (str): The format of the input COPP config file """ - DEFAULT_PPS_LIMIT = "300" with open(input_config_file) as input_stream: copp_config = json.load(input_stream) @@ -84,13 +83,9 @@ def generate_limited_pps_config(pps_limit, input_config_file, output_config_file # # Setting these two values to pps_limit restricts the policer to allowing exactly # that number of packets per second, which is what we want for our tests. - # For default trap, use a different CIR other than 600 to easily identify - # if it is getting hit. 
For queue4_group3, use the default value in copp + # For queue4_group3, use the default value in copp # configuration as this is lower than 600 PPS - if tg == "default": - group_config["cir"] = DEFAULT_PPS_LIMIT - group_config["cbs"] = DEFAULT_PPS_LIMIT - elif tg == "queue4_group3": + if tg == "queue4_group3": if asic_type == "cisco-8000": group_config["cir"] = "400" group_config["cbs"] = "400" diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py index 324a3e6679e..4dd08bd84d9 100644 --- a/tests/copp/test_copp.py +++ b/tests/copp/test_copp.py @@ -51,7 +51,9 @@ "swap_syncd", "topo", "myip", + "myip6", "peerip", + "peerip6", "nn_target_interface", "nn_target_namespace", "send_rate_limit", @@ -81,7 +83,8 @@ class TestCOPP(object): "BGP", "LACP", "LLDP", - "UDLD"]) + "UDLD", + "Default"]) def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, copp_testbed, dut_type): """ @@ -97,6 +100,21 @@ def test_policer(self, protocol, duthosts, enum_rand_one_per_hwsku_frontend_host copp_testbed, dut_type) + @pytest.mark.disable_loganalyzer + def test_trap_neighbor_miss(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, + ptfhost, check_image_version, copp_testbed, dut_type, + ip_versions, packet_type): # noqa F811 + """ + Validates that neighbor miss (subnet hit) packets are rate-limited + + """ + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + logger.info("Verify IPV{} {} packets are rate limited".format(ip_versions, packet_type)) + pytest_assert( + wait_until(60, 20, 0, _copp_runner, duthost, ptfhost, packet_type, copp_testbed, dut_type, + ip_version=ip_versions), + "Traffic check for {} packets failed".format(packet_type)) + @pytest.mark.disable_loganalyzer def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, check_image_version, copp_testbed, dut_type, backup_restore_config_db): @@ -273,21 +291,27 @@ def 
ignore_expected_loganalyzer_exceptions(enum_rand_one_per_hwsku_frontend_host loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex) -def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True): +def _copp_runner(dut, ptf, protocol, test_params, dut_type, has_trap=True, + ip_version="4"): # noqa F811 """ Configures and runs the PTF test cases. """ + is_ipv4 = True if ip_version == "4" else False + params = {"verbose": False, "target_port": test_params.nn_target_port, - "myip": test_params.myip, - "peerip": test_params.peerip, + "myip": test_params.myip if is_ipv4 else test_params.myip6, + "peerip": test_params.peerip if is_ipv4 else test_params.peerip6, + "vlanip": copp_utils.get_vlan_ip(dut, ip_version), + "loopbackip": copp_utils.get_lo_ipv4(dut), "send_rate_limit": test_params.send_rate_limit, "has_trap": has_trap, "hw_sku": dut.facts["hwsku"], "asic_type": dut.facts["asic_type"], "platform": dut.facts["platform"], - "topo_type": test_params.topo_type} + "topo_type": test_params.topo_type, + "ip_version": ip_version} dut_ip = dut.mgmt_ip device_sockets = ["0-{}@tcp://127.0.0.1:10900".format(test_params.nn_target_port), @@ -349,14 +373,19 @@ def _gather_test_params(tbinfo, duthost, request, duts_minigraph_facts): if nn_target_interface not in mg_facts["minigraph_neighbors"]: continue for bgp_peer in mg_facts["minigraph_bgp"]: - if bgp_peer["name"] == mg_facts["minigraph_neighbors"][nn_target_interface]["name"] \ - and ipaddr.IPAddress(bgp_peer["addr"]).version == 4: + if myip is None and \ + bgp_peer["name"] == mg_facts["minigraph_neighbors"][nn_target_interface]["name"] \ + and ipaddr.IPAddress(bgp_peer["addr"]).version == 4: myip = bgp_peer["addr"] peerip = bgp_peer["peer_addr"] nn_target_namespace = mg_facts["minigraph_neighbors"][nn_target_interface]['namespace'] is_backend_topology = mg_facts.get(constants.IS_BACKEND_TOPOLOGY_KEY, False) if is_backend_topology and len(mg_facts["minigraph_vlan_sub_interfaces"]) > 
0: nn_target_vlanid = mg_facts["minigraph_vlan_sub_interfaces"][0]["vlan"] + elif bgp_peer["name"] == mg_facts["minigraph_neighbors"][nn_target_interface]["name"] \ + and ipaddr.IPAddress(bgp_peer["addr"]).version == 6: + myip6 = bgp_peer["addr"] + peerip6 = bgp_peer["peer_addr"] break logging.info("nn_target_port {} nn_target_interface {} nn_target_namespace {} nn_target_vlanid {}" @@ -366,7 +395,9 @@ def _gather_test_params(tbinfo, duthost, request, duts_minigraph_facts): swap_syncd=swap_syncd, topo=topo, myip=myip, + myip6=myip6, peerip=peerip, + peerip6=peerip6, nn_target_interface=nn_target_interface, nn_target_namespace=nn_target_namespace, send_rate_limit=send_rate_limit, From 9e248f6451d42170b3c61687f0c648ea1c1fc41d Mon Sep 17 00:00:00 2001 From: ranepbhagyashree Date: Wed, 13 Nov 2024 14:52:50 -0800 Subject: [PATCH 087/175] nhop_group: Fix expected mac address dictionary for Cisco 8122 (#15409) * gr2_nhop_hmap: Fix gr2 mac address dictionary * nhop_group: Fix expected mac address dictionary for Cisco 8122 --- tests/ipfwd/test_nhop_group.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index 31711c18098..abfa90d5413 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -758,6 +758,36 @@ def built_and_send_tcp_ip_packet(): 45: 'c0:ff:ee:00:00:12', 46: 'c0:ff:ee:00:00:0e', 47: 'c0:ff:ee:00:00:0f', 48: 'c0:ff:ee:00:00:0b', 49: 'c0:ff:ee:00:00:12'} + gr2_asic_flow_map = {0: 'c0:ff:ee:00:00:11', 1: 'c0:ff:ee:00:00:12', + 2: 'c0:ff:ee:00:00:0c', + 3: 'c0:ff:ee:00:00:0f', 4: 'c0:ff:ee:00:00:0b', + 5: 'c0:ff:ee:00:00:10', 6: 'c0:ff:ee:00:00:12', + 7: 'c0:ff:ee:00:00:12', 8: 'c0:ff:ee:00:00:0b', + 9: 'c0:ff:ee:00:00:0e', + 10: 'c0:ff:ee:00:00:10', 11: 'c0:ff:ee:00:00:0c', + 12: 'c0:ff:ee:00:00:0c', 13: 'c0:ff:ee:00:00:11', + 14: 'c0:ff:ee:00:00:0c', + 15: 'c0:ff:ee:00:00:0f', 16: 'c0:ff:ee:00:00:10', + 17: 
'c0:ff:ee:00:00:0b', 18: 'c0:ff:ee:00:00:10', + 19: 'c0:ff:ee:00:00:0f', 20: 'c0:ff:ee:00:00:0b', + 21: 'c0:ff:ee:00:00:12', 22: 'c0:ff:ee:00:00:0f', + 23: 'c0:ff:ee:00:00:0d', 24: 'c0:ff:ee:00:00:0c', + 25: 'c0:ff:ee:00:00:0c', + 26: 'c0:ff:ee:00:00:10', 27: 'c0:ff:ee:00:00:0d', + 28: 'c0:ff:ee:00:00:11', 29: 'c0:ff:ee:00:00:12', + 30: 'c0:ff:ee:00:00:0e', 31: 'c0:ff:ee:00:00:11', + 32: 'c0:ff:ee:00:00:0e', 33: 'c0:ff:ee:00:00:0b', + 34: 'c0:ff:ee:00:00:0e', + 35: 'c0:ff:ee:00:00:0b', 36: 'c0:ff:ee:00:00:11', + 37: 'c0:ff:ee:00:00:11', 38: 'c0:ff:ee:00:00:10', + 39: 'c0:ff:ee:00:00:12', + 40: 'c0:ff:ee:00:00:11', 41: 'c0:ff:ee:00:00:0f', + 42: 'c0:ff:ee:00:00:11', 43: 'c0:ff:ee:00:00:0f', + 44: 'c0:ff:ee:00:00:0f', 45: 'c0:ff:ee:00:00:0b', + 46: 'c0:ff:ee:00:00:0f', + 47: 'c0:ff:ee:00:00:0d', 48: 'c0:ff:ee:00:00:0e', + 49: 'c0:ff:ee:00:00:0e'} + # Make sure a given flow always hash to same nexthop/neighbor. This is done to try to find issue # where SAI vendor changes Hash Function across SAI releases. Please note this will not catch the issue every time # as there is always probability even after change of Hash Function same nexthop/neighbor is selected. @@ -768,7 +798,7 @@ def built_and_send_tcp_ip_packet(): "th4": th_asic_flow_map, "td3": td3_asic_flow_map, "gr": gr_asic_flow_map, "spc1": spc_asic_flow_map, "spc2": spc_asic_flow_map, "spc3": spc_asic_flow_map, - "spc4": spc_asic_flow_map} + "spc4": spc_asic_flow_map, "gr2": gr2_asic_flow_map} vendor = duthost.facts["asic_type"] hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] From 9ab879e7a12cef061163cc4148b6fea6440b7400 Mon Sep 17 00:00:00 2001 From: Dashuai Zhang <164845223+sdszhang@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:30:51 +1100 Subject: [PATCH 088/175] skip multidut bgp instead of assert if testbed doesn't support. (#15537) test case fails if the testbed doesn't support snappi bgp convergence setup. 
--- .../bgp/test_bgp_outbound_downlink_port_flap.py | 4 ++-- .../test_bgp_outbound_downlink_process_crash.py | 4 ++-- .../multidut/bgp/test_bgp_outbound_tsa.py | 14 +++++++------- .../bgp/test_bgp_outbound_uplink_multi_po_flap.py | 10 +++++----- .../bgp/test_bgp_outbound_uplink_po_flap.py | 4 ++-- .../bgp/test_bgp_outbound_uplink_po_member_flap.py | 4 ++-- .../bgp/test_bgp_outbound_uplink_process_crash.py | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_port_flap.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_port_flap.py index 2c5b48533c1..5ff65ea6daa 100755 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_port_flap.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_port_flap.py @@ -68,7 +68,7 @@ def test_bgp_outbound_downlink_port_flap(snappi_api, snappi_extra_params.multi_dut_params.flap_details = FLAP_DETAILS snappi_extra_params.test_name = "T1 Interconnectivity flap" if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -78,7 +78,7 @@ def test_bgp_outbound_downlink_port_flap(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_process_crash.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_process_crash.py index a0ac0f9f15e..15e727a186d 100755 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_process_crash.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_downlink_process_crash.py @@ -66,7 +66,7 @@ def test_bgp_outbound_downlink_process_crash(snappi_api, } snappi_extra_params.multi_dut_params.host_name = t1_t2_device_hostnames[2] if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -76,7 +76,7 @@ def test_bgp_outbound_downlink_process_crash(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_tsa.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_tsa.py index 2db762e9dc3..567a9804741 100644 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_tsa.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_tsa.py @@ -62,7 +62,7 @@ def test_dut_configuration(multidut_snappi_ports_for_bgp, # noq if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: @@ -103,7 +103,7 @@ def test_bgp_outbound_uplink_tsa(snappi_api, snappi_extra_params.device_name = t1_t2_device_hostnames[1] if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -113,7 +113,7 @@ def test_bgp_outbound_uplink_tsa(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: @@ -161,7 +161,7 @@ def test_bgp_outbound_downlink_tsa(snappi_api, snappi_extra_params.device_name = t1_t2_device_hostnames[2] if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -171,7 +171,7 @@ def test_bgp_outbound_downlink_tsa(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: @@ -217,7 +217,7 @@ def test_bgp_outbound_supervisor_tsa(snappi_api, snappi_extra_params.device_name = t1_t2_device_hostnames[3] if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -227,7 +227,7 @@ def test_bgp_outbound_supervisor_tsa(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_multi_po_flap.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_multi_po_flap.py index a983e3642d1..414e7790ccf 100644 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_multi_po_flap.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_multi_po_flap.py @@ -62,7 +62,7 @@ def test_dut_configuration(multidut_snappi_ports_for_bgp, # noq if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: @@ -103,7 +103,7 @@ def test_bgp_outbound_uplink_complete_blackout(snappi_api, snappi_extra_params.multi_dut_params.BLACKOUT_PERCENTAGE = 100 if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -112,7 +112,7 @@ def test_bgp_outbound_uplink_complete_blackout(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: @@ -156,7 +156,7 @@ def test_bgp_outbound_uplink_partial_blackout(snappi_api, snappi_extra_params.multi_dut_params.BLACKOUT_PERCENTAGE = 50 if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -165,7 +165,7 @@ def test_bgp_outbound_uplink_partial_blackout(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_flap.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_flap.py index 59fa935e80d..1e9c2715a86 100755 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_flap.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_flap.py @@ -68,7 +68,7 @@ def test_bgp_outbound_uplink_po_flap(snappi_api, snappi_extra_params.multi_dut_params.flap_details = FLAP_DETAILS if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for 
duthost in duthosts: @@ -77,7 +77,7 @@ def test_bgp_outbound_uplink_po_flap(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_member_flap.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_member_flap.py index 3c273641a7a..04135c39d10 100755 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_member_flap.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_po_member_flap.py @@ -68,7 +68,7 @@ def test_bgp_outbound_uplink_po_member_flap(snappi_api, snappi_extra_params.multi_dut_params.flap_details = FLAP_DETAILS if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -77,7 +77,7 @@ def test_bgp_outbound_uplink_po_member_flap(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. 
format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: diff --git a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_process_crash.py b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_process_crash.py index d27cde536b7..ef9b209cedb 100755 --- a/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_process_crash.py +++ b/tests/snappi_tests/multidut/bgp/test_bgp_outbound_uplink_process_crash.py @@ -66,7 +66,7 @@ def test_bgp_outbound_uplink_process_crash(snappi_api, } snappi_extra_params.multi_dut_params.host_name = t1_t2_device_hostnames[1] if (len(t1_t2_device_hostnames) < 3) or (len(duthosts) < 3): - pytest_assert(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") + pytest_require(False, "Need minimum of 3 devices : One T1 and Two T2 line cards") ansible_dut_hostnames = [] for duthost in duthosts: @@ -76,7 +76,7 @@ def test_bgp_outbound_uplink_process_crash(snappi_api, if device_hostname not in ansible_dut_hostnames: logger.info('!!!!! Attention: {} not in : {} derived from ansible dut hostnames'. format(device_hostname, ansible_dut_hostnames)) - pytest_assert(False, "Mismatch between the dut hostnames in ansible and in variables.py files") + pytest_require(False, "Mismatch between the dut hostnames in ansible and in variables.py files") for duthost in duthosts: if t1_t2_device_hostnames[0] in duthost.hostname: From b0051823c0ece42c3aa6c75f456c7406658f4cc6 Mon Sep 17 00:00:00 2001 From: Dashuai Zhang <164845223+sdszhang@users.noreply.github.com> Date: Thu, 14 Nov 2024 10:35:21 +1100 Subject: [PATCH 089/175] [snappi][master only] add enum with completeness_level back in (#15538) Summary: The fix in #15057 was overwritten by recent changes. 
This PR add it back into master. #15539 add it back into 202405. Will open another PR for 202405 as the fix will be slight different. test_pfc_pause_single_lossless_prio_reboot: the parameter/fixture sequence is different between master and 202405 branch. this change moves the enum_dut_lossless_prio_with_completeness_level back to original position. so it will be same as 202405 branch. test_pfc_pause_single_lossy_prio_reboot: add enum_dut_lossy_prio_with_completeness_level back in. --- .../pfc/test_multidut_pfc_pause_lossless_with_snappi.py | 4 ++-- .../pfc/test_multidut_pfc_pause_lossy_with_snappi.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py index a3e40541bb5..bc131deb4fc 100644 --- a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py +++ b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossless_with_snappi.py @@ -141,12 +141,12 @@ def test_pfc_pause_single_lossless_prio_reboot(snappi_api, # n fanout_graph_facts_multidut, # noqa: F811 duthosts, localhost, + enum_dut_lossless_prio_with_completeness_level, # noqa: F811 prio_dscp_map, # noqa: F811 lossless_prio_list, # noqa: F811 all_prio_list, # noqa: F811 get_snappi_ports, # noqa: F811 tbinfo, # noqa: F811 - enum_dut_lossless_prio_with_completeness_level, # noqa: F811 setup_ports_and_dut, # noqa: F811 disable_pfcwd, # noqa: F811 reboot_duts): # noqa: F811 @@ -159,10 +159,10 @@ def test_pfc_pause_single_lossless_prio_reboot(snappi_api, # n fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs localhost (pytest fixture): localhost handle + enum_dut_lossless_prio_with_completeness_level (str): lossless priority to test, e.g., 's6100-1|3' all_prio_list (pytest fixture): list of all the priorities prio_dscp_map (pytest fixture): priority vs. 
DSCP map (key = priority). lossless_prio_list (pytest fixture): list of all the lossless priorities - enum_dut_lossless_prio_with_completeness_level (str): lossless priority to test, e.g., 's6100-1|3' tbinfo (pytest fixture): fixture provides information about testbed get_snappi_ports (pytest fixture): gets snappi ports and connected DUT port info and returns as a list Returns: diff --git a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py index 8a03b72ac0e..e44c5a86de1 100644 --- a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py +++ b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py @@ -136,7 +136,7 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 fanout_graph_facts_multidut, # noqa: F811 duthosts, localhost, - enum_dut_lossy_prio, + enum_dut_lossy_prio_with_completeness_level, prio_dscp_map, # noqa: F811 lossy_prio_list, # noqa: F811 all_prio_list, # noqa: F811 @@ -154,7 +154,7 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 fanout_graph_facts_multidut (pytest fixture): fanout graph duthosts (pytest fixture): list of DUTs localhost (pytest fixture): localhost handle - enum_dut_lossy_prio (str): name of lossy priority to test, e.g., 's6100-1|2' + enum_dut_lossy_prio_with_completeness_level (str): name of lossy priority to test, e.g., 's6100-1|2' prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). 
lossy_prio_list (pytest fixture): list of all the lossy priorities all_prio_list (pytest fixture): list of all the priorities @@ -166,7 +166,7 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 """ testbed_config, port_config_list, snappi_ports = setup_ports_and_dut - _, lossy_prio = enum_dut_lossy_prio.split('|') + _, lossy_prio = enum_dut_lossy_prio_with_completeness_level.split('|') lossy_prio = int(lossy_prio) pause_prio_list = [lossy_prio] test_prio_list = [lossy_prio] From a3811f551ed6bb6cb33b57abcd368f23562a7ceb Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Thu, 14 Nov 2024 08:53:59 +0800 Subject: [PATCH 090/175] Add nhop group test to onboarding PR test (#15531) What is the motivation for this PR? Elastictest performs well in distribute running PR test in multiple KVMs, which support us to add more test scripts to PR checker. But some traffic test using ptfadapter can't be tested on KVM platform, we need to skip traffic test if needed How did you do it? Add nhop group test to onboarding PR test and skip traffic test How did you verify/test it? 
--- .azure-pipelines/pr_test_scripts.yaml | 1 + ...sts_mark_conditions_skip_traffic_test.yaml | 6 ++++ tests/common/vs_data.py | 2 ++ tests/ipfwd/test_nhop_group.py | 30 +++++++++++++++---- 4 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 tests/common/vs_data.py diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index b48fd5685b5..1cd4372e2bf 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -475,6 +475,7 @@ onboarding_t0: onboarding_t1: - lldp/test_lldp_syncd.py + - ipfwd/test_nhop_group.py specific_param: t0-sonic: diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml index 18371c03db6..12cccb05cd6 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml @@ -279,6 +279,12 @@ ipfwd/test_dir_bcast.py: conditions: - "asic_type in ['vs']" +ipfwd/test_nhop_group.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + ####################################### ##### route ##### ####################################### diff --git a/tests/common/vs_data.py b/tests/common/vs_data.py new file mode 100644 index 00000000000..047173d2ce2 --- /dev/null +++ b/tests/common/vs_data.py @@ -0,0 +1,2 @@ +def is_vs_device(dut): + return dut.facts["asic_type"] == "vs" diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index abfa90d5413..86a1500b685 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -15,6 +15,7 @@ from tests.common.cisco_data import is_cisco_device from tests.common.mellanox_data import is_mellanox_device, get_chip_type from tests.common.innovium_data import is_innovium_device +from tests.common.vs_data 
import is_vs_device from tests.common.utilities import wait_until from tests.common.platform.device_utils import fanout_switch_port_lookup, toggle_one_link @@ -457,6 +458,8 @@ def test_nhop_group_member_count(duthost, tbinfo, loganalyzer): ) elif is_mellanox_device(duthost): logger.info("skip this check on Mellanox as ASIC resources are shared") + elif is_vs_device(duthost): + logger.info("skip this check on VS as no real ASIC") else: pytest_assert( crm_after["available_nhop_grp"] == 0, @@ -516,8 +519,13 @@ def built_and_send_tcp_ip_packet(): for flow_count in range(50): pkt, exp_pkt = build_pkt(rtr_mac, ip_route, ip_ttl, flow_count) testutils.send(ptfadapter, gather_facts['dst_port_ids'][0], pkt, 10) - (_, recv_pkt) = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, + verify_result = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=gather_facts['src_port_ids']) + if isinstance(verify_result, bool): + logger.info("Using dummy testutils to skip traffic test.") + return + else: + _, recv_pkt = verify_result assert recv_pkt @@ -564,7 +572,8 @@ def built_and_send_tcp_ip_packet(): asic.stop_service("bgp") time.sleep(15) logger.info("Toggle link {} on {}".format(fanout_port, fanout)) - toggle_one_link(duthost, gather_facts['src_port'][0], fanout, fanout_port) + if is_vs_device(duthost) is False: + toggle_one_link(duthost, gather_facts['src_port'][0], fanout, fanout_port) time.sleep(15) built_and_send_tcp_ip_packet() @@ -804,6 +813,10 @@ def built_and_send_tcp_ip_packet(): hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] mgFacts = duthost.get_extended_minigraph_facts(tbinfo) dutAsic = None + if vendor == "vs": + logger.info("Skipping following traffic validation on VS platform") + return + for asic, nexthop_map in list(SUPPORTED_ASIC_TO_NEXTHOP_SELECTED_MAP.items()): vendorAsic = "{0}_{1}_hwskus".format(vendor, asic) if vendorAsic in list(hostvars.keys()) and mgFacts["minigraph_hwsku"] in 
hostvars[vendorAsic]: @@ -871,7 +884,8 @@ def test_nhop_group_interface_flap(duthosts, enum_rand_one_per_hwsku_frontend_ho fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, duthost.hostname, gather_facts['src_port'][i]) logger.debug("Shut fanout sw: %s, port: %s", fanout, fanout_port) - fanout.shutdown(fanout_port) + if is_vs_device(duthost) is False: + fanout.shutdown(fanout_port) nhop.add_ip_route(ip_prefix, ips) nhop.program_routes() @@ -890,13 +904,19 @@ fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, duthost.hostname, gather_facts['src_port'][i]) logger.debug("No Shut fanout sw: %s, port: %s", fanout, fanout_port) - fanout.no_shutdown(fanout_port) + if is_vs_device(duthost) is False: + fanout.no_shutdown(fanout_port) time.sleep(20) duthost.shell("portstat -c") ptfadapter.dataplane.flush() testutils.send(ptfadapter, gather_facts['dst_port_ids'][0], pkt, pkt_count) - (_, recv_pkt) = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, + verify_result = testutils.verify_packet_any_port(test=ptfadapter, pkt=exp_pkt, ports=gather_facts['src_port_ids']) + if isinstance(verify_result, bool): + logger.info("Using dummy testutils to skip traffic test.") + return + else: + _, recv_pkt = verify_result # Make sure routing is done pytest_assert(scapy.Ether(recv_pkt).ttl == (ip_ttl - 1), "Routed Packet TTL not decremented") pytest_assert(scapy.Ether(recv_pkt).src == rtr_mac, "Routed Packet Source Mac is not router MAC") From 52112624553cfec32101779473ae8b386f59c126 Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Thu, 14 Nov 2024 14:43:47 +1100 Subject: [PATCH 091/175] Update pfc_gen_t2.py (#15527) Cherry pick PR #11037 While running PFCWD test cases, encountered concatenation issue on sonic fanout.
root@xx37-root-fanout:/tmp# sudo nice --20 python pfc_gen_t2.py -p 16 -t 65535 -s 8 -n 1000000 -i Ethernet152 -r 1.76.0.62 Traceback (most recent call last): File "/tmp/pfc_gen_t2.py", line 340, in main() File "/tmp/pfc_gen_t2.py", line 264, in main fo_logger.debug(fo_str + ' sendmmsg got errno ' + str(errno) + ' for socket ' + s.getsockname()) TypeError: can only concatenate str (not "tuple") to str We converted s.getsockname() into str and able to proceed further. Cc: sanjair-git, @rraghav-cisco Signed-off-by: Austin Pham --- tests/common/helpers/pfc_gen_t2.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/common/helpers/pfc_gen_t2.py b/tests/common/helpers/pfc_gen_t2.py index baf292b79ac..04f1d50dd76 100755 --- a/tests/common/helpers/pfc_gen_t2.py +++ b/tests/common/helpers/pfc_gen_t2.py @@ -261,13 +261,14 @@ def main(): num_sent = _sendmmsg(s.fileno(), m_msghdr[0], num_to_send, 0) # direct to c library api if num_sent < 0: errno = get_errno() - fo_logger.debug(fo_str + ' sendmmsg got errno ' + str(errno) + ' for socket ' + s.getsockname()) + fo_logger.debug(fo_str + ' sendmmsg got errno ' + str(errno) + ' for socket ' + + str(s.getsockname())) break else: if num_sent != num_to_send: fo_logger.debug(fo_str + ' sendmmsg iteration ' + str(iters) + ' only sent ' + str(num_sent) + ' out of requested ' + str(num_to_send) + - ' for socket ' + s.getsockname()) + ' for socket ' + str(s.getsockname())) # Count across all sockets total_num_sent += num_sent iters += 1 @@ -302,14 +303,16 @@ def main(): num_sent = _sendmmsg(s.fileno(), m_msghdr[0], num_to_send, 0) if num_sent < 0: errno = get_errno() - fo_logger.debug(fo_str + ' sendmmsg got errno ' + str(errno) + ' for socket ' + s.getsockname()) + fo_logger.debug(fo_str + ' sendmmsg got errno ' + str(errno) + ' for socket ' + + str(s.getsockname())) test_failed = True break else: if num_sent != num_to_send: fo_logger.debug(fo_str + ' sendmmsg iteration ' + str(iters) + ' only sent ' + 
str(num_sent) + - ' out of requested ' + str(num_to_send) + ' for socket ' + s.getsockname()) + ' out of requested ' + str(num_to_send) + ' for socket ' + + str(s.getsockname())) total_pkts_remaining[index] -= num_sent total_pkts_sent[index] += num_sent if total_pkts_remaining[index] <= 0: From 51520037fe15e497f0e7db0630fc3456535ade04 Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Thu, 14 Nov 2024 14:45:24 +1100 Subject: [PATCH 092/175] fix: fix flaky pfc_storm (#15544) Description of PR Summary: Fix flaky pfc_storm stom_restored Fixes # (issue) 30115860 Approach What is the motivation for this PR? Currently we detect flaky in detecting storm restore. The reason was because the storm terminated early and restore itself before LogAnalyzer can detect restoration. As a result, we want to keep this to be stormed long enough. After the end of each test case, we have stop_storm so it would be fine. Signed-off-by: Austin Pham --- tests/pfcwd/test_pfcwd_function.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py index 92c5d12e015..22a082b4fde 100644 --- a/tests/pfcwd/test_pfcwd_function.py +++ b/tests/pfcwd/test_pfcwd_function.py @@ -464,7 +464,9 @@ def storm_setup(self, init=False, detect=True): if self.dut.topo_type == 't2' and self.fanout[self.peer_device].os == 'sonic': gen_file = 'pfc_gen_t2.py' - pfc_send_time = 60 + # We want to set the timer to be high here to keep the storm long enough for manual termination + # in the test instead of having it terminated by itself + pfc_send_time = 240 else: gen_file = 'pfc_gen.py' pfc_send_time = None From 1e6d920e5cb95aba85177ca5aac4faca1f7e0822 Mon Sep 17 00:00:00 2001 From: Justin Wong <51811017+justin-wong-ce@users.noreply.github.com> Date: Wed, 13 Nov 2024 20:21:08 -0800 Subject: [PATCH 093/175] Add missing skip conditions for hash/test_generic_hash.py tests for broadcom asics (#15211) Summary: Add missing skip condition 
for one of the test cases in hash/test_generic_hash.py Continuation of #15091 --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 05eecad5dc5..14f6b68bc0c 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -918,9 +918,9 @@ hash/test_generic_hash.py::test_ecmp_and_lag_hash: hash/test_generic_hash.py::test_ecmp_and_lag_hash[CRC-INNER_IP_PROTOCOL: skip: - reason: "On Mellanox platforms, due to HW limitation, it would not support CRC algorithm on INNER_IP_PROTOCOL field" + reason: "On Mellanox platforms, due to HW limitation, it would not support CRC algorithm on INNER_IP_PROTOCOL field. For broadcom, ECMP hash is not supported in broadcom SAI." conditions: - - "asic_type in ['mellanox']" + - "asic_type in ['broadcom', 'mellanox']" hash/test_generic_hash.py::test_ecmp_hash: skip: From 14f20261e06ed8247eb71eac499c25f5e551d072 Mon Sep 17 00:00:00 2001 From: Riff Date: Wed, 13 Nov 2024 22:43:40 -0800 Subject: [PATCH 094/175] Update topo generator and add the topology for 96 downlinks, 32 uplinks and 2 peer links. (#15454) What is the motivation for this PR? Creating topology can be tedious when the number of ports becomes large. Setting hundreds of VM configurations manually are not efficient. How did you do it? This PR updates the topology generator python script to support T0 topology generation with specific downlinks, uplinks and peer links. Besides, it also creates an example topology t0-isolated-d96u32s2.yml, so we can unblock the device testing with 96 downlinks, 32 uplinks and 2 peer links. How did you verify/test it? 
--- ansible/generate_topo.py | 115 +- ansible/templates/topo_t0-isolated.j2 | 70 ++ ansible/templates/topo_t1-isolated.j2 | 7 +- ansible/vars/topo_t0-isolated-d96u32s2.yml | 948 ++++++++++++++++ ansible/vars/topo_t1-isolated-d128.yml | 640 ++++++----- ansible/vars/topo_t1-isolated-d224u8.yml | 1160 ++++++++++++-------- 6 files changed, 2196 insertions(+), 744 deletions(-) create mode 100644 ansible/templates/topo_t0-isolated.j2 create mode 100644 ansible/vars/topo_t0-isolated-d96u32s2.yml diff --git a/ansible/generate_topo.py b/ansible/generate_topo.py index b78b15bf724..b340e028e32 100755 --- a/ansible/generate_topo.py +++ b/ansible/generate_topo.py @@ -1,7 +1,8 @@ #!/usr/bin/env python3 -from typing import Any, Dict, List -import ipaddress +import copy +from typing import Any, Dict, List, Tuple +from ipaddress import IPv4Network, IPv6Network import click import jinja2 @@ -22,24 +23,31 @@ } +vlan_group_cfgs = [ + {"name": "one_vlan_a", "vlan_count": 1, "v4_prefix": "192.168.0.0/21", "v6_prefix": "fc02:1000::0/64"}, + {"name": "two_vlan_a", "vlan_count": 2, "v4_prefix": "192.168.0.0/22", "v6_prefix": "fc02:100::0/64"}, + {"name": "four_vlan_a", "vlan_count": 4, "v4_prefix": "192.168.0.0/22", "v6_prefix": "fc02:100::0/64"}, +] + + # Utility functions to calculate IP addresses def calc_ipv4_pair(subnet_str, port_id): - subnet = ipaddress.IPv4Network(subnet_str) + subnet = IPv4Network(subnet_str) return (str(subnet.network_address + 2*port_id), str(subnet.network_address + 2*port_id + 1)) def calc_ipv6_pair(subnet_str, port_id): - subnet = ipaddress.IPv6Network(subnet_str) + subnet = IPv6Network(subnet_str) return (str(subnet.network_address + 4*port_id+1), str(subnet.network_address + 4*port_id + 2)) def calc_ipv4(subnet_str, port_id): - subnet = ipaddress.IPv4Network(subnet_str) + subnet = IPv4Network(subnet_str) return str(subnet.network_address + port_id) def calc_ipv6(subnet_str, port_id): - subnet = ipaddress.IPv6Network(subnet_str) + subnet = 
IPv6Network(subnet_str) return str(subnet.network_address + port_id) @@ -72,7 +80,7 @@ def __init__(self, self.dut_intf_ipv4, self.pc_intf_ipv4 = calc_ipv4_pair("10.0.0.0", self.ip_offset) self.dut_intf_ipv6, self.pc_intf_ipv6 = calc_ipv6_pair("FC00::", self.ip_offset) self.loopback_ipv4 = calc_ipv4("100.1.0.0", self.ip_offset+1) - self.loopback_ipv6 = calc_ipv6("2064:100::", self.ip_offset+1) + self.loopback_ipv6 = calc_ipv6("2064:100::", (self.ip_offset+1) * 2**64) # Backplane IPs will go with the VM ID self.bp_ipv4 = calc_ipv4("10.10.246.1", self.vm_offset+1) @@ -85,7 +93,50 @@ def __init__(self, port_id: int): self.port_id = port_id -def generate_topo(role: str, port_count: int, uplink_ports: List[int], peer_ports: List[int]): +class Vlan: + """ Class to represent a VLAN in the topology """ + def __init__(self, + vlan_id: int, + hostifs: List[HostInterface], + v4_prefix: IPv4Network, + v6_prefix: IPv6Network): + + self.id = vlan_id + self.intfs = hostifs + self.port_ids = [hostif.port_id for hostif in hostifs] + self.v4_prefix = copy.deepcopy(v4_prefix) + self.v4_prefix.network_address += 1 + self.v6_prefix = copy.deepcopy(v6_prefix) + self.v6_prefix.network_address += 1 + + +class VlanGroup: + """ Class to represent a group of VLANs in the topology """ + def __init__(self, name: str, vlan_count: int, hostifs: List[HostInterface], v4_prefix: str, v6_prefix: str): + self.name = name + self.vlans = [] + + # Split host if into the number of VLANs + hostif_count_per_vlan = len(hostifs) // vlan_count + hostif_groups = [hostifs[i*hostif_count_per_vlan:(i+1)*hostif_count_per_vlan] for i in range(vlan_count)] + + v4_prefix = IPv4Network(v4_prefix) + v6_prefix = IPv6Network(v6_prefix) + for vlan_index in range(len(hostif_groups)): + vlan = Vlan(1000 + vlan_index * 100, hostif_groups[vlan_index], v4_prefix, v6_prefix) + self.vlans.append(vlan) + + # Move to next subnet based on the prefix length + v4_prefix.network_address += 2**(32 - v4_prefix.prefixlen) + 
v6_prefix.network_address += 2**96 + + +def generate_topo(role: str, + port_count: int, + uplink_ports: List[int], + peer_ports: List[int] + ) -> Tuple[List[VM], List[HostInterface]]: + dut_role_cfg = roles_cfg[role] vm_list = [] @@ -131,10 +182,25 @@ def generate_topo(role: str, port_count: int, uplink_ports: List[int], peer_port return vm_list, hostif_list -def generate_topo_file_content(role: str, - template_file: str, - vm_list: List[VM], - hostif_list: List[HostInterface]): +def generate_vlan_groups(hostif_list: List[HostInterface]) -> List[VlanGroup]: + if len(hostif_list) == 0: + return [] + + vlan_groups = [] + for vlan_group_cfg in vlan_group_cfgs: + vlan_group = VlanGroup(vlan_group_cfg["name"], vlan_group_cfg["vlan_count"], hostif_list, + vlan_group_cfg["v4_prefix"], vlan_group_cfg["v6_prefix"]) + vlan_groups.append(vlan_group) + + return vlan_groups + + +def generate_topo_file(role: str, + template_file: str, + vm_list: List[VM], + hostif_list: List[HostInterface], + vlan_group_list: List[VlanGroup] + ) -> str: with open(template_file) as f: template = jinja2.Template(f.read()) @@ -142,17 +208,18 @@ def generate_topo_file_content(role: str, output = template.render(role=role, dut=roles_cfg[role], vm_list=vm_list, - hostif_list=hostif_list) + hostif_list=hostif_list, + vlan_group_list=vlan_group_list) return output -def output_topo_file(role: str, - keyword: str, - downlink_port_count: int, - uplink_port_count: int, - peer_port_count: int, - file_content: str): +def write_topo_file(role: str, + keyword: str, + downlink_port_count: int, + uplink_port_count: int, + peer_port_count: int, + file_content: str): downlink_keyword = f"d{downlink_port_count}" if downlink_port_count > 0 else "" uplink_keyword = f"u{uplink_port_count}" if uplink_port_count > 0 else "" peer_keyword = f"s{peer_port_count}" if peer_port_count > 0 else "" @@ -166,7 +233,7 @@ def output_topo_file(role: str, @click.command() -@click.option("--role", "-r", required=True, 
type=click.Choice(['t1']), help="Role of the device") +@click.option("--role", "-r", required=True, type=click.Choice(['t0', 't1']), help="Role of the device") @click.option("--keyword", "-k", required=True, type=str, help="Keyword for the topology file") @click.option("--template", "-t", required=True, type=str, help="Path to the Jinja template file") @click.option("--port-count", "-c", required=True, type=int, help="Number of ports on the device") @@ -180,14 +247,16 @@ def main(role: str, keyword: str, template: str, port_count: int, uplinks: str, Examples (in the ansible directory): - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 128 - ./generate_topo.py -r t1 -k isolated -t t1-isolated -c 232 -u 48,49,58,59,164,165,174,175 + - ./generate_topo.py -r t0 -k isolated -t t0-isolated -c 130 -p 128,129 -u 25,26,27,28,29,30,31,32 """ uplink_ports = [int(port) for port in uplinks.split(",")] if uplinks != "" else [] peer_ports = [int(port) for port in peers.split(",")] if peers != "" else [] vm_list, hostif_list = generate_topo(role, port_count, uplink_ports, peer_ports) - file_content = generate_topo_file_content(role, f"templates/topo_{template}.j2", vm_list, hostif_list) - output_topo_file(role, keyword, port_count - len(uplink_ports) - len(peer_ports), len(uplink_ports), - len(peer_ports), file_content) + vlan_group_list = generate_vlan_groups(hostif_list) + file_content = generate_topo_file(role, f"templates/topo_{template}.j2", vm_list, hostif_list, vlan_group_list) + write_topo_file(role, keyword, port_count - len(uplink_ports) - len(peer_ports), len(uplink_ports), + len(peer_ports), file_content) if __name__ == "__main__": diff --git a/ansible/templates/topo_t0-isolated.j2 b/ansible/templates/topo_t0-isolated.j2 new file mode 100644 index 00000000000..4794c9a1a64 --- /dev/null +++ b/ansible/templates/topo_t0-isolated.j2 @@ -0,0 +1,70 @@ +topology: + host_interfaces: +{%- for hostif in hostif_list %} + - {{ hostif.port_id }} +{%- endfor %} +{%- if 
vm_list | length == 0 %} + VMs: {} +{%- else %} + VMs: + {%- for vm in vm_list %} + {{ vm.name }}: + vlans: + - {{ vm.vlans[0] }} + vm_offset: {{ vm.vm_offset }} + {%- endfor %} +{%- endif %} + DUT: + vlan_configs: + default_vlan_config: {{ vlan_group_list[0].name }} +{%- for vlan_group in vlan_group_list %} + {{ vlan_group.name }}: + {%- for vlan in vlan_group.vlans %} + Vlan{{ vlan.id }}: + id: {{ vlan.id }} + intfs: {{ vlan.port_ids }} + prefix: {{ vlan.v4_prefix }} + prefix_v6: {{ vlan.v6_prefix }} + tag: {{ vlan.id }} + {%- endfor %} +{%- endfor %} + +configuration_properties: + common: + dut_asn: {{ dut.asn }} + dut_type: ToRRouter + swrole: leaf + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + tor_asn_start: 65500 + failure_rate: 0 + +configuration: +{%- for vm in vm_list %} + {{vm.name}}: + properties: + - common + bgp: + asn: {{vm.asn}} + peers: + {{vm.peer_asn}}: + - {{vm.dut_intf_ipv4}} + - {{vm.dut_intf_ipv6}} + interfaces: + Loopback0: + ipv4: {{vm.loopback_ipv4}}/32 + ipv6: {{vm.loopback_ipv6}}/128 + Ethernet1: + ipv4: {{vm.pc_intf_ipv4}}/31 + ipv6: {{vm.pc_intf_ipv6}}/126 + bp_interface: + ipv4: {{vm.bp_ipv4}}/24 + ipv6: {{vm.bp_ipv6}}/64 +{%- endfor %} diff --git a/ansible/templates/topo_t1-isolated.j2 b/ansible/templates/topo_t1-isolated.j2 index 0c58680063d..4f03b9eeba1 100644 --- a/ansible/templates/topo_t1-isolated.j2 +++ b/ansible/templates/topo_t1-isolated.j2 @@ -28,6 +28,11 @@ configuration: {{vm.name}}: properties: - common + {%- if vm.role == 't0' %} + - tor + {%- elif vm.role == 't2' %} + - spine + {%- endif %} bgp: asn: {{vm.asn}} peers: @@ -41,7 +46,7 @@ configuration: Ethernet1: ipv4: {{vm.pc_intf_ipv4}}/31 ipv6: {{vm.pc_intf_ipv6}}/126 - bp_interfaces: + bp_interface: ipv4: {{vm.bp_ipv4}}/24 ipv6: {{vm.bp_ipv6}}/64 {%- endfor %} diff --git a/ansible/vars/topo_t0-isolated-d96u32s2.yml 
b/ansible/vars/topo_t0-isolated-d96u32s2.yml new file mode 100644 index 00000000000..24f1a4d2040 --- /dev/null +++ b/ansible/vars/topo_t0-isolated-d96u32s2.yml @@ -0,0 +1,948 @@ +topology: + host_interfaces: + - 0 + - 1 + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + - 11 + - 12 + - 13 + - 14 + - 15 + - 16 + - 17 + - 18 + - 19 + - 20 + - 21 + - 22 + - 23 + - 24 + - 41 + - 42 + - 43 + - 44 + - 45 + - 46 + - 47 + - 48 + - 49 + - 50 + - 51 + - 52 + - 53 + - 54 + - 55 + - 56 + - 57 + - 58 + - 59 + - 60 + - 61 + - 62 + - 63 + - 64 + - 65 + - 66 + - 67 + - 68 + - 69 + - 70 + - 71 + - 72 + - 73 + - 74 + - 75 + - 76 + - 77 + - 78 + - 79 + - 80 + - 81 + - 82 + - 83 + - 84 + - 85 + - 86 + - 87 + - 88 + - 105 + - 106 + - 107 + - 108 + - 109 + - 110 + - 111 + - 112 + - 113 + - 114 + - 115 + - 116 + - 117 + - 118 + - 119 + - 120 + - 121 + - 122 + - 123 + - 124 + - 125 + - 126 + - 127 + VMs: + ARISTA01T1: + vlans: + - 25 + vm_offset: 0 + ARISTA02T1: + vlans: + - 26 + vm_offset: 1 + ARISTA03T1: + vlans: + - 27 + vm_offset: 2 + ARISTA04T1: + vlans: + - 28 + vm_offset: 3 + ARISTA05T1: + vlans: + - 29 + vm_offset: 4 + ARISTA06T1: + vlans: + - 30 + vm_offset: 5 + ARISTA07T1: + vlans: + - 31 + vm_offset: 6 + ARISTA08T1: + vlans: + - 32 + vm_offset: 7 + ARISTA09T1: + vlans: + - 33 + vm_offset: 8 + ARISTA10T1: + vlans: + - 34 + vm_offset: 9 + ARISTA11T1: + vlans: + - 35 + vm_offset: 10 + ARISTA12T1: + vlans: + - 36 + vm_offset: 11 + ARISTA13T1: + vlans: + - 37 + vm_offset: 12 + ARISTA14T1: + vlans: + - 38 + vm_offset: 13 + ARISTA15T1: + vlans: + - 39 + vm_offset: 14 + ARISTA16T1: + vlans: + - 40 + vm_offset: 15 + ARISTA17T1: + vlans: + - 89 + vm_offset: 16 + ARISTA18T1: + vlans: + - 90 + vm_offset: 17 + ARISTA19T1: + vlans: + - 91 + vm_offset: 18 + ARISTA20T1: + vlans: + - 92 + vm_offset: 19 + ARISTA21T1: + vlans: + - 93 + vm_offset: 20 + ARISTA22T1: + vlans: + - 94 + vm_offset: 21 + ARISTA23T1: + vlans: + - 95 + vm_offset: 22 + ARISTA24T1: + vlans: + - 96 + vm_offset: 23 + 
ARISTA25T1: + vlans: + - 97 + vm_offset: 24 + ARISTA26T1: + vlans: + - 98 + vm_offset: 25 + ARISTA27T1: + vlans: + - 99 + vm_offset: 26 + ARISTA28T1: + vlans: + - 100 + vm_offset: 27 + ARISTA29T1: + vlans: + - 101 + vm_offset: 28 + ARISTA30T1: + vlans: + - 102 + vm_offset: 29 + ARISTA31T1: + vlans: + - 103 + vm_offset: 30 + ARISTA32T1: + vlans: + - 104 + vm_offset: 31 + ARISTA01PT0: + vlans: + - 128 + vm_offset: 32 + ARISTA02PT0: + vlans: + - 129 + vm_offset: 33 + DUT: + vlan_configs: + default_vlan_config: one_vlan_a + one_vlan_a: + Vlan1000: + id: 1000 + intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127] + prefix: 192.168.0.1/21 + prefix_v6: fc02:1000::1/64 + tag: 1000 + two_vlan_a: + Vlan1000: + id: 1000 + intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63] + prefix: 192.168.0.1/22 + prefix_v6: fc02:100::1/64 + tag: 1000 + Vlan1100: + id: 1100 + intfs: [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127] + prefix: 192.168.4.1/22 + prefix_v6: fc02:101::1/64 + tag: 1100 + four_vlan_a: + Vlan1000: + id: 1000 + intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + prefix: 192.168.0.1/22 + prefix_v6: fc02:100::1/64 + tag: 1000 + Vlan1100: + id: 1100 + intfs: [24, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63] + prefix: 192.168.4.1/22 + 
prefix_v6: fc02:101::1/64 + tag: 1100 + Vlan1200: + id: 1200 + intfs: [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87] + prefix: 192.168.8.1/22 + prefix_v6: fc02:102::1/64 + tag: 1200 + Vlan1300: + id: 1300 + intfs: [88, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127] + prefix: 192.168.12.1/22 + prefix_v6: fc02:103::1/64 + tag: 1300 + +configuration_properties: + common: + dut_asn: 65100 + dut_type: ToRRouter + swrole: leaf + nhipv4: 10.10.246.254 + nhipv6: FC0A::FF + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 65534 + leaf_asn_start: 64600 + tor_asn_start: 65500 + failure_rate: 0 + +configuration: + ARISTA01T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.0 + - fc00::1 + interfaces: + Loopback0: + ipv4: 100.1.0.1/32 + ipv6: 2064:100:0:1::/128 + Ethernet1: + ipv4: 10.0.0.1/31 + ipv6: fc00::2/126 + bp_interface: + ipv4: 10.10.246.2/24 + ipv6: fc0a::2/64 + ARISTA02T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.2 + - fc00::5 + interfaces: + Loopback0: + ipv4: 100.1.0.2/32 + ipv6: 2064:100:0:2::/128 + Ethernet1: + ipv4: 10.0.0.3/31 + ipv6: fc00::6/126 + bp_interface: + ipv4: 10.10.246.3/24 + ipv6: fc0a::3/64 + ARISTA03T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.4 + - fc00::9 + interfaces: + Loopback0: + ipv4: 100.1.0.3/32 + ipv6: 2064:100:0:3::/128 + Ethernet1: + ipv4: 10.0.0.5/31 + ipv6: fc00::a/126 + bp_interface: + ipv4: 10.10.246.4/24 + ipv6: fc0a::4/64 + ARISTA04T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.6 + - fc00::d + interfaces: + Loopback0: + ipv4: 100.1.0.4/32 + ipv6: 2064:100:0:4::/128 + Ethernet1: + ipv4: 10.0.0.7/31 + ipv6: fc00::e/126 + bp_interface: + ipv4: 10.10.246.5/24 + ipv6: fc0a::5/64 + ARISTA05T1: + properties: + - common + bgp: + 
asn: 64600 + peers: + 65100: + - 10.0.0.8 + - fc00::11 + interfaces: + Loopback0: + ipv4: 100.1.0.5/32 + ipv6: 2064:100:0:5::/128 + Ethernet1: + ipv4: 10.0.0.9/31 + ipv6: fc00::12/126 + bp_interface: + ipv4: 10.10.246.6/24 + ipv6: fc0a::6/64 + ARISTA06T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.10 + - fc00::15 + interfaces: + Loopback0: + ipv4: 100.1.0.6/32 + ipv6: 2064:100:0:6::/128 + Ethernet1: + ipv4: 10.0.0.11/31 + ipv6: fc00::16/126 + bp_interface: + ipv4: 10.10.246.7/24 + ipv6: fc0a::7/64 + ARISTA07T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.12 + - fc00::19 + interfaces: + Loopback0: + ipv4: 100.1.0.7/32 + ipv6: 2064:100:0:7::/128 + Ethernet1: + ipv4: 10.0.0.13/31 + ipv6: fc00::1a/126 + bp_interface: + ipv4: 10.10.246.8/24 + ipv6: fc0a::8/64 + ARISTA08T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.14 + - fc00::1d + interfaces: + Loopback0: + ipv4: 100.1.0.8/32 + ipv6: 2064:100:0:8::/128 + Ethernet1: + ipv4: 10.0.0.15/31 + ipv6: fc00::1e/126 + bp_interface: + ipv4: 10.10.246.9/24 + ipv6: fc0a::9/64 + ARISTA09T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.16 + - fc00::21 + interfaces: + Loopback0: + ipv4: 100.1.0.9/32 + ipv6: 2064:100:0:9::/128 + Ethernet1: + ipv4: 10.0.0.17/31 + ipv6: fc00::22/126 + bp_interface: + ipv4: 10.10.246.10/24 + ipv6: fc0a::a/64 + ARISTA10T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.18 + - fc00::25 + interfaces: + Loopback0: + ipv4: 100.1.0.10/32 + ipv6: 2064:100:0:a::/128 + Ethernet1: + ipv4: 10.0.0.19/31 + ipv6: fc00::26/126 + bp_interface: + ipv4: 10.10.246.11/24 + ipv6: fc0a::b/64 + ARISTA11T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.20 + - fc00::29 + interfaces: + Loopback0: + ipv4: 100.1.0.11/32 + ipv6: 2064:100:0:b::/128 + Ethernet1: + ipv4: 10.0.0.21/31 + ipv6: fc00::2a/126 + bp_interface: + ipv4: 10.10.246.12/24 + ipv6: fc0a::c/64 
+ ARISTA12T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.22 + - fc00::2d + interfaces: + Loopback0: + ipv4: 100.1.0.12/32 + ipv6: 2064:100:0:c::/128 + Ethernet1: + ipv4: 10.0.0.23/31 + ipv6: fc00::2e/126 + bp_interface: + ipv4: 10.10.246.13/24 + ipv6: fc0a::d/64 + ARISTA13T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.24 + - fc00::31 + interfaces: + Loopback0: + ipv4: 100.1.0.13/32 + ipv6: 2064:100:0:d::/128 + Ethernet1: + ipv4: 10.0.0.25/31 + ipv6: fc00::32/126 + bp_interface: + ipv4: 10.10.246.14/24 + ipv6: fc0a::e/64 + ARISTA14T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.26 + - fc00::35 + interfaces: + Loopback0: + ipv4: 100.1.0.14/32 + ipv6: 2064:100:0:e::/128 + Ethernet1: + ipv4: 10.0.0.27/31 + ipv6: fc00::36/126 + bp_interface: + ipv4: 10.10.246.15/24 + ipv6: fc0a::f/64 + ARISTA15T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.28 + - fc00::39 + interfaces: + Loopback0: + ipv4: 100.1.0.15/32 + ipv6: 2064:100:0:f::/128 + Ethernet1: + ipv4: 10.0.0.29/31 + ipv6: fc00::3a/126 + bp_interface: + ipv4: 10.10.246.16/24 + ipv6: fc0a::10/64 + ARISTA16T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.30 + - fc00::3d + interfaces: + Loopback0: + ipv4: 100.1.0.16/32 + ipv6: 2064:100:0:10::/128 + Ethernet1: + ipv4: 10.0.0.31/31 + ipv6: fc00::3e/126 + bp_interface: + ipv4: 10.10.246.17/24 + ipv6: fc0a::11/64 + ARISTA17T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.32 + - fc00::41 + interfaces: + Loopback0: + ipv4: 100.1.0.17/32 + ipv6: 2064:100:0:11::/128 + Ethernet1: + ipv4: 10.0.0.33/31 + ipv6: fc00::42/126 + bp_interface: + ipv4: 10.10.246.18/24 + ipv6: fc0a::12/64 + ARISTA18T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.34 + - fc00::45 + interfaces: + Loopback0: + ipv4: 100.1.0.18/32 + ipv6: 2064:100:0:12::/128 + Ethernet1: + ipv4: 10.0.0.35/31 + ipv6: 
fc00::46/126 + bp_interface: + ipv4: 10.10.246.19/24 + ipv6: fc0a::13/64 + ARISTA19T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.36 + - fc00::49 + interfaces: + Loopback0: + ipv4: 100.1.0.19/32 + ipv6: 2064:100:0:13::/128 + Ethernet1: + ipv4: 10.0.0.37/31 + ipv6: fc00::4a/126 + bp_interface: + ipv4: 10.10.246.20/24 + ipv6: fc0a::14/64 + ARISTA20T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.38 + - fc00::4d + interfaces: + Loopback0: + ipv4: 100.1.0.20/32 + ipv6: 2064:100:0:14::/128 + Ethernet1: + ipv4: 10.0.0.39/31 + ipv6: fc00::4e/126 + bp_interface: + ipv4: 10.10.246.21/24 + ipv6: fc0a::15/64 + ARISTA21T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.40 + - fc00::51 + interfaces: + Loopback0: + ipv4: 100.1.0.21/32 + ipv6: 2064:100:0:15::/128 + Ethernet1: + ipv4: 10.0.0.41/31 + ipv6: fc00::52/126 + bp_interface: + ipv4: 10.10.246.22/24 + ipv6: fc0a::16/64 + ARISTA22T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.42 + - fc00::55 + interfaces: + Loopback0: + ipv4: 100.1.0.22/32 + ipv6: 2064:100:0:16::/128 + Ethernet1: + ipv4: 10.0.0.43/31 + ipv6: fc00::56/126 + bp_interface: + ipv4: 10.10.246.23/24 + ipv6: fc0a::17/64 + ARISTA23T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.44 + - fc00::59 + interfaces: + Loopback0: + ipv4: 100.1.0.23/32 + ipv6: 2064:100:0:17::/128 + Ethernet1: + ipv4: 10.0.0.45/31 + ipv6: fc00::5a/126 + bp_interface: + ipv4: 10.10.246.24/24 + ipv6: fc0a::18/64 + ARISTA24T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.46 + - fc00::5d + interfaces: + Loopback0: + ipv4: 100.1.0.24/32 + ipv6: 2064:100:0:18::/128 + Ethernet1: + ipv4: 10.0.0.47/31 + ipv6: fc00::5e/126 + bp_interface: + ipv4: 10.10.246.25/24 + ipv6: fc0a::19/64 + ARISTA25T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.48 + - fc00::61 + interfaces: + Loopback0: + ipv4: 
100.1.0.25/32 + ipv6: 2064:100:0:19::/128 + Ethernet1: + ipv4: 10.0.0.49/31 + ipv6: fc00::62/126 + bp_interface: + ipv4: 10.10.246.26/24 + ipv6: fc0a::1a/64 + ARISTA26T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.50 + - fc00::65 + interfaces: + Loopback0: + ipv4: 100.1.0.26/32 + ipv6: 2064:100:0:1a::/128 + Ethernet1: + ipv4: 10.0.0.51/31 + ipv6: fc00::66/126 + bp_interface: + ipv4: 10.10.246.27/24 + ipv6: fc0a::1b/64 + ARISTA27T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.52 + - fc00::69 + interfaces: + Loopback0: + ipv4: 100.1.0.27/32 + ipv6: 2064:100:0:1b::/128 + Ethernet1: + ipv4: 10.0.0.53/31 + ipv6: fc00::6a/126 + bp_interface: + ipv4: 10.10.246.28/24 + ipv6: fc0a::1c/64 + ARISTA28T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.54 + - fc00::6d + interfaces: + Loopback0: + ipv4: 100.1.0.28/32 + ipv6: 2064:100:0:1c::/128 + Ethernet1: + ipv4: 10.0.0.55/31 + ipv6: fc00::6e/126 + bp_interface: + ipv4: 10.10.246.29/24 + ipv6: fc0a::1d/64 + ARISTA29T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.56 + - fc00::71 + interfaces: + Loopback0: + ipv4: 100.1.0.29/32 + ipv6: 2064:100:0:1d::/128 + Ethernet1: + ipv4: 10.0.0.57/31 + ipv6: fc00::72/126 + bp_interface: + ipv4: 10.10.246.30/24 + ipv6: fc0a::1e/64 + ARISTA30T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.58 + - fc00::75 + interfaces: + Loopback0: + ipv4: 100.1.0.30/32 + ipv6: 2064:100:0:1e::/128 + Ethernet1: + ipv4: 10.0.0.59/31 + ipv6: fc00::76/126 + bp_interface: + ipv4: 10.10.246.31/24 + ipv6: fc0a::1f/64 + ARISTA31T1: + properties: + - common + bgp: + asn: 64600 + peers: + 65100: + - 10.0.0.60 + - fc00::79 + interfaces: + Loopback0: + ipv4: 100.1.0.31/32 + ipv6: 2064:100:0:1f::/128 + Ethernet1: + ipv4: 10.0.0.61/31 + ipv6: fc00::7a/126 + bp_interface: + ipv4: 10.10.246.32/24 + ipv6: fc0a::20/64 + ARISTA32T1: + properties: + - common + bgp: + asn: 64600 + 
peers: + 65100: + - 10.0.0.62 + - fc00::7d + interfaces: + Loopback0: + ipv4: 100.1.0.32/32 + ipv6: 2064:100:0:20::/128 + Ethernet1: + ipv4: 10.0.0.63/31 + ipv6: fc00::7e/126 + bp_interface: + ipv4: 10.10.246.33/24 + ipv6: fc0a::21/64 + ARISTA01PT0: + properties: + - common + bgp: + asn: 65100 + peers: + 65100: + - 10.0.0.64 + - fc00::81 + interfaces: + Loopback0: + ipv4: 100.1.0.33/32 + ipv6: 2064:100:0:21::/128 + Ethernet1: + ipv4: 10.0.0.65/31 + ipv6: fc00::82/126 + bp_interface: + ipv4: 10.10.246.34/24 + ipv6: fc0a::22/64 + ARISTA02PT0: + properties: + - common + bgp: + asn: 65100 + peers: + 65100: + - 10.0.0.66 + - fc00::85 + interfaces: + Loopback0: + ipv4: 100.1.0.34/32 + ipv6: 2064:100:0:22::/128 + Ethernet1: + ipv4: 10.0.0.67/31 + ipv6: fc00::86/126 + bp_interface: + ipv4: 10.10.246.35/24 + ipv6: fc0a::23/64 diff --git a/ansible/vars/topo_t1-isolated-d128.yml b/ansible/vars/topo_t1-isolated-d128.yml index 1873728bd2e..536e06a7c1c 100644 --- a/ansible/vars/topo_t1-isolated-d128.yml +++ b/ansible/vars/topo_t1-isolated-d128.yml @@ -533,6 +533,7 @@ configuration: ARISTA01T0: properties: - common + - tor bgp: asn: 64001 peers: @@ -542,16 +543,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.1/32 - ipv6: 2064:100::1/128 + ipv6: 2064:100:0:1::/128 Ethernet1: ipv4: 10.0.0.1/31 ipv6: fc00::2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.2/24 ipv6: fc0a::2/64 ARISTA02T0: properties: - common + - tor bgp: asn: 64002 peers: @@ -561,16 +563,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.2/32 - ipv6: 2064:100::2/128 + ipv6: 2064:100:0:2::/128 Ethernet1: ipv4: 10.0.0.3/31 ipv6: fc00::6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.3/24 ipv6: fc0a::3/64 ARISTA03T0: properties: - common + - tor bgp: asn: 64003 peers: @@ -580,16 +583,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.3/32 - ipv6: 2064:100::3/128 + ipv6: 2064:100:0:3::/128 Ethernet1: ipv4: 10.0.0.5/31 ipv6: fc00::a/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.4/24 ipv6: fc0a::4/64 ARISTA04T0: properties: - common + - tor bgp: asn: 64004 peers: @@ -599,16 +603,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.4/32 - ipv6: 2064:100::4/128 + ipv6: 2064:100:0:4::/128 Ethernet1: ipv4: 10.0.0.7/31 ipv6: fc00::e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.5/24 ipv6: fc0a::5/64 ARISTA05T0: properties: - common + - tor bgp: asn: 64005 peers: @@ -618,16 +623,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.5/32 - ipv6: 2064:100::5/128 + ipv6: 2064:100:0:5::/128 Ethernet1: ipv4: 10.0.0.9/31 ipv6: fc00::12/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.6/24 ipv6: fc0a::6/64 ARISTA06T0: properties: - common + - tor bgp: asn: 64006 peers: @@ -637,16 +643,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.6/32 - ipv6: 2064:100::6/128 + ipv6: 2064:100:0:6::/128 Ethernet1: ipv4: 10.0.0.11/31 ipv6: fc00::16/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.7/24 ipv6: fc0a::7/64 ARISTA07T0: properties: - common + - tor bgp: asn: 64007 peers: @@ -656,16 +663,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.7/32 - ipv6: 2064:100::7/128 + ipv6: 2064:100:0:7::/128 Ethernet1: ipv4: 10.0.0.13/31 ipv6: fc00::1a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.8/24 ipv6: fc0a::8/64 ARISTA08T0: properties: - common + - tor bgp: asn: 64008 peers: @@ -675,16 +683,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.8/32 - ipv6: 2064:100::8/128 + ipv6: 2064:100:0:8::/128 Ethernet1: ipv4: 10.0.0.15/31 ipv6: fc00::1e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.9/24 ipv6: fc0a::9/64 ARISTA09T0: properties: - common + - tor bgp: asn: 64009 peers: @@ -694,16 +703,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.9/32 - ipv6: 2064:100::9/128 + ipv6: 2064:100:0:9::/128 Ethernet1: ipv4: 10.0.0.17/31 ipv6: fc00::22/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.10/24 ipv6: fc0a::a/64 ARISTA10T0: properties: - common + - tor bgp: asn: 64010 peers: @@ -713,16 +723,17 @@ 
configuration: interfaces: Loopback0: ipv4: 100.1.0.10/32 - ipv6: 2064:100::a/128 + ipv6: 2064:100:0:a::/128 Ethernet1: ipv4: 10.0.0.19/31 ipv6: fc00::26/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.11/24 ipv6: fc0a::b/64 ARISTA11T0: properties: - common + - tor bgp: asn: 64011 peers: @@ -732,16 +743,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.11/32 - ipv6: 2064:100::b/128 + ipv6: 2064:100:0:b::/128 Ethernet1: ipv4: 10.0.0.21/31 ipv6: fc00::2a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.12/24 ipv6: fc0a::c/64 ARISTA12T0: properties: - common + - tor bgp: asn: 64012 peers: @@ -751,16 +763,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.12/32 - ipv6: 2064:100::c/128 + ipv6: 2064:100:0:c::/128 Ethernet1: ipv4: 10.0.0.23/31 ipv6: fc00::2e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.13/24 ipv6: fc0a::d/64 ARISTA13T0: properties: - common + - tor bgp: asn: 64013 peers: @@ -770,16 +783,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.13/32 - ipv6: 2064:100::d/128 + ipv6: 2064:100:0:d::/128 Ethernet1: ipv4: 10.0.0.25/31 ipv6: fc00::32/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.14/24 ipv6: fc0a::e/64 ARISTA14T0: properties: - common + - tor bgp: asn: 64014 peers: @@ -789,16 +803,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.14/32 - ipv6: 2064:100::e/128 + ipv6: 2064:100:0:e::/128 Ethernet1: ipv4: 10.0.0.27/31 ipv6: fc00::36/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.15/24 ipv6: fc0a::f/64 ARISTA15T0: properties: - common + - tor bgp: asn: 64015 peers: @@ -808,16 +823,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.15/32 - ipv6: 2064:100::f/128 + ipv6: 2064:100:0:f::/128 Ethernet1: ipv4: 10.0.0.29/31 ipv6: fc00::3a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.16/24 ipv6: fc0a::10/64 ARISTA16T0: properties: - common + - tor bgp: asn: 64016 peers: @@ -827,16 +843,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.16/32 - ipv6: 2064:100::10/128 + ipv6: 
2064:100:0:10::/128 Ethernet1: ipv4: 10.0.0.31/31 ipv6: fc00::3e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.17/24 ipv6: fc0a::11/64 ARISTA17T0: properties: - common + - tor bgp: asn: 64017 peers: @@ -846,16 +863,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.17/32 - ipv6: 2064:100::11/128 + ipv6: 2064:100:0:11::/128 Ethernet1: ipv4: 10.0.0.33/31 ipv6: fc00::42/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.18/24 ipv6: fc0a::12/64 ARISTA18T0: properties: - common + - tor bgp: asn: 64018 peers: @@ -865,16 +883,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.18/32 - ipv6: 2064:100::12/128 + ipv6: 2064:100:0:12::/128 Ethernet1: ipv4: 10.0.0.35/31 ipv6: fc00::46/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.19/24 ipv6: fc0a::13/64 ARISTA19T0: properties: - common + - tor bgp: asn: 64019 peers: @@ -884,16 +903,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.19/32 - ipv6: 2064:100::13/128 + ipv6: 2064:100:0:13::/128 Ethernet1: ipv4: 10.0.0.37/31 ipv6: fc00::4a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.20/24 ipv6: fc0a::14/64 ARISTA20T0: properties: - common + - tor bgp: asn: 64020 peers: @@ -903,16 +923,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.20/32 - ipv6: 2064:100::14/128 + ipv6: 2064:100:0:14::/128 Ethernet1: ipv4: 10.0.0.39/31 ipv6: fc00::4e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.21/24 ipv6: fc0a::15/64 ARISTA21T0: properties: - common + - tor bgp: asn: 64021 peers: @@ -922,16 +943,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.21/32 - ipv6: 2064:100::15/128 + ipv6: 2064:100:0:15::/128 Ethernet1: ipv4: 10.0.0.41/31 ipv6: fc00::52/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.22/24 ipv6: fc0a::16/64 ARISTA22T0: properties: - common + - tor bgp: asn: 64022 peers: @@ -941,16 +963,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.22/32 - ipv6: 2064:100::16/128 + ipv6: 2064:100:0:16::/128 Ethernet1: ipv4: 10.0.0.43/31 ipv6: fc00::56/126 - bp_interfaces: 
+ bp_interface: ipv4: 10.10.246.23/24 ipv6: fc0a::17/64 ARISTA23T0: properties: - common + - tor bgp: asn: 64023 peers: @@ -960,16 +983,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.23/32 - ipv6: 2064:100::17/128 + ipv6: 2064:100:0:17::/128 Ethernet1: ipv4: 10.0.0.45/31 ipv6: fc00::5a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.24/24 ipv6: fc0a::18/64 ARISTA24T0: properties: - common + - tor bgp: asn: 64024 peers: @@ -979,16 +1003,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.24/32 - ipv6: 2064:100::18/128 + ipv6: 2064:100:0:18::/128 Ethernet1: ipv4: 10.0.0.47/31 ipv6: fc00::5e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.25/24 ipv6: fc0a::19/64 ARISTA25T0: properties: - common + - tor bgp: asn: 64025 peers: @@ -998,16 +1023,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.25/32 - ipv6: 2064:100::19/128 + ipv6: 2064:100:0:19::/128 Ethernet1: ipv4: 10.0.0.49/31 ipv6: fc00::62/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.26/24 ipv6: fc0a::1a/64 ARISTA26T0: properties: - common + - tor bgp: asn: 64026 peers: @@ -1017,16 +1043,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.26/32 - ipv6: 2064:100::1a/128 + ipv6: 2064:100:0:1a::/128 Ethernet1: ipv4: 10.0.0.51/31 ipv6: fc00::66/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.27/24 ipv6: fc0a::1b/64 ARISTA27T0: properties: - common + - tor bgp: asn: 64027 peers: @@ -1036,16 +1063,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.27/32 - ipv6: 2064:100::1b/128 + ipv6: 2064:100:0:1b::/128 Ethernet1: ipv4: 10.0.0.53/31 ipv6: fc00::6a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.28/24 ipv6: fc0a::1c/64 ARISTA28T0: properties: - common + - tor bgp: asn: 64028 peers: @@ -1055,16 +1083,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.28/32 - ipv6: 2064:100::1c/128 + ipv6: 2064:100:0:1c::/128 Ethernet1: ipv4: 10.0.0.55/31 ipv6: fc00::6e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.29/24 ipv6: fc0a::1d/64 ARISTA29T0: 
properties: - common + - tor bgp: asn: 64029 peers: @@ -1074,16 +1103,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.29/32 - ipv6: 2064:100::1d/128 + ipv6: 2064:100:0:1d::/128 Ethernet1: ipv4: 10.0.0.57/31 ipv6: fc00::72/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.30/24 ipv6: fc0a::1e/64 ARISTA30T0: properties: - common + - tor bgp: asn: 64030 peers: @@ -1093,16 +1123,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.30/32 - ipv6: 2064:100::1e/128 + ipv6: 2064:100:0:1e::/128 Ethernet1: ipv4: 10.0.0.59/31 ipv6: fc00::76/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.31/24 ipv6: fc0a::1f/64 ARISTA31T0: properties: - common + - tor bgp: asn: 64031 peers: @@ -1112,16 +1143,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.31/32 - ipv6: 2064:100::1f/128 + ipv6: 2064:100:0:1f::/128 Ethernet1: ipv4: 10.0.0.61/31 ipv6: fc00::7a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.32/24 ipv6: fc0a::20/64 ARISTA32T0: properties: - common + - tor bgp: asn: 64032 peers: @@ -1131,16 +1163,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.32/32 - ipv6: 2064:100::20/128 + ipv6: 2064:100:0:20::/128 Ethernet1: ipv4: 10.0.0.63/31 ipv6: fc00::7e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.33/24 ipv6: fc0a::21/64 ARISTA33T0: properties: - common + - tor bgp: asn: 64033 peers: @@ -1150,16 +1183,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.33/32 - ipv6: 2064:100::21/128 + ipv6: 2064:100:0:21::/128 Ethernet1: ipv4: 10.0.0.65/31 ipv6: fc00::82/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.34/24 ipv6: fc0a::22/64 ARISTA34T0: properties: - common + - tor bgp: asn: 64034 peers: @@ -1169,16 +1203,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.34/32 - ipv6: 2064:100::22/128 + ipv6: 2064:100:0:22::/128 Ethernet1: ipv4: 10.0.0.67/31 ipv6: fc00::86/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.35/24 ipv6: fc0a::23/64 ARISTA35T0: properties: - common + - tor bgp: asn: 64035 peers: @@ -1188,16 +1223,17 
@@ configuration: interfaces: Loopback0: ipv4: 100.1.0.35/32 - ipv6: 2064:100::23/128 + ipv6: 2064:100:0:23::/128 Ethernet1: ipv4: 10.0.0.69/31 ipv6: fc00::8a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.36/24 ipv6: fc0a::24/64 ARISTA36T0: properties: - common + - tor bgp: asn: 64036 peers: @@ -1207,16 +1243,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.36/32 - ipv6: 2064:100::24/128 + ipv6: 2064:100:0:24::/128 Ethernet1: ipv4: 10.0.0.71/31 ipv6: fc00::8e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.37/24 ipv6: fc0a::25/64 ARISTA37T0: properties: - common + - tor bgp: asn: 64037 peers: @@ -1226,16 +1263,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.37/32 - ipv6: 2064:100::25/128 + ipv6: 2064:100:0:25::/128 Ethernet1: ipv4: 10.0.0.73/31 ipv6: fc00::92/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.38/24 ipv6: fc0a::26/64 ARISTA38T0: properties: - common + - tor bgp: asn: 64038 peers: @@ -1245,16 +1283,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.38/32 - ipv6: 2064:100::26/128 + ipv6: 2064:100:0:26::/128 Ethernet1: ipv4: 10.0.0.75/31 ipv6: fc00::96/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.39/24 ipv6: fc0a::27/64 ARISTA39T0: properties: - common + - tor bgp: asn: 64039 peers: @@ -1264,16 +1303,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.39/32 - ipv6: 2064:100::27/128 + ipv6: 2064:100:0:27::/128 Ethernet1: ipv4: 10.0.0.77/31 ipv6: fc00::9a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.40/24 ipv6: fc0a::28/64 ARISTA40T0: properties: - common + - tor bgp: asn: 64040 peers: @@ -1283,16 +1323,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.40/32 - ipv6: 2064:100::28/128 + ipv6: 2064:100:0:28::/128 Ethernet1: ipv4: 10.0.0.79/31 ipv6: fc00::9e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.41/24 ipv6: fc0a::29/64 ARISTA41T0: properties: - common + - tor bgp: asn: 64041 peers: @@ -1302,16 +1343,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.41/32 - ipv6: 
2064:100::29/128 + ipv6: 2064:100:0:29::/128 Ethernet1: ipv4: 10.0.0.81/31 ipv6: fc00::a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.42/24 ipv6: fc0a::2a/64 ARISTA42T0: properties: - common + - tor bgp: asn: 64042 peers: @@ -1321,16 +1363,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.42/32 - ipv6: 2064:100::2a/128 + ipv6: 2064:100:0:2a::/128 Ethernet1: ipv4: 10.0.0.83/31 ipv6: fc00::a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.43/24 ipv6: fc0a::2b/64 ARISTA43T0: properties: - common + - tor bgp: asn: 64043 peers: @@ -1340,16 +1383,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.43/32 - ipv6: 2064:100::2b/128 + ipv6: 2064:100:0:2b::/128 Ethernet1: ipv4: 10.0.0.85/31 ipv6: fc00::aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.44/24 ipv6: fc0a::2c/64 ARISTA44T0: properties: - common + - tor bgp: asn: 64044 peers: @@ -1359,16 +1403,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.44/32 - ipv6: 2064:100::2c/128 + ipv6: 2064:100:0:2c::/128 Ethernet1: ipv4: 10.0.0.87/31 ipv6: fc00::ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.45/24 ipv6: fc0a::2d/64 ARISTA45T0: properties: - common + - tor bgp: asn: 64045 peers: @@ -1378,16 +1423,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.45/32 - ipv6: 2064:100::2d/128 + ipv6: 2064:100:0:2d::/128 Ethernet1: ipv4: 10.0.0.89/31 ipv6: fc00::b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.46/24 ipv6: fc0a::2e/64 ARISTA46T0: properties: - common + - tor bgp: asn: 64046 peers: @@ -1397,16 +1443,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.46/32 - ipv6: 2064:100::2e/128 + ipv6: 2064:100:0:2e::/128 Ethernet1: ipv4: 10.0.0.91/31 ipv6: fc00::b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.47/24 ipv6: fc0a::2f/64 ARISTA47T0: properties: - common + - tor bgp: asn: 64047 peers: @@ -1416,16 +1463,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.47/32 - ipv6: 2064:100::2f/128 + ipv6: 2064:100:0:2f::/128 Ethernet1: ipv4: 
10.0.0.93/31 ipv6: fc00::ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.48/24 ipv6: fc0a::30/64 ARISTA48T0: properties: - common + - tor bgp: asn: 64048 peers: @@ -1435,16 +1483,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.48/32 - ipv6: 2064:100::30/128 + ipv6: 2064:100:0:30::/128 Ethernet1: ipv4: 10.0.0.95/31 ipv6: fc00::be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.49/24 ipv6: fc0a::31/64 ARISTA49T0: properties: - common + - tor bgp: asn: 64049 peers: @@ -1454,16 +1503,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.49/32 - ipv6: 2064:100::31/128 + ipv6: 2064:100:0:31::/128 Ethernet1: ipv4: 10.0.0.97/31 ipv6: fc00::c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 ARISTA50T0: properties: - common + - tor bgp: asn: 64050 peers: @@ -1473,16 +1523,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.50/32 - ipv6: 2064:100::32/128 + ipv6: 2064:100:0:32::/128 Ethernet1: ipv4: 10.0.0.99/31 ipv6: fc00::c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 ARISTA51T0: properties: - common + - tor bgp: asn: 64051 peers: @@ -1492,16 +1543,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.51/32 - ipv6: 2064:100::33/128 + ipv6: 2064:100:0:33::/128 Ethernet1: ipv4: 10.0.0.101/31 ipv6: fc00::ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.52/24 ipv6: fc0a::34/64 ARISTA52T0: properties: - common + - tor bgp: asn: 64052 peers: @@ -1511,16 +1563,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.52/32 - ipv6: 2064:100::34/128 + ipv6: 2064:100:0:34::/128 Ethernet1: ipv4: 10.0.0.103/31 ipv6: fc00::ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 ARISTA53T0: properties: - common + - tor bgp: asn: 64053 peers: @@ -1530,16 +1583,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.53/32 - ipv6: 2064:100::35/128 + ipv6: 2064:100:0:35::/128 Ethernet1: ipv4: 10.0.0.105/31 ipv6: fc00::d2/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.54/24 ipv6: fc0a::36/64 ARISTA54T0: properties: - common + - tor bgp: asn: 64054 peers: @@ -1549,16 +1603,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.54/32 - ipv6: 2064:100::36/128 + ipv6: 2064:100:0:36::/128 Ethernet1: ipv4: 10.0.0.107/31 ipv6: fc00::d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 ARISTA55T0: properties: - common + - tor bgp: asn: 64055 peers: @@ -1568,16 +1623,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.55/32 - ipv6: 2064:100::37/128 + ipv6: 2064:100:0:37::/128 Ethernet1: ipv4: 10.0.0.109/31 ipv6: fc00::da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 ARISTA56T0: properties: - common + - tor bgp: asn: 64056 peers: @@ -1587,16 +1643,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.56/32 - ipv6: 2064:100::38/128 + ipv6: 2064:100:0:38::/128 Ethernet1: ipv4: 10.0.0.111/31 ipv6: fc00::de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 ARISTA57T0: properties: - common + - tor bgp: asn: 64057 peers: @@ -1606,16 +1663,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.57/32 - ipv6: 2064:100::39/128 + ipv6: 2064:100:0:39::/128 Ethernet1: ipv4: 10.0.0.113/31 ipv6: fc00::e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 ARISTA58T0: properties: - common + - tor bgp: asn: 64058 peers: @@ -1625,16 +1683,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.58/32 - ipv6: 2064:100::3a/128 + ipv6: 2064:100:0:3a::/128 Ethernet1: ipv4: 10.0.0.115/31 ipv6: fc00::e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 ARISTA59T0: properties: - common + - tor bgp: asn: 64059 peers: @@ -1644,16 +1703,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.59/32 - ipv6: 2064:100::3b/128 + ipv6: 2064:100:0:3b::/128 Ethernet1: ipv4: 10.0.0.117/31 ipv6: fc00::ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 ARISTA60T0: properties: - common 
+ - tor bgp: asn: 64060 peers: @@ -1663,16 +1723,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.60/32 - ipv6: 2064:100::3c/128 + ipv6: 2064:100:0:3c::/128 Ethernet1: ipv4: 10.0.0.119/31 ipv6: fc00::ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 ARISTA61T0: properties: - common + - tor bgp: asn: 64061 peers: @@ -1682,16 +1743,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.61/32 - ipv6: 2064:100::3d/128 + ipv6: 2064:100:0:3d::/128 Ethernet1: ipv4: 10.0.0.121/31 ipv6: fc00::f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 ARISTA62T0: properties: - common + - tor bgp: asn: 64062 peers: @@ -1701,16 +1763,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.62/32 - ipv6: 2064:100::3e/128 + ipv6: 2064:100:0:3e::/128 Ethernet1: ipv4: 10.0.0.123/31 ipv6: fc00::f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 ARISTA63T0: properties: - common + - tor bgp: asn: 64063 peers: @@ -1720,16 +1783,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.63/32 - ipv6: 2064:100::3f/128 + ipv6: 2064:100:0:3f::/128 Ethernet1: ipv4: 10.0.0.125/31 ipv6: fc00::fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 ARISTA64T0: properties: - common + - tor bgp: asn: 64064 peers: @@ -1739,16 +1803,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.64/32 - ipv6: 2064:100::40/128 + ipv6: 2064:100:0:40::/128 Ethernet1: ipv4: 10.0.0.127/31 ipv6: fc00::fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 ARISTA65T0: properties: - common + - tor bgp: asn: 64065 peers: @@ -1758,16 +1823,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.65/32 - ipv6: 2064:100::41/128 + ipv6: 2064:100:0:41::/128 Ethernet1: ipv4: 10.0.0.129/31 ipv6: fc00::102/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 ARISTA66T0: properties: - common + - tor bgp: asn: 64066 peers: @@ -1777,16 +1843,17 @@ 
configuration: interfaces: Loopback0: ipv4: 100.1.0.66/32 - ipv6: 2064:100::42/128 + ipv6: 2064:100:0:42::/128 Ethernet1: ipv4: 10.0.0.131/31 ipv6: fc00::106/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.67/24 ipv6: fc0a::43/64 ARISTA67T0: properties: - common + - tor bgp: asn: 64067 peers: @@ -1796,16 +1863,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.67/32 - ipv6: 2064:100::43/128 + ipv6: 2064:100:0:43::/128 Ethernet1: ipv4: 10.0.0.133/31 ipv6: fc00::10a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 ARISTA68T0: properties: - common + - tor bgp: asn: 64068 peers: @@ -1815,16 +1883,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.68/32 - ipv6: 2064:100::44/128 + ipv6: 2064:100:0:44::/128 Ethernet1: ipv4: 10.0.0.135/31 ipv6: fc00::10e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 ARISTA69T0: properties: - common + - tor bgp: asn: 64069 peers: @@ -1834,16 +1903,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.69/32 - ipv6: 2064:100::45/128 + ipv6: 2064:100:0:45::/128 Ethernet1: ipv4: 10.0.0.137/31 ipv6: fc00::112/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 ARISTA70T0: properties: - common + - tor bgp: asn: 64070 peers: @@ -1853,16 +1923,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.70/32 - ipv6: 2064:100::46/128 + ipv6: 2064:100:0:46::/128 Ethernet1: ipv4: 10.0.0.139/31 ipv6: fc00::116/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.71/24 ipv6: fc0a::47/64 ARISTA71T0: properties: - common + - tor bgp: asn: 64071 peers: @@ -1872,16 +1943,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.71/32 - ipv6: 2064:100::47/128 + ipv6: 2064:100:0:47::/128 Ethernet1: ipv4: 10.0.0.141/31 ipv6: fc00::11a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 ARISTA72T0: properties: - common + - tor bgp: asn: 64072 peers: @@ -1891,16 +1963,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.72/32 - 
ipv6: 2064:100::48/128 + ipv6: 2064:100:0:48::/128 Ethernet1: ipv4: 10.0.0.143/31 ipv6: fc00::11e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 ARISTA73T0: properties: - common + - tor bgp: asn: 64073 peers: @@ -1910,16 +1983,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.73/32 - ipv6: 2064:100::49/128 + ipv6: 2064:100:0:49::/128 Ethernet1: ipv4: 10.0.0.145/31 ipv6: fc00::122/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 ARISTA74T0: properties: - common + - tor bgp: asn: 64074 peers: @@ -1929,16 +2003,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.74/32 - ipv6: 2064:100::4a/128 + ipv6: 2064:100:0:4a::/128 Ethernet1: ipv4: 10.0.0.147/31 ipv6: fc00::126/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 ARISTA75T0: properties: - common + - tor bgp: asn: 64075 peers: @@ -1948,16 +2023,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.75/32 - ipv6: 2064:100::4b/128 + ipv6: 2064:100:0:4b::/128 Ethernet1: ipv4: 10.0.0.149/31 ipv6: fc00::12a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 ARISTA76T0: properties: - common + - tor bgp: asn: 64076 peers: @@ -1967,16 +2043,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.76/32 - ipv6: 2064:100::4c/128 + ipv6: 2064:100:0:4c::/128 Ethernet1: ipv4: 10.0.0.151/31 ipv6: fc00::12e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 ARISTA77T0: properties: - common + - tor bgp: asn: 64077 peers: @@ -1986,16 +2063,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.77/32 - ipv6: 2064:100::4d/128 + ipv6: 2064:100:0:4d::/128 Ethernet1: ipv4: 10.0.0.153/31 ipv6: fc00::132/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 ARISTA78T0: properties: - common + - tor bgp: asn: 64078 peers: @@ -2005,16 +2083,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.78/32 - ipv6: 2064:100::4e/128 + ipv6: 2064:100:0:4e::/128 Ethernet1: 
ipv4: 10.0.0.155/31 ipv6: fc00::136/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 ARISTA79T0: properties: - common + - tor bgp: asn: 64079 peers: @@ -2024,16 +2103,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.79/32 - ipv6: 2064:100::4f/128 + ipv6: 2064:100:0:4f::/128 Ethernet1: ipv4: 10.0.0.157/31 ipv6: fc00::13a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 ARISTA80T0: properties: - common + - tor bgp: asn: 64080 peers: @@ -2043,16 +2123,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.80/32 - ipv6: 2064:100::50/128 + ipv6: 2064:100:0:50::/128 Ethernet1: ipv4: 10.0.0.159/31 ipv6: fc00::13e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 ARISTA81T0: properties: - common + - tor bgp: asn: 64081 peers: @@ -2062,16 +2143,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.81/32 - ipv6: 2064:100::51/128 + ipv6: 2064:100:0:51::/128 Ethernet1: ipv4: 10.0.0.161/31 ipv6: fc00::142/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 ARISTA82T0: properties: - common + - tor bgp: asn: 64082 peers: @@ -2081,16 +2163,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.82/32 - ipv6: 2064:100::52/128 + ipv6: 2064:100:0:52::/128 Ethernet1: ipv4: 10.0.0.163/31 ipv6: fc00::146/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 ARISTA83T0: properties: - common + - tor bgp: asn: 64083 peers: @@ -2100,16 +2183,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.83/32 - ipv6: 2064:100::53/128 + ipv6: 2064:100:0:53::/128 Ethernet1: ipv4: 10.0.0.165/31 ipv6: fc00::14a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 ARISTA84T0: properties: - common + - tor bgp: asn: 64084 peers: @@ -2119,16 +2203,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.84/32 - ipv6: 2064:100::54/128 + ipv6: 2064:100:0:54::/128 Ethernet1: ipv4: 10.0.0.167/31 ipv6: fc00::14e/126 - bp_interfaces: + 
bp_interface: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 ARISTA85T0: properties: - common + - tor bgp: asn: 64085 peers: @@ -2138,16 +2223,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.85/32 - ipv6: 2064:100::55/128 + ipv6: 2064:100:0:55::/128 Ethernet1: ipv4: 10.0.0.169/31 ipv6: fc00::152/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 ARISTA86T0: properties: - common + - tor bgp: asn: 64086 peers: @@ -2157,16 +2243,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.86/32 - ipv6: 2064:100::56/128 + ipv6: 2064:100:0:56::/128 Ethernet1: ipv4: 10.0.0.171/31 ipv6: fc00::156/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.87/24 ipv6: fc0a::57/64 ARISTA87T0: properties: - common + - tor bgp: asn: 64087 peers: @@ -2176,16 +2263,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.87/32 - ipv6: 2064:100::57/128 + ipv6: 2064:100:0:57::/128 Ethernet1: ipv4: 10.0.0.173/31 ipv6: fc00::15a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 ARISTA88T0: properties: - common + - tor bgp: asn: 64088 peers: @@ -2195,16 +2283,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.88/32 - ipv6: 2064:100::58/128 + ipv6: 2064:100:0:58::/128 Ethernet1: ipv4: 10.0.0.175/31 ipv6: fc00::15e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 ARISTA89T0: properties: - common + - tor bgp: asn: 64089 peers: @@ -2214,16 +2303,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.89/32 - ipv6: 2064:100::59/128 + ipv6: 2064:100:0:59::/128 Ethernet1: ipv4: 10.0.0.177/31 ipv6: fc00::162/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 ARISTA90T0: properties: - common + - tor bgp: asn: 64090 peers: @@ -2233,16 +2323,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.90/32 - ipv6: 2064:100::5a/128 + ipv6: 2064:100:0:5a::/128 Ethernet1: ipv4: 10.0.0.179/31 ipv6: fc00::166/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 
ARISTA91T0: properties: - common + - tor bgp: asn: 64091 peers: @@ -2252,16 +2343,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.91/32 - ipv6: 2064:100::5b/128 + ipv6: 2064:100:0:5b::/128 Ethernet1: ipv4: 10.0.0.181/31 ipv6: fc00::16a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 ARISTA92T0: properties: - common + - tor bgp: asn: 64092 peers: @@ -2271,16 +2363,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.92/32 - ipv6: 2064:100::5c/128 + ipv6: 2064:100:0:5c::/128 Ethernet1: ipv4: 10.0.0.183/31 ipv6: fc00::16e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 ARISTA93T0: properties: - common + - tor bgp: asn: 64093 peers: @@ -2290,16 +2383,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.93/32 - ipv6: 2064:100::5d/128 + ipv6: 2064:100:0:5d::/128 Ethernet1: ipv4: 10.0.0.185/31 ipv6: fc00::172/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 ARISTA94T0: properties: - common + - tor bgp: asn: 64094 peers: @@ -2309,16 +2403,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.94/32 - ipv6: 2064:100::5e/128 + ipv6: 2064:100:0:5e::/128 Ethernet1: ipv4: 10.0.0.187/31 ipv6: fc00::176/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 ARISTA95T0: properties: - common + - tor bgp: asn: 64095 peers: @@ -2328,16 +2423,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.95/32 - ipv6: 2064:100::5f/128 + ipv6: 2064:100:0:5f::/128 Ethernet1: ipv4: 10.0.0.189/31 ipv6: fc00::17a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 ARISTA96T0: properties: - common + - tor bgp: asn: 64096 peers: @@ -2347,16 +2443,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.96/32 - ipv6: 2064:100::60/128 + ipv6: 2064:100:0:60::/128 Ethernet1: ipv4: 10.0.0.191/31 ipv6: fc00::17e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 ARISTA97T0: properties: - common + - tor bgp: asn: 64097 
peers: @@ -2366,16 +2463,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.97/32 - ipv6: 2064:100::61/128 + ipv6: 2064:100:0:61::/128 Ethernet1: ipv4: 10.0.0.193/31 ipv6: fc00::182/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 ARISTA98T0: properties: - common + - tor bgp: asn: 64098 peers: @@ -2385,16 +2483,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.98/32 - ipv6: 2064:100::62/128 + ipv6: 2064:100:0:62::/128 Ethernet1: ipv4: 10.0.0.195/31 ipv6: fc00::186/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 ARISTA99T0: properties: - common + - tor bgp: asn: 64099 peers: @@ -2404,16 +2503,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.99/32 - ipv6: 2064:100::63/128 + ipv6: 2064:100:0:63::/128 Ethernet1: ipv4: 10.0.0.197/31 ipv6: fc00::18a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 ARISTA100T0: properties: - common + - tor bgp: asn: 64100 peers: @@ -2423,16 +2523,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.100/32 - ipv6: 2064:100::64/128 + ipv6: 2064:100:0:64::/128 Ethernet1: ipv4: 10.0.0.199/31 ipv6: fc00::18e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 ARISTA101T0: properties: - common + - tor bgp: asn: 64101 peers: @@ -2442,16 +2543,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.101/32 - ipv6: 2064:100::65/128 + ipv6: 2064:100:0:65::/128 Ethernet1: ipv4: 10.0.0.201/31 ipv6: fc00::192/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 ARISTA102T0: properties: - common + - tor bgp: asn: 64102 peers: @@ -2461,16 +2563,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.102/32 - ipv6: 2064:100::66/128 + ipv6: 2064:100:0:66::/128 Ethernet1: ipv4: 10.0.0.203/31 ipv6: fc00::196/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 ARISTA103T0: properties: - common + - tor bgp: asn: 64103 peers: @@ -2480,16 +2583,17 @@ configuration: 
interfaces: Loopback0: ipv4: 100.1.0.103/32 - ipv6: 2064:100::67/128 + ipv6: 2064:100:0:67::/128 Ethernet1: ipv4: 10.0.0.205/31 ipv6: fc00::19a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 ARISTA104T0: properties: - common + - tor bgp: asn: 64104 peers: @@ -2499,16 +2603,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.104/32 - ipv6: 2064:100::68/128 + ipv6: 2064:100:0:68::/128 Ethernet1: ipv4: 10.0.0.207/31 ipv6: fc00::19e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 ARISTA105T0: properties: - common + - tor bgp: asn: 64105 peers: @@ -2518,16 +2623,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.105/32 - ipv6: 2064:100::69/128 + ipv6: 2064:100:0:69::/128 Ethernet1: ipv4: 10.0.0.209/31 ipv6: fc00::1a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 ARISTA106T0: properties: - common + - tor bgp: asn: 64106 peers: @@ -2537,16 +2643,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.106/32 - ipv6: 2064:100::6a/128 + ipv6: 2064:100:0:6a::/128 Ethernet1: ipv4: 10.0.0.211/31 ipv6: fc00::1a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 ARISTA107T0: properties: - common + - tor bgp: asn: 64107 peers: @@ -2556,16 +2663,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.107/32 - ipv6: 2064:100::6b/128 + ipv6: 2064:100:0:6b::/128 Ethernet1: ipv4: 10.0.0.213/31 ipv6: fc00::1aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 ARISTA108T0: properties: - common + - tor bgp: asn: 64108 peers: @@ -2575,16 +2683,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.108/32 - ipv6: 2064:100::6c/128 + ipv6: 2064:100:0:6c::/128 Ethernet1: ipv4: 10.0.0.215/31 ipv6: fc00::1ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 ARISTA109T0: properties: - common + - tor bgp: asn: 64109 peers: @@ -2594,16 +2703,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.109/32 
- ipv6: 2064:100::6d/128 + ipv6: 2064:100:0:6d::/128 Ethernet1: ipv4: 10.0.0.217/31 ipv6: fc00::1b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 ARISTA110T0: properties: - common + - tor bgp: asn: 64110 peers: @@ -2613,16 +2723,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.110/32 - ipv6: 2064:100::6e/128 + ipv6: 2064:100:0:6e::/128 Ethernet1: ipv4: 10.0.0.219/31 ipv6: fc00::1b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 ARISTA111T0: properties: - common + - tor bgp: asn: 64111 peers: @@ -2632,16 +2743,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.111/32 - ipv6: 2064:100::6f/128 + ipv6: 2064:100:0:6f::/128 Ethernet1: ipv4: 10.0.0.221/31 ipv6: fc00::1ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 ARISTA112T0: properties: - common + - tor bgp: asn: 64112 peers: @@ -2651,16 +2763,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.112/32 - ipv6: 2064:100::70/128 + ipv6: 2064:100:0:70::/128 Ethernet1: ipv4: 10.0.0.223/31 ipv6: fc00::1be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 ARISTA113T0: properties: - common + - tor bgp: asn: 64113 peers: @@ -2670,16 +2783,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.113/32 - ipv6: 2064:100::71/128 + ipv6: 2064:100:0:71::/128 Ethernet1: ipv4: 10.0.0.225/31 ipv6: fc00::1c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 ARISTA114T0: properties: - common + - tor bgp: asn: 64114 peers: @@ -2689,16 +2803,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.114/32 - ipv6: 2064:100::72/128 + ipv6: 2064:100:0:72::/128 Ethernet1: ipv4: 10.0.0.227/31 ipv6: fc00::1c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 ARISTA115T0: properties: - common + - tor bgp: asn: 64115 peers: @@ -2708,16 +2823,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.115/32 - ipv6: 2064:100::73/128 + ipv6: 
2064:100:0:73::/128 Ethernet1: ipv4: 10.0.0.229/31 ipv6: fc00::1ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 ARISTA116T0: properties: - common + - tor bgp: asn: 64116 peers: @@ -2727,16 +2843,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.116/32 - ipv6: 2064:100::74/128 + ipv6: 2064:100:0:74::/128 Ethernet1: ipv4: 10.0.0.231/31 ipv6: fc00::1ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 ARISTA117T0: properties: - common + - tor bgp: asn: 64117 peers: @@ -2746,16 +2863,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.117/32 - ipv6: 2064:100::75/128 + ipv6: 2064:100:0:75::/128 Ethernet1: ipv4: 10.0.0.233/31 ipv6: fc00::1d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 ARISTA118T0: properties: - common + - tor bgp: asn: 64118 peers: @@ -2765,16 +2883,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.118/32 - ipv6: 2064:100::76/128 + ipv6: 2064:100:0:76::/128 Ethernet1: ipv4: 10.0.0.235/31 ipv6: fc00::1d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 ARISTA119T0: properties: - common + - tor bgp: asn: 64119 peers: @@ -2784,16 +2903,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.119/32 - ipv6: 2064:100::77/128 + ipv6: 2064:100:0:77::/128 Ethernet1: ipv4: 10.0.0.237/31 ipv6: fc00::1da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 ARISTA120T0: properties: - common + - tor bgp: asn: 64120 peers: @@ -2803,16 +2923,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.120/32 - ipv6: 2064:100::78/128 + ipv6: 2064:100:0:78::/128 Ethernet1: ipv4: 10.0.0.239/31 ipv6: fc00::1de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 ARISTA121T0: properties: - common + - tor bgp: asn: 64121 peers: @@ -2822,16 +2943,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.121/32 - ipv6: 2064:100::79/128 + ipv6: 2064:100:0:79::/128 Ethernet1: ipv4: 
10.0.0.241/31 ipv6: fc00::1e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 ARISTA122T0: properties: - common + - tor bgp: asn: 64122 peers: @@ -2841,16 +2963,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.122/32 - ipv6: 2064:100::7a/128 + ipv6: 2064:100:0:7a::/128 Ethernet1: ipv4: 10.0.0.243/31 ipv6: fc00::1e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 ARISTA123T0: properties: - common + - tor bgp: asn: 64123 peers: @@ -2860,16 +2983,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.123/32 - ipv6: 2064:100::7b/128 + ipv6: 2064:100:0:7b::/128 Ethernet1: ipv4: 10.0.0.245/31 ipv6: fc00::1ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 ARISTA124T0: properties: - common + - tor bgp: asn: 64124 peers: @@ -2879,16 +3003,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.124/32 - ipv6: 2064:100::7c/128 + ipv6: 2064:100:0:7c::/128 Ethernet1: ipv4: 10.0.0.247/31 ipv6: fc00::1ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 ARISTA125T0: properties: - common + - tor bgp: asn: 64125 peers: @@ -2898,16 +3023,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.125/32 - ipv6: 2064:100::7d/128 + ipv6: 2064:100:0:7d::/128 Ethernet1: ipv4: 10.0.0.249/31 ipv6: fc00::1f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 ARISTA126T0: properties: - common + - tor bgp: asn: 64126 peers: @@ -2917,16 +3043,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.126/32 - ipv6: 2064:100::7e/128 + ipv6: 2064:100:0:7e::/128 Ethernet1: ipv4: 10.0.0.251/31 ipv6: fc00::1f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 ARISTA127T0: properties: - common + - tor bgp: asn: 64127 peers: @@ -2936,16 +3063,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.127/32 - ipv6: 2064:100::7f/128 + ipv6: 2064:100:0:7f::/128 Ethernet1: ipv4: 10.0.0.253/31 ipv6: fc00::1fa/126 - 
bp_interfaces: + bp_interface: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 ARISTA128T0: properties: - common + - tor bgp: asn: 64128 peers: @@ -2955,10 +3083,10 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.128/32 - ipv6: 2064:100::80/128 + ipv6: 2064:100:0:80::/128 Ethernet1: ipv4: 10.0.0.255/31 ipv6: fc00::1fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.129/24 ipv6: fc0a::81/64 diff --git a/ansible/vars/topo_t1-isolated-d224u8.yml b/ansible/vars/topo_t1-isolated-d224u8.yml index f2f809366d5..6c82ab9448b 100644 --- a/ansible/vars/topo_t1-isolated-d224u8.yml +++ b/ansible/vars/topo_t1-isolated-d224u8.yml @@ -949,6 +949,7 @@ configuration: ARISTA01T0: properties: - common + - tor bgp: asn: 64001 peers: @@ -958,16 +959,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.1/32 - ipv6: 2064:100::1/128 + ipv6: 2064:100:0:1::/128 Ethernet1: ipv4: 10.0.0.1/31 ipv6: fc00::2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.2/24 ipv6: fc0a::2/64 ARISTA02T0: properties: - common + - tor bgp: asn: 64002 peers: @@ -977,16 +979,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.2/32 - ipv6: 2064:100::2/128 + ipv6: 2064:100:0:2::/128 Ethernet1: ipv4: 10.0.0.3/31 ipv6: fc00::6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.3/24 ipv6: fc0a::3/64 ARISTA03T0: properties: - common + - tor bgp: asn: 64003 peers: @@ -996,16 +999,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.3/32 - ipv6: 2064:100::3/128 + ipv6: 2064:100:0:3::/128 Ethernet1: ipv4: 10.0.0.5/31 ipv6: fc00::a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.4/24 ipv6: fc0a::4/64 ARISTA04T0: properties: - common + - tor bgp: asn: 64004 peers: @@ -1015,16 +1019,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.4/32 - ipv6: 2064:100::4/128 + ipv6: 2064:100:0:4::/128 Ethernet1: ipv4: 10.0.0.7/31 ipv6: fc00::e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.5/24 ipv6: fc0a::5/64 ARISTA05T0: properties: - common + - tor bgp: asn: 64005 peers: @@ -1034,16 +1039,17 @@ 
configuration: interfaces: Loopback0: ipv4: 100.1.0.5/32 - ipv6: 2064:100::5/128 + ipv6: 2064:100:0:5::/128 Ethernet1: ipv4: 10.0.0.9/31 ipv6: fc00::12/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.6/24 ipv6: fc0a::6/64 ARISTA06T0: properties: - common + - tor bgp: asn: 64006 peers: @@ -1053,16 +1059,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.6/32 - ipv6: 2064:100::6/128 + ipv6: 2064:100:0:6::/128 Ethernet1: ipv4: 10.0.0.11/31 ipv6: fc00::16/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.7/24 ipv6: fc0a::7/64 ARISTA07T0: properties: - common + - tor bgp: asn: 64007 peers: @@ -1072,16 +1079,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.7/32 - ipv6: 2064:100::7/128 + ipv6: 2064:100:0:7::/128 Ethernet1: ipv4: 10.0.0.13/31 ipv6: fc00::1a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.8/24 ipv6: fc0a::8/64 ARISTA08T0: properties: - common + - tor bgp: asn: 64008 peers: @@ -1091,16 +1099,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.8/32 - ipv6: 2064:100::8/128 + ipv6: 2064:100:0:8::/128 Ethernet1: ipv4: 10.0.0.15/31 ipv6: fc00::1e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.9/24 ipv6: fc0a::9/64 ARISTA09T0: properties: - common + - tor bgp: asn: 64009 peers: @@ -1110,16 +1119,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.9/32 - ipv6: 2064:100::9/128 + ipv6: 2064:100:0:9::/128 Ethernet1: ipv4: 10.0.0.17/31 ipv6: fc00::22/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.10/24 ipv6: fc0a::a/64 ARISTA10T0: properties: - common + - tor bgp: asn: 64010 peers: @@ -1129,16 +1139,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.10/32 - ipv6: 2064:100::a/128 + ipv6: 2064:100:0:a::/128 Ethernet1: ipv4: 10.0.0.19/31 ipv6: fc00::26/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.11/24 ipv6: fc0a::b/64 ARISTA11T0: properties: - common + - tor bgp: asn: 64011 peers: @@ -1148,16 +1159,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.11/32 - ipv6: 2064:100::b/128 + ipv6: 
2064:100:0:b::/128 Ethernet1: ipv4: 10.0.0.21/31 ipv6: fc00::2a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.12/24 ipv6: fc0a::c/64 ARISTA12T0: properties: - common + - tor bgp: asn: 64012 peers: @@ -1167,16 +1179,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.12/32 - ipv6: 2064:100::c/128 + ipv6: 2064:100:0:c::/128 Ethernet1: ipv4: 10.0.0.23/31 ipv6: fc00::2e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.13/24 ipv6: fc0a::d/64 ARISTA13T0: properties: - common + - tor bgp: asn: 64013 peers: @@ -1186,16 +1199,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.13/32 - ipv6: 2064:100::d/128 + ipv6: 2064:100:0:d::/128 Ethernet1: ipv4: 10.0.0.25/31 ipv6: fc00::32/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.14/24 ipv6: fc0a::e/64 ARISTA14T0: properties: - common + - tor bgp: asn: 64014 peers: @@ -1205,16 +1219,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.14/32 - ipv6: 2064:100::e/128 + ipv6: 2064:100:0:e::/128 Ethernet1: ipv4: 10.0.0.27/31 ipv6: fc00::36/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.15/24 ipv6: fc0a::f/64 ARISTA15T0: properties: - common + - tor bgp: asn: 64015 peers: @@ -1224,16 +1239,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.15/32 - ipv6: 2064:100::f/128 + ipv6: 2064:100:0:f::/128 Ethernet1: ipv4: 10.0.0.29/31 ipv6: fc00::3a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.16/24 ipv6: fc0a::10/64 ARISTA16T0: properties: - common + - tor bgp: asn: 64016 peers: @@ -1243,16 +1259,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.16/32 - ipv6: 2064:100::10/128 + ipv6: 2064:100:0:10::/128 Ethernet1: ipv4: 10.0.0.31/31 ipv6: fc00::3e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.17/24 ipv6: fc0a::11/64 ARISTA17T0: properties: - common + - tor bgp: asn: 64017 peers: @@ -1262,16 +1279,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.17/32 - ipv6: 2064:100::11/128 + ipv6: 2064:100:0:11::/128 Ethernet1: ipv4: 10.0.0.33/31 ipv6: fc00::42/126 - bp_interfaces: 
+ bp_interface: ipv4: 10.10.246.18/24 ipv6: fc0a::12/64 ARISTA18T0: properties: - common + - tor bgp: asn: 64018 peers: @@ -1281,16 +1299,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.18/32 - ipv6: 2064:100::12/128 + ipv6: 2064:100:0:12::/128 Ethernet1: ipv4: 10.0.0.35/31 ipv6: fc00::46/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.19/24 ipv6: fc0a::13/64 ARISTA19T0: properties: - common + - tor bgp: asn: 64019 peers: @@ -1300,16 +1319,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.19/32 - ipv6: 2064:100::13/128 + ipv6: 2064:100:0:13::/128 Ethernet1: ipv4: 10.0.0.37/31 ipv6: fc00::4a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.20/24 ipv6: fc0a::14/64 ARISTA20T0: properties: - common + - tor bgp: asn: 64020 peers: @@ -1319,16 +1339,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.20/32 - ipv6: 2064:100::14/128 + ipv6: 2064:100:0:14::/128 Ethernet1: ipv4: 10.0.0.39/31 ipv6: fc00::4e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.21/24 ipv6: fc0a::15/64 ARISTA21T0: properties: - common + - tor bgp: asn: 64021 peers: @@ -1338,16 +1359,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.21/32 - ipv6: 2064:100::15/128 + ipv6: 2064:100:0:15::/128 Ethernet1: ipv4: 10.0.0.41/31 ipv6: fc00::52/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.22/24 ipv6: fc0a::16/64 ARISTA22T0: properties: - common + - tor bgp: asn: 64022 peers: @@ -1357,16 +1379,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.22/32 - ipv6: 2064:100::16/128 + ipv6: 2064:100:0:16::/128 Ethernet1: ipv4: 10.0.0.43/31 ipv6: fc00::56/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.23/24 ipv6: fc0a::17/64 ARISTA23T0: properties: - common + - tor bgp: asn: 64023 peers: @@ -1376,16 +1399,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.23/32 - ipv6: 2064:100::17/128 + ipv6: 2064:100:0:17::/128 Ethernet1: ipv4: 10.0.0.45/31 ipv6: fc00::5a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.24/24 ipv6: fc0a::18/64 ARISTA24T0: 
properties: - common + - tor bgp: asn: 64024 peers: @@ -1395,16 +1419,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.24/32 - ipv6: 2064:100::18/128 + ipv6: 2064:100:0:18::/128 Ethernet1: ipv4: 10.0.0.47/31 ipv6: fc00::5e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.25/24 ipv6: fc0a::19/64 ARISTA25T0: properties: - common + - tor bgp: asn: 64025 peers: @@ -1414,16 +1439,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.25/32 - ipv6: 2064:100::19/128 + ipv6: 2064:100:0:19::/128 Ethernet1: ipv4: 10.0.0.49/31 ipv6: fc00::62/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.26/24 ipv6: fc0a::1a/64 ARISTA26T0: properties: - common + - tor bgp: asn: 64026 peers: @@ -1433,16 +1459,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.26/32 - ipv6: 2064:100::1a/128 + ipv6: 2064:100:0:1a::/128 Ethernet1: ipv4: 10.0.0.51/31 ipv6: fc00::66/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.27/24 ipv6: fc0a::1b/64 ARISTA27T0: properties: - common + - tor bgp: asn: 64027 peers: @@ -1452,16 +1479,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.27/32 - ipv6: 2064:100::1b/128 + ipv6: 2064:100:0:1b::/128 Ethernet1: ipv4: 10.0.0.53/31 ipv6: fc00::6a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.28/24 ipv6: fc0a::1c/64 ARISTA28T0: properties: - common + - tor bgp: asn: 64028 peers: @@ -1471,16 +1499,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.28/32 - ipv6: 2064:100::1c/128 + ipv6: 2064:100:0:1c::/128 Ethernet1: ipv4: 10.0.0.55/31 ipv6: fc00::6e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.29/24 ipv6: fc0a::1d/64 ARISTA29T0: properties: - common + - tor bgp: asn: 64029 peers: @@ -1490,16 +1519,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.29/32 - ipv6: 2064:100::1d/128 + ipv6: 2064:100:0:1d::/128 Ethernet1: ipv4: 10.0.0.57/31 ipv6: fc00::72/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.30/24 ipv6: fc0a::1e/64 ARISTA30T0: properties: - common + - tor bgp: asn: 64030 peers: @@ -1509,16 +1539,17 
@@ configuration: interfaces: Loopback0: ipv4: 100.1.0.30/32 - ipv6: 2064:100::1e/128 + ipv6: 2064:100:0:1e::/128 Ethernet1: ipv4: 10.0.0.59/31 ipv6: fc00::76/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.31/24 ipv6: fc0a::1f/64 ARISTA31T0: properties: - common + - tor bgp: asn: 64031 peers: @@ -1528,16 +1559,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.31/32 - ipv6: 2064:100::1f/128 + ipv6: 2064:100:0:1f::/128 Ethernet1: ipv4: 10.0.0.61/31 ipv6: fc00::7a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.32/24 ipv6: fc0a::20/64 ARISTA32T0: properties: - common + - tor bgp: asn: 64032 peers: @@ -1547,16 +1579,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.32/32 - ipv6: 2064:100::20/128 + ipv6: 2064:100:0:20::/128 Ethernet1: ipv4: 10.0.0.63/31 ipv6: fc00::7e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.33/24 ipv6: fc0a::21/64 ARISTA33T0: properties: - common + - tor bgp: asn: 64033 peers: @@ -1566,16 +1599,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.33/32 - ipv6: 2064:100::21/128 + ipv6: 2064:100:0:21::/128 Ethernet1: ipv4: 10.0.0.65/31 ipv6: fc00::82/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.34/24 ipv6: fc0a::22/64 ARISTA34T0: properties: - common + - tor bgp: asn: 64034 peers: @@ -1585,16 +1619,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.34/32 - ipv6: 2064:100::22/128 + ipv6: 2064:100:0:22::/128 Ethernet1: ipv4: 10.0.0.67/31 ipv6: fc00::86/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.35/24 ipv6: fc0a::23/64 ARISTA35T0: properties: - common + - tor bgp: asn: 64035 peers: @@ -1604,16 +1639,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.35/32 - ipv6: 2064:100::23/128 + ipv6: 2064:100:0:23::/128 Ethernet1: ipv4: 10.0.0.69/31 ipv6: fc00::8a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.36/24 ipv6: fc0a::24/64 ARISTA36T0: properties: - common + - tor bgp: asn: 64036 peers: @@ -1623,16 +1659,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.36/32 - ipv6: 
2064:100::24/128 + ipv6: 2064:100:0:24::/128 Ethernet1: ipv4: 10.0.0.71/31 ipv6: fc00::8e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.37/24 ipv6: fc0a::25/64 ARISTA37T0: properties: - common + - tor bgp: asn: 64037 peers: @@ -1642,16 +1679,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.37/32 - ipv6: 2064:100::25/128 + ipv6: 2064:100:0:25::/128 Ethernet1: ipv4: 10.0.0.73/31 ipv6: fc00::92/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.38/24 ipv6: fc0a::26/64 ARISTA38T0: properties: - common + - tor bgp: asn: 64038 peers: @@ -1661,16 +1699,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.38/32 - ipv6: 2064:100::26/128 + ipv6: 2064:100:0:26::/128 Ethernet1: ipv4: 10.0.0.75/31 ipv6: fc00::96/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.39/24 ipv6: fc0a::27/64 ARISTA39T0: properties: - common + - tor bgp: asn: 64039 peers: @@ -1680,16 +1719,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.39/32 - ipv6: 2064:100::27/128 + ipv6: 2064:100:0:27::/128 Ethernet1: ipv4: 10.0.0.77/31 ipv6: fc00::9a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.40/24 ipv6: fc0a::28/64 ARISTA40T0: properties: - common + - tor bgp: asn: 64040 peers: @@ -1699,16 +1739,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.40/32 - ipv6: 2064:100::28/128 + ipv6: 2064:100:0:28::/128 Ethernet1: ipv4: 10.0.0.79/31 ipv6: fc00::9e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.41/24 ipv6: fc0a::29/64 ARISTA41T0: properties: - common + - tor bgp: asn: 64041 peers: @@ -1718,16 +1759,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.41/32 - ipv6: 2064:100::29/128 + ipv6: 2064:100:0:29::/128 Ethernet1: ipv4: 10.0.0.81/31 ipv6: fc00::a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.42/24 ipv6: fc0a::2a/64 ARISTA42T0: properties: - common + - tor bgp: asn: 64042 peers: @@ -1737,16 +1779,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.42/32 - ipv6: 2064:100::2a/128 + ipv6: 2064:100:0:2a::/128 Ethernet1: ipv4: 
10.0.0.83/31 ipv6: fc00::a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.43/24 ipv6: fc0a::2b/64 ARISTA43T0: properties: - common + - tor bgp: asn: 64043 peers: @@ -1756,16 +1799,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.43/32 - ipv6: 2064:100::2b/128 + ipv6: 2064:100:0:2b::/128 Ethernet1: ipv4: 10.0.0.85/31 ipv6: fc00::aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.44/24 ipv6: fc0a::2c/64 ARISTA44T0: properties: - common + - tor bgp: asn: 64044 peers: @@ -1775,16 +1819,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.44/32 - ipv6: 2064:100::2c/128 + ipv6: 2064:100:0:2c::/128 Ethernet1: ipv4: 10.0.0.87/31 ipv6: fc00::ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.45/24 ipv6: fc0a::2d/64 ARISTA45T0: properties: - common + - tor bgp: asn: 64045 peers: @@ -1794,16 +1839,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.45/32 - ipv6: 2064:100::2d/128 + ipv6: 2064:100:0:2d::/128 Ethernet1: ipv4: 10.0.0.89/31 ipv6: fc00::b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.46/24 ipv6: fc0a::2e/64 ARISTA46T0: properties: - common + - tor bgp: asn: 64046 peers: @@ -1813,16 +1859,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.46/32 - ipv6: 2064:100::2e/128 + ipv6: 2064:100:0:2e::/128 Ethernet1: ipv4: 10.0.0.91/31 ipv6: fc00::b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.47/24 ipv6: fc0a::2f/64 ARISTA47T0: properties: - common + - tor bgp: asn: 64047 peers: @@ -1832,16 +1879,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.47/32 - ipv6: 2064:100::2f/128 + ipv6: 2064:100:0:2f::/128 Ethernet1: ipv4: 10.0.0.93/31 ipv6: fc00::ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.48/24 ipv6: fc0a::30/64 ARISTA48T0: properties: - common + - tor bgp: asn: 64048 peers: @@ -1851,16 +1899,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.48/32 - ipv6: 2064:100::30/128 + ipv6: 2064:100:0:30::/128 Ethernet1: ipv4: 10.0.0.95/31 ipv6: fc00::be/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.49/24 ipv6: fc0a::31/64 ARISTA01T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -1870,16 +1919,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.49/32 - ipv6: 2064:100::31/128 + ipv6: 2064:100:0:31::/128 Ethernet1: ipv4: 10.0.0.97/31 ipv6: fc00::c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 ARISTA02T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -1889,16 +1939,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.50/32 - ipv6: 2064:100::32/128 + ipv6: 2064:100:0:32::/128 Ethernet1: ipv4: 10.0.0.99/31 ipv6: fc00::c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 ARISTA49T0: properties: - common + - tor bgp: asn: 64049 peers: @@ -1908,16 +1959,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.51/32 - ipv6: 2064:100::33/128 + ipv6: 2064:100:0:33::/128 Ethernet1: ipv4: 10.0.0.101/31 ipv6: fc00::ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.52/24 ipv6: fc0a::34/64 ARISTA50T0: properties: - common + - tor bgp: asn: 64050 peers: @@ -1927,16 +1979,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.52/32 - ipv6: 2064:100::34/128 + ipv6: 2064:100:0:34::/128 Ethernet1: ipv4: 10.0.0.103/31 ipv6: fc00::ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 ARISTA51T0: properties: - common + - tor bgp: asn: 64051 peers: @@ -1946,16 +1999,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.53/32 - ipv6: 2064:100::35/128 + ipv6: 2064:100:0:35::/128 Ethernet1: ipv4: 10.0.0.105/31 ipv6: fc00::d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.54/24 ipv6: fc0a::36/64 ARISTA52T0: properties: - common + - tor bgp: asn: 64052 peers: @@ -1965,16 +2019,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.54/32 - ipv6: 2064:100::36/128 + ipv6: 2064:100:0:36::/128 Ethernet1: ipv4: 10.0.0.107/31 ipv6: fc00::d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 ARISTA53T0: properties: - 
common + - tor bgp: asn: 64053 peers: @@ -1984,16 +2039,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.55/32 - ipv6: 2064:100::37/128 + ipv6: 2064:100:0:37::/128 Ethernet1: ipv4: 10.0.0.109/31 ipv6: fc00::da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 ARISTA54T0: properties: - common + - tor bgp: asn: 64054 peers: @@ -2003,16 +2059,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.56/32 - ipv6: 2064:100::38/128 + ipv6: 2064:100:0:38::/128 Ethernet1: ipv4: 10.0.0.111/31 ipv6: fc00::de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 ARISTA55T0: properties: - common + - tor bgp: asn: 64055 peers: @@ -2022,16 +2079,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.57/32 - ipv6: 2064:100::39/128 + ipv6: 2064:100:0:39::/128 Ethernet1: ipv4: 10.0.0.113/31 ipv6: fc00::e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 ARISTA56T0: properties: - common + - tor bgp: asn: 64056 peers: @@ -2041,16 +2099,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.58/32 - ipv6: 2064:100::3a/128 + ipv6: 2064:100:0:3a::/128 Ethernet1: ipv4: 10.0.0.115/31 ipv6: fc00::e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 ARISTA03T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -2060,16 +2119,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.59/32 - ipv6: 2064:100::3b/128 + ipv6: 2064:100:0:3b::/128 Ethernet1: ipv4: 10.0.0.117/31 ipv6: fc00::ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 ARISTA04T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -2079,16 +2139,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.60/32 - ipv6: 2064:100::3c/128 + ipv6: 2064:100:0:3c::/128 Ethernet1: ipv4: 10.0.0.119/31 ipv6: fc00::ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 ARISTA57T0: properties: - common + - tor bgp: asn: 64057 peers: @@ -2098,16 +2159,17 @@ 
configuration: interfaces: Loopback0: ipv4: 100.1.0.61/32 - ipv6: 2064:100::3d/128 + ipv6: 2064:100:0:3d::/128 Ethernet1: ipv4: 10.0.0.121/31 ipv6: fc00::f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 ARISTA58T0: properties: - common + - tor bgp: asn: 64058 peers: @@ -2117,16 +2179,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.62/32 - ipv6: 2064:100::3e/128 + ipv6: 2064:100:0:3e::/128 Ethernet1: ipv4: 10.0.0.123/31 ipv6: fc00::f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 ARISTA59T0: properties: - common + - tor bgp: asn: 64059 peers: @@ -2136,16 +2199,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.63/32 - ipv6: 2064:100::3f/128 + ipv6: 2064:100:0:3f::/128 Ethernet1: ipv4: 10.0.0.125/31 ipv6: fc00::fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 ARISTA60T0: properties: - common + - tor bgp: asn: 64060 peers: @@ -2155,16 +2219,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.64/32 - ipv6: 2064:100::40/128 + ipv6: 2064:100:0:40::/128 Ethernet1: ipv4: 10.0.0.127/31 ipv6: fc00::fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 ARISTA61T0: properties: - common + - tor bgp: asn: 64061 peers: @@ -2174,16 +2239,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.65/32 - ipv6: 2064:100::41/128 + ipv6: 2064:100:0:41::/128 Ethernet1: ipv4: 10.0.0.129/31 ipv6: fc00::102/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 ARISTA62T0: properties: - common + - tor bgp: asn: 64062 peers: @@ -2193,16 +2259,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.66/32 - ipv6: 2064:100::42/128 + ipv6: 2064:100:0:42::/128 Ethernet1: ipv4: 10.0.0.131/31 ipv6: fc00::106/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.67/24 ipv6: fc0a::43/64 ARISTA63T0: properties: - common + - tor bgp: asn: 64063 peers: @@ -2212,16 +2279,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.67/32 - ipv6: 
2064:100::43/128 + ipv6: 2064:100:0:43::/128 Ethernet1: ipv4: 10.0.0.133/31 ipv6: fc00::10a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 ARISTA64T0: properties: - common + - tor bgp: asn: 64064 peers: @@ -2231,16 +2299,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.68/32 - ipv6: 2064:100::44/128 + ipv6: 2064:100:0:44::/128 Ethernet1: ipv4: 10.0.0.135/31 ipv6: fc00::10e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 ARISTA65T0: properties: - common + - tor bgp: asn: 64065 peers: @@ -2250,16 +2319,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.69/32 - ipv6: 2064:100::45/128 + ipv6: 2064:100:0:45::/128 Ethernet1: ipv4: 10.0.0.137/31 ipv6: fc00::112/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 ARISTA66T0: properties: - common + - tor bgp: asn: 64066 peers: @@ -2269,16 +2339,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.70/32 - ipv6: 2064:100::46/128 + ipv6: 2064:100:0:46::/128 Ethernet1: ipv4: 10.0.0.139/31 ipv6: fc00::116/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.71/24 ipv6: fc0a::47/64 ARISTA67T0: properties: - common + - tor bgp: asn: 64067 peers: @@ -2288,16 +2359,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.71/32 - ipv6: 2064:100::47/128 + ipv6: 2064:100:0:47::/128 Ethernet1: ipv4: 10.0.0.141/31 ipv6: fc00::11a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 ARISTA68T0: properties: - common + - tor bgp: asn: 64068 peers: @@ -2307,16 +2379,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.72/32 - ipv6: 2064:100::48/128 + ipv6: 2064:100:0:48::/128 Ethernet1: ipv4: 10.0.0.143/31 ipv6: fc00::11e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 ARISTA69T0: properties: - common + - tor bgp: asn: 64069 peers: @@ -2326,16 +2399,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.73/32 - ipv6: 2064:100::49/128 + ipv6: 2064:100:0:49::/128 Ethernet1: ipv4: 
10.0.0.145/31 ipv6: fc00::122/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 ARISTA70T0: properties: - common + - tor bgp: asn: 64070 peers: @@ -2345,16 +2419,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.74/32 - ipv6: 2064:100::4a/128 + ipv6: 2064:100:0:4a::/128 Ethernet1: ipv4: 10.0.0.147/31 ipv6: fc00::126/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 ARISTA71T0: properties: - common + - tor bgp: asn: 64071 peers: @@ -2364,16 +2439,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.75/32 - ipv6: 2064:100::4b/128 + ipv6: 2064:100:0:4b::/128 Ethernet1: ipv4: 10.0.0.149/31 ipv6: fc00::12a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 ARISTA72T0: properties: - common + - tor bgp: asn: 64072 peers: @@ -2383,16 +2459,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.76/32 - ipv6: 2064:100::4c/128 + ipv6: 2064:100:0:4c::/128 Ethernet1: ipv4: 10.0.0.151/31 ipv6: fc00::12e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 ARISTA73T0: properties: - common + - tor bgp: asn: 64073 peers: @@ -2402,16 +2479,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.77/32 - ipv6: 2064:100::4d/128 + ipv6: 2064:100:0:4d::/128 Ethernet1: ipv4: 10.0.0.153/31 ipv6: fc00::132/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 ARISTA74T0: properties: - common + - tor bgp: asn: 64074 peers: @@ -2421,16 +2499,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.78/32 - ipv6: 2064:100::4e/128 + ipv6: 2064:100:0:4e::/128 Ethernet1: ipv4: 10.0.0.155/31 ipv6: fc00::136/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 ARISTA75T0: properties: - common + - tor bgp: asn: 64075 peers: @@ -2440,16 +2519,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.79/32 - ipv6: 2064:100::4f/128 + ipv6: 2064:100:0:4f::/128 Ethernet1: ipv4: 10.0.0.157/31 ipv6: fc00::13a/126 - bp_interfaces: + 
bp_interface: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 ARISTA76T0: properties: - common + - tor bgp: asn: 64076 peers: @@ -2459,16 +2539,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.80/32 - ipv6: 2064:100::50/128 + ipv6: 2064:100:0:50::/128 Ethernet1: ipv4: 10.0.0.159/31 ipv6: fc00::13e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 ARISTA77T0: properties: - common + - tor bgp: asn: 64077 peers: @@ -2478,16 +2559,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.81/32 - ipv6: 2064:100::51/128 + ipv6: 2064:100:0:51::/128 Ethernet1: ipv4: 10.0.0.161/31 ipv6: fc00::142/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 ARISTA78T0: properties: - common + - tor bgp: asn: 64078 peers: @@ -2497,16 +2579,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.82/32 - ipv6: 2064:100::52/128 + ipv6: 2064:100:0:52::/128 Ethernet1: ipv4: 10.0.0.163/31 ipv6: fc00::146/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 ARISTA79T0: properties: - common + - tor bgp: asn: 64079 peers: @@ -2516,16 +2599,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.83/32 - ipv6: 2064:100::53/128 + ipv6: 2064:100:0:53::/128 Ethernet1: ipv4: 10.0.0.165/31 ipv6: fc00::14a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 ARISTA80T0: properties: - common + - tor bgp: asn: 64080 peers: @@ -2535,16 +2619,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.84/32 - ipv6: 2064:100::54/128 + ipv6: 2064:100:0:54::/128 Ethernet1: ipv4: 10.0.0.167/31 ipv6: fc00::14e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 ARISTA81T0: properties: - common + - tor bgp: asn: 64081 peers: @@ -2554,16 +2639,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.85/32 - ipv6: 2064:100::55/128 + ipv6: 2064:100:0:55::/128 Ethernet1: ipv4: 10.0.0.169/31 ipv6: fc00::152/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 
ARISTA82T0: properties: - common + - tor bgp: asn: 64082 peers: @@ -2573,16 +2659,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.86/32 - ipv6: 2064:100::56/128 + ipv6: 2064:100:0:56::/128 Ethernet1: ipv4: 10.0.0.171/31 ipv6: fc00::156/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.87/24 ipv6: fc0a::57/64 ARISTA83T0: properties: - common + - tor bgp: asn: 64083 peers: @@ -2592,16 +2679,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.87/32 - ipv6: 2064:100::57/128 + ipv6: 2064:100:0:57::/128 Ethernet1: ipv4: 10.0.0.173/31 ipv6: fc00::15a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 ARISTA84T0: properties: - common + - tor bgp: asn: 64084 peers: @@ -2611,16 +2699,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.88/32 - ipv6: 2064:100::58/128 + ipv6: 2064:100:0:58::/128 Ethernet1: ipv4: 10.0.0.175/31 ipv6: fc00::15e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 ARISTA85T0: properties: - common + - tor bgp: asn: 64085 peers: @@ -2630,16 +2719,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.89/32 - ipv6: 2064:100::59/128 + ipv6: 2064:100:0:59::/128 Ethernet1: ipv4: 10.0.0.177/31 ipv6: fc00::162/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 ARISTA86T0: properties: - common + - tor bgp: asn: 64086 peers: @@ -2649,16 +2739,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.90/32 - ipv6: 2064:100::5a/128 + ipv6: 2064:100:0:5a::/128 Ethernet1: ipv4: 10.0.0.179/31 ipv6: fc00::166/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 ARISTA87T0: properties: - common + - tor bgp: asn: 64087 peers: @@ -2668,16 +2759,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.91/32 - ipv6: 2064:100::5b/128 + ipv6: 2064:100:0:5b::/128 Ethernet1: ipv4: 10.0.0.181/31 ipv6: fc00::16a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 ARISTA88T0: properties: - common + - tor bgp: asn: 64088 
peers: @@ -2687,16 +2779,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.92/32 - ipv6: 2064:100::5c/128 + ipv6: 2064:100:0:5c::/128 Ethernet1: ipv4: 10.0.0.183/31 ipv6: fc00::16e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 ARISTA89T0: properties: - common + - tor bgp: asn: 64089 peers: @@ -2706,16 +2799,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.93/32 - ipv6: 2064:100::5d/128 + ipv6: 2064:100:0:5d::/128 Ethernet1: ipv4: 10.0.0.185/31 ipv6: fc00::172/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 ARISTA90T0: properties: - common + - tor bgp: asn: 64090 peers: @@ -2725,16 +2819,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.94/32 - ipv6: 2064:100::5e/128 + ipv6: 2064:100:0:5e::/128 Ethernet1: ipv4: 10.0.0.187/31 ipv6: fc00::176/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 ARISTA91T0: properties: - common + - tor bgp: asn: 64091 peers: @@ -2744,16 +2839,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.95/32 - ipv6: 2064:100::5f/128 + ipv6: 2064:100:0:5f::/128 Ethernet1: ipv4: 10.0.0.189/31 ipv6: fc00::17a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 ARISTA92T0: properties: - common + - tor bgp: asn: 64092 peers: @@ -2763,16 +2859,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.96/32 - ipv6: 2064:100::60/128 + ipv6: 2064:100:0:60::/128 Ethernet1: ipv4: 10.0.0.191/31 ipv6: fc00::17e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 ARISTA93T0: properties: - common + - tor bgp: asn: 64093 peers: @@ -2782,16 +2879,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.97/32 - ipv6: 2064:100::61/128 + ipv6: 2064:100:0:61::/128 Ethernet1: ipv4: 10.0.0.193/31 ipv6: fc00::182/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 ARISTA94T0: properties: - common + - tor bgp: asn: 64094 peers: @@ -2801,16 +2899,17 @@ configuration: interfaces: 
Loopback0: ipv4: 100.1.0.98/32 - ipv6: 2064:100::62/128 + ipv6: 2064:100:0:62::/128 Ethernet1: ipv4: 10.0.0.195/31 ipv6: fc00::186/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 ARISTA95T0: properties: - common + - tor bgp: asn: 64095 peers: @@ -2820,16 +2919,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.99/32 - ipv6: 2064:100::63/128 + ipv6: 2064:100:0:63::/128 Ethernet1: ipv4: 10.0.0.197/31 ipv6: fc00::18a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 ARISTA96T0: properties: - common + - tor bgp: asn: 64096 peers: @@ -2839,16 +2939,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.100/32 - ipv6: 2064:100::64/128 + ipv6: 2064:100:0:64::/128 Ethernet1: ipv4: 10.0.0.199/31 ipv6: fc00::18e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 ARISTA97T0: properties: - common + - tor bgp: asn: 64097 peers: @@ -2858,16 +2959,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.101/32 - ipv6: 2064:100::65/128 + ipv6: 2064:100:0:65::/128 Ethernet1: ipv4: 10.0.0.201/31 ipv6: fc00::192/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 ARISTA98T0: properties: - common + - tor bgp: asn: 64098 peers: @@ -2877,16 +2979,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.102/32 - ipv6: 2064:100::66/128 + ipv6: 2064:100:0:66::/128 Ethernet1: ipv4: 10.0.0.203/31 ipv6: fc00::196/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 ARISTA99T0: properties: - common + - tor bgp: asn: 64099 peers: @@ -2896,16 +2999,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.103/32 - ipv6: 2064:100::67/128 + ipv6: 2064:100:0:67::/128 Ethernet1: ipv4: 10.0.0.205/31 ipv6: fc00::19a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 ARISTA100T0: properties: - common + - tor bgp: asn: 64100 peers: @@ -2915,16 +3019,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.104/32 - ipv6: 
2064:100::68/128 + ipv6: 2064:100:0:68::/128 Ethernet1: ipv4: 10.0.0.207/31 ipv6: fc00::19e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 ARISTA101T0: properties: - common + - tor bgp: asn: 64101 peers: @@ -2934,16 +3039,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.105/32 - ipv6: 2064:100::69/128 + ipv6: 2064:100:0:69::/128 Ethernet1: ipv4: 10.0.0.209/31 ipv6: fc00::1a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 ARISTA102T0: properties: - common + - tor bgp: asn: 64102 peers: @@ -2953,16 +3059,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.106/32 - ipv6: 2064:100::6a/128 + ipv6: 2064:100:0:6a::/128 Ethernet1: ipv4: 10.0.0.211/31 ipv6: fc00::1a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 ARISTA103T0: properties: - common + - tor bgp: asn: 64103 peers: @@ -2972,16 +3079,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.107/32 - ipv6: 2064:100::6b/128 + ipv6: 2064:100:0:6b::/128 Ethernet1: ipv4: 10.0.0.213/31 ipv6: fc00::1aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 ARISTA104T0: properties: - common + - tor bgp: asn: 64104 peers: @@ -2991,16 +3099,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.108/32 - ipv6: 2064:100::6c/128 + ipv6: 2064:100:0:6c::/128 Ethernet1: ipv4: 10.0.0.215/31 ipv6: fc00::1ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 ARISTA105T0: properties: - common + - tor bgp: asn: 64105 peers: @@ -3010,16 +3119,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.109/32 - ipv6: 2064:100::6d/128 + ipv6: 2064:100:0:6d::/128 Ethernet1: ipv4: 10.0.0.217/31 ipv6: fc00::1b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 ARISTA106T0: properties: - common + - tor bgp: asn: 64106 peers: @@ -3029,16 +3139,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.110/32 - ipv6: 2064:100::6e/128 + ipv6: 
2064:100:0:6e::/128 Ethernet1: ipv4: 10.0.0.219/31 ipv6: fc00::1b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 ARISTA107T0: properties: - common + - tor bgp: asn: 64107 peers: @@ -3048,16 +3159,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.111/32 - ipv6: 2064:100::6f/128 + ipv6: 2064:100:0:6f::/128 Ethernet1: ipv4: 10.0.0.221/31 ipv6: fc00::1ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 ARISTA108T0: properties: - common + - tor bgp: asn: 64108 peers: @@ -3067,16 +3179,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.112/32 - ipv6: 2064:100::70/128 + ipv6: 2064:100:0:70::/128 Ethernet1: ipv4: 10.0.0.223/31 ipv6: fc00::1be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 ARISTA109T0: properties: - common + - tor bgp: asn: 64109 peers: @@ -3086,16 +3199,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.113/32 - ipv6: 2064:100::71/128 + ipv6: 2064:100:0:71::/128 Ethernet1: ipv4: 10.0.0.225/31 ipv6: fc00::1c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 ARISTA110T0: properties: - common + - tor bgp: asn: 64110 peers: @@ -3105,16 +3219,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.114/32 - ipv6: 2064:100::72/128 + ipv6: 2064:100:0:72::/128 Ethernet1: ipv4: 10.0.0.227/31 ipv6: fc00::1c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 ARISTA111T0: properties: - common + - tor bgp: asn: 64111 peers: @@ -3124,16 +3239,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.115/32 - ipv6: 2064:100::73/128 + ipv6: 2064:100:0:73::/128 Ethernet1: ipv4: 10.0.0.229/31 ipv6: fc00::1ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 ARISTA112T0: properties: - common + - tor bgp: asn: 64112 peers: @@ -3143,16 +3259,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.116/32 - ipv6: 2064:100::74/128 + ipv6: 2064:100:0:74::/128 Ethernet1: ipv4: 
10.0.0.231/31 ipv6: fc00::1ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 ARISTA113T0: properties: - common + - tor bgp: asn: 64113 peers: @@ -3162,16 +3279,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.117/32 - ipv6: 2064:100::75/128 + ipv6: 2064:100:0:75::/128 Ethernet1: ipv4: 10.0.0.233/31 ipv6: fc00::1d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 ARISTA114T0: properties: - common + - tor bgp: asn: 64114 peers: @@ -3181,16 +3299,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.118/32 - ipv6: 2064:100::76/128 + ipv6: 2064:100:0:76::/128 Ethernet1: ipv4: 10.0.0.235/31 ipv6: fc00::1d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 ARISTA115T0: properties: - common + - tor bgp: asn: 64115 peers: @@ -3200,16 +3319,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.119/32 - ipv6: 2064:100::77/128 + ipv6: 2064:100:0:77::/128 Ethernet1: ipv4: 10.0.0.237/31 ipv6: fc00::1da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 ARISTA116T0: properties: - common + - tor bgp: asn: 64116 peers: @@ -3219,16 +3339,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.120/32 - ipv6: 2064:100::78/128 + ipv6: 2064:100:0:78::/128 Ethernet1: ipv4: 10.0.0.239/31 ipv6: fc00::1de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 ARISTA117T0: properties: - common + - tor bgp: asn: 64117 peers: @@ -3238,16 +3359,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.121/32 - ipv6: 2064:100::79/128 + ipv6: 2064:100:0:79::/128 Ethernet1: ipv4: 10.0.0.241/31 ipv6: fc00::1e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 ARISTA118T0: properties: - common + - tor bgp: asn: 64118 peers: @@ -3257,16 +3379,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.122/32 - ipv6: 2064:100::7a/128 + ipv6: 2064:100:0:7a::/128 Ethernet1: ipv4: 10.0.0.243/31 ipv6: fc00::1e6/126 - 
bp_interfaces: + bp_interface: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 ARISTA119T0: properties: - common + - tor bgp: asn: 64119 peers: @@ -3276,16 +3399,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.123/32 - ipv6: 2064:100::7b/128 + ipv6: 2064:100:0:7b::/128 Ethernet1: ipv4: 10.0.0.245/31 ipv6: fc00::1ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 ARISTA120T0: properties: - common + - tor bgp: asn: 64120 peers: @@ -3295,16 +3419,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.124/32 - ipv6: 2064:100::7c/128 + ipv6: 2064:100:0:7c::/128 Ethernet1: ipv4: 10.0.0.247/31 ipv6: fc00::1ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 ARISTA121T0: properties: - common + - tor bgp: asn: 64121 peers: @@ -3314,16 +3439,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.125/32 - ipv6: 2064:100::7d/128 + ipv6: 2064:100:0:7d::/128 Ethernet1: ipv4: 10.0.0.249/31 ipv6: fc00::1f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 ARISTA122T0: properties: - common + - tor bgp: asn: 64122 peers: @@ -3333,16 +3459,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.126/32 - ipv6: 2064:100::7e/128 + ipv6: 2064:100:0:7e::/128 Ethernet1: ipv4: 10.0.0.251/31 ipv6: fc00::1f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 ARISTA123T0: properties: - common + - tor bgp: asn: 64123 peers: @@ -3352,16 +3479,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.127/32 - ipv6: 2064:100::7f/128 + ipv6: 2064:100:0:7f::/128 Ethernet1: ipv4: 10.0.0.253/31 ipv6: fc00::1fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 ARISTA124T0: properties: - common + - tor bgp: asn: 64124 peers: @@ -3371,16 +3499,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.128/32 - ipv6: 2064:100::80/128 + ipv6: 2064:100:0:80::/128 Ethernet1: ipv4: 10.0.0.255/31 ipv6: fc00::1fe/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.129/24 ipv6: fc0a::81/64 ARISTA125T0: properties: - common + - tor bgp: asn: 64125 peers: @@ -3390,16 +3519,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.129/32 - ipv6: 2064:100::81/128 + ipv6: 2064:100:0:81::/128 Ethernet1: ipv4: 10.0.1.1/31 ipv6: fc00::202/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.130/24 ipv6: fc0a::82/64 ARISTA126T0: properties: - common + - tor bgp: asn: 64126 peers: @@ -3409,16 +3539,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.130/32 - ipv6: 2064:100::82/128 + ipv6: 2064:100:0:82::/128 Ethernet1: ipv4: 10.0.1.3/31 ipv6: fc00::206/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.131/24 ipv6: fc0a::83/64 ARISTA127T0: properties: - common + - tor bgp: asn: 64127 peers: @@ -3428,16 +3559,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.131/32 - ipv6: 2064:100::83/128 + ipv6: 2064:100:0:83::/128 Ethernet1: ipv4: 10.0.1.5/31 ipv6: fc00::20a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.132/24 ipv6: fc0a::84/64 ARISTA128T0: properties: - common + - tor bgp: asn: 64128 peers: @@ -3447,16 +3579,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.132/32 - ipv6: 2064:100::84/128 + ipv6: 2064:100:0:84::/128 Ethernet1: ipv4: 10.0.1.7/31 ipv6: fc00::20e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.133/24 ipv6: fc0a::85/64 ARISTA129T0: properties: - common + - tor bgp: asn: 64129 peers: @@ -3466,16 +3599,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.133/32 - ipv6: 2064:100::85/128 + ipv6: 2064:100:0:85::/128 Ethernet1: ipv4: 10.0.1.9/31 ipv6: fc00::212/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.134/24 ipv6: fc0a::86/64 ARISTA130T0: properties: - common + - tor bgp: asn: 64130 peers: @@ -3485,16 +3619,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.134/32 - ipv6: 2064:100::86/128 + ipv6: 2064:100:0:86::/128 Ethernet1: ipv4: 10.0.1.11/31 ipv6: fc00::216/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.135/24 ipv6: fc0a::87/64 ARISTA131T0: 
properties: - common + - tor bgp: asn: 64131 peers: @@ -3504,16 +3639,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.135/32 - ipv6: 2064:100::87/128 + ipv6: 2064:100:0:87::/128 Ethernet1: ipv4: 10.0.1.13/31 ipv6: fc00::21a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.136/24 ipv6: fc0a::88/64 ARISTA132T0: properties: - common + - tor bgp: asn: 64132 peers: @@ -3523,16 +3659,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.136/32 - ipv6: 2064:100::88/128 + ipv6: 2064:100:0:88::/128 Ethernet1: ipv4: 10.0.1.15/31 ipv6: fc00::21e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.137/24 ipv6: fc0a::89/64 ARISTA133T0: properties: - common + - tor bgp: asn: 64133 peers: @@ -3542,16 +3679,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.137/32 - ipv6: 2064:100::89/128 + ipv6: 2064:100:0:89::/128 Ethernet1: ipv4: 10.0.1.17/31 ipv6: fc00::222/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.138/24 ipv6: fc0a::8a/64 ARISTA134T0: properties: - common + - tor bgp: asn: 64134 peers: @@ -3561,16 +3699,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.138/32 - ipv6: 2064:100::8a/128 + ipv6: 2064:100:0:8a::/128 Ethernet1: ipv4: 10.0.1.19/31 ipv6: fc00::226/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.139/24 ipv6: fc0a::8b/64 ARISTA135T0: properties: - common + - tor bgp: asn: 64135 peers: @@ -3580,16 +3719,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.139/32 - ipv6: 2064:100::8b/128 + ipv6: 2064:100:0:8b::/128 Ethernet1: ipv4: 10.0.1.21/31 ipv6: fc00::22a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.140/24 ipv6: fc0a::8c/64 ARISTA136T0: properties: - common + - tor bgp: asn: 64136 peers: @@ -3599,16 +3739,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.140/32 - ipv6: 2064:100::8c/128 + ipv6: 2064:100:0:8c::/128 Ethernet1: ipv4: 10.0.1.23/31 ipv6: fc00::22e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.141/24 ipv6: fc0a::8d/64 ARISTA137T0: properties: - common + - tor bgp: asn: 64137 
peers: @@ -3618,16 +3759,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.141/32 - ipv6: 2064:100::8d/128 + ipv6: 2064:100:0:8d::/128 Ethernet1: ipv4: 10.0.1.25/31 ipv6: fc00::232/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.142/24 ipv6: fc0a::8e/64 ARISTA138T0: properties: - common + - tor bgp: asn: 64138 peers: @@ -3637,16 +3779,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.142/32 - ipv6: 2064:100::8e/128 + ipv6: 2064:100:0:8e::/128 Ethernet1: ipv4: 10.0.1.27/31 ipv6: fc00::236/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.143/24 ipv6: fc0a::8f/64 ARISTA139T0: properties: - common + - tor bgp: asn: 64139 peers: @@ -3656,16 +3799,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.143/32 - ipv6: 2064:100::8f/128 + ipv6: 2064:100:0:8f::/128 Ethernet1: ipv4: 10.0.1.29/31 ipv6: fc00::23a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.144/24 ipv6: fc0a::90/64 ARISTA140T0: properties: - common + - tor bgp: asn: 64140 peers: @@ -3675,16 +3819,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.144/32 - ipv6: 2064:100::90/128 + ipv6: 2064:100:0:90::/128 Ethernet1: ipv4: 10.0.1.31/31 ipv6: fc00::23e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.145/24 ipv6: fc0a::91/64 ARISTA141T0: properties: - common + - tor bgp: asn: 64141 peers: @@ -3694,16 +3839,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.145/32 - ipv6: 2064:100::91/128 + ipv6: 2064:100:0:91::/128 Ethernet1: ipv4: 10.0.1.33/31 ipv6: fc00::242/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.146/24 ipv6: fc0a::92/64 ARISTA142T0: properties: - common + - tor bgp: asn: 64142 peers: @@ -3713,16 +3859,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.146/32 - ipv6: 2064:100::92/128 + ipv6: 2064:100:0:92::/128 Ethernet1: ipv4: 10.0.1.35/31 ipv6: fc00::246/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.147/24 ipv6: fc0a::93/64 ARISTA143T0: properties: - common + - tor bgp: asn: 64143 peers: @@ -3732,16 +3879,17 @@ configuration: 
interfaces: Loopback0: ipv4: 100.1.0.147/32 - ipv6: 2064:100::93/128 + ipv6: 2064:100:0:93::/128 Ethernet1: ipv4: 10.0.1.37/31 ipv6: fc00::24a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.148/24 ipv6: fc0a::94/64 ARISTA144T0: properties: - common + - tor bgp: asn: 64144 peers: @@ -3751,16 +3899,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.148/32 - ipv6: 2064:100::94/128 + ipv6: 2064:100:0:94::/128 Ethernet1: ipv4: 10.0.1.39/31 ipv6: fc00::24e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.149/24 ipv6: fc0a::95/64 ARISTA145T0: properties: - common + - tor bgp: asn: 64145 peers: @@ -3770,16 +3919,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.149/32 - ipv6: 2064:100::95/128 + ipv6: 2064:100:0:95::/128 Ethernet1: ipv4: 10.0.1.41/31 ipv6: fc00::252/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.150/24 ipv6: fc0a::96/64 ARISTA146T0: properties: - common + - tor bgp: asn: 64146 peers: @@ -3789,16 +3939,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.150/32 - ipv6: 2064:100::96/128 + ipv6: 2064:100:0:96::/128 Ethernet1: ipv4: 10.0.1.43/31 ipv6: fc00::256/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.151/24 ipv6: fc0a::97/64 ARISTA147T0: properties: - common + - tor bgp: asn: 64147 peers: @@ -3808,16 +3959,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.151/32 - ipv6: 2064:100::97/128 + ipv6: 2064:100:0:97::/128 Ethernet1: ipv4: 10.0.1.45/31 ipv6: fc00::25a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.152/24 ipv6: fc0a::98/64 ARISTA148T0: properties: - common + - tor bgp: asn: 64148 peers: @@ -3827,16 +3979,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.152/32 - ipv6: 2064:100::98/128 + ipv6: 2064:100:0:98::/128 Ethernet1: ipv4: 10.0.1.47/31 ipv6: fc00::25e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.153/24 ipv6: fc0a::99/64 ARISTA149T0: properties: - common + - tor bgp: asn: 64149 peers: @@ -3846,16 +3999,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.153/32 - 
ipv6: 2064:100::99/128 + ipv6: 2064:100:0:99::/128 Ethernet1: ipv4: 10.0.1.49/31 ipv6: fc00::262/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.154/24 ipv6: fc0a::9a/64 ARISTA150T0: properties: - common + - tor bgp: asn: 64150 peers: @@ -3865,16 +4019,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.154/32 - ipv6: 2064:100::9a/128 + ipv6: 2064:100:0:9a::/128 Ethernet1: ipv4: 10.0.1.51/31 ipv6: fc00::266/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.155/24 ipv6: fc0a::9b/64 ARISTA151T0: properties: - common + - tor bgp: asn: 64151 peers: @@ -3884,16 +4039,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.155/32 - ipv6: 2064:100::9b/128 + ipv6: 2064:100:0:9b::/128 Ethernet1: ipv4: 10.0.1.53/31 ipv6: fc00::26a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.156/24 ipv6: fc0a::9c/64 ARISTA152T0: properties: - common + - tor bgp: asn: 64152 peers: @@ -3903,16 +4059,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.156/32 - ipv6: 2064:100::9c/128 + ipv6: 2064:100:0:9c::/128 Ethernet1: ipv4: 10.0.1.55/31 ipv6: fc00::26e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.157/24 ipv6: fc0a::9d/64 ARISTA153T0: properties: - common + - tor bgp: asn: 64153 peers: @@ -3922,16 +4079,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.157/32 - ipv6: 2064:100::9d/128 + ipv6: 2064:100:0:9d::/128 Ethernet1: ipv4: 10.0.1.57/31 ipv6: fc00::272/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.158/24 ipv6: fc0a::9e/64 ARISTA154T0: properties: - common + - tor bgp: asn: 64154 peers: @@ -3941,16 +4099,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.158/32 - ipv6: 2064:100::9e/128 + ipv6: 2064:100:0:9e::/128 Ethernet1: ipv4: 10.0.1.59/31 ipv6: fc00::276/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.159/24 ipv6: fc0a::9f/64 ARISTA155T0: properties: - common + - tor bgp: asn: 64155 peers: @@ -3960,16 +4119,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.159/32 - ipv6: 2064:100::9f/128 + ipv6: 
2064:100:0:9f::/128 Ethernet1: ipv4: 10.0.1.61/31 ipv6: fc00::27a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.160/24 ipv6: fc0a::a0/64 ARISTA156T0: properties: - common + - tor bgp: asn: 64156 peers: @@ -3979,16 +4139,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.160/32 - ipv6: 2064:100::a0/128 + ipv6: 2064:100:0:a0::/128 Ethernet1: ipv4: 10.0.1.63/31 ipv6: fc00::27e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.161/24 ipv6: fc0a::a1/64 ARISTA157T0: properties: - common + - tor bgp: asn: 64157 peers: @@ -3998,16 +4159,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.161/32 - ipv6: 2064:100::a1/128 + ipv6: 2064:100:0:a1::/128 Ethernet1: ipv4: 10.0.1.65/31 ipv6: fc00::282/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.162/24 ipv6: fc0a::a2/64 ARISTA158T0: properties: - common + - tor bgp: asn: 64158 peers: @@ -4017,16 +4179,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.162/32 - ipv6: 2064:100::a2/128 + ipv6: 2064:100:0:a2::/128 Ethernet1: ipv4: 10.0.1.67/31 ipv6: fc00::286/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.163/24 ipv6: fc0a::a3/64 ARISTA159T0: properties: - common + - tor bgp: asn: 64159 peers: @@ -4036,16 +4199,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.163/32 - ipv6: 2064:100::a3/128 + ipv6: 2064:100:0:a3::/128 Ethernet1: ipv4: 10.0.1.69/31 ipv6: fc00::28a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.164/24 ipv6: fc0a::a4/64 ARISTA160T0: properties: - common + - tor bgp: asn: 64160 peers: @@ -4055,16 +4219,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.164/32 - ipv6: 2064:100::a4/128 + ipv6: 2064:100:0:a4::/128 Ethernet1: ipv4: 10.0.1.71/31 ipv6: fc00::28e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.165/24 ipv6: fc0a::a5/64 ARISTA05T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -4074,16 +4239,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.165/32 - ipv6: 2064:100::a5/128 + ipv6: 2064:100:0:a5::/128 Ethernet1: ipv4: 
10.0.1.73/31 ipv6: fc00::292/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.166/24 ipv6: fc0a::a6/64 ARISTA06T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -4093,16 +4259,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.166/32 - ipv6: 2064:100::a6/128 + ipv6: 2064:100:0:a6::/128 Ethernet1: ipv4: 10.0.1.75/31 ipv6: fc00::296/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.167/24 ipv6: fc0a::a7/64 ARISTA161T0: properties: - common + - tor bgp: asn: 64161 peers: @@ -4112,16 +4279,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.167/32 - ipv6: 2064:100::a7/128 + ipv6: 2064:100:0:a7::/128 Ethernet1: ipv4: 10.0.1.77/31 ipv6: fc00::29a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.168/24 ipv6: fc0a::a8/64 ARISTA162T0: properties: - common + - tor bgp: asn: 64162 peers: @@ -4131,16 +4299,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.168/32 - ipv6: 2064:100::a8/128 + ipv6: 2064:100:0:a8::/128 Ethernet1: ipv4: 10.0.1.79/31 ipv6: fc00::29e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.169/24 ipv6: fc0a::a9/64 ARISTA163T0: properties: - common + - tor bgp: asn: 64163 peers: @@ -4150,16 +4319,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.169/32 - ipv6: 2064:100::a9/128 + ipv6: 2064:100:0:a9::/128 Ethernet1: ipv4: 10.0.1.81/31 ipv6: fc00::2a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.170/24 ipv6: fc0a::aa/64 ARISTA164T0: properties: - common + - tor bgp: asn: 64164 peers: @@ -4169,16 +4339,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.170/32 - ipv6: 2064:100::aa/128 + ipv6: 2064:100:0:aa::/128 Ethernet1: ipv4: 10.0.1.83/31 ipv6: fc00::2a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.171/24 ipv6: fc0a::ab/64 ARISTA165T0: properties: - common + - tor bgp: asn: 64165 peers: @@ -4188,16 +4359,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.171/32 - ipv6: 2064:100::ab/128 + ipv6: 2064:100:0:ab::/128 Ethernet1: ipv4: 10.0.1.85/31 ipv6: fc00::2aa/126 - 
bp_interfaces: + bp_interface: ipv4: 10.10.246.172/24 ipv6: fc0a::ac/64 ARISTA166T0: properties: - common + - tor bgp: asn: 64166 peers: @@ -4207,16 +4379,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.172/32 - ipv6: 2064:100::ac/128 + ipv6: 2064:100:0:ac::/128 Ethernet1: ipv4: 10.0.1.87/31 ipv6: fc00::2ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.173/24 ipv6: fc0a::ad/64 ARISTA167T0: properties: - common + - tor bgp: asn: 64167 peers: @@ -4226,16 +4399,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.173/32 - ipv6: 2064:100::ad/128 + ipv6: 2064:100:0:ad::/128 Ethernet1: ipv4: 10.0.1.89/31 ipv6: fc00::2b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.174/24 ipv6: fc0a::ae/64 ARISTA168T0: properties: - common + - tor bgp: asn: 64168 peers: @@ -4245,16 +4419,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.174/32 - ipv6: 2064:100::ae/128 + ipv6: 2064:100:0:ae::/128 Ethernet1: ipv4: 10.0.1.91/31 ipv6: fc00::2b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.175/24 ipv6: fc0a::af/64 ARISTA07T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -4264,16 +4439,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.175/32 - ipv6: 2064:100::af/128 + ipv6: 2064:100:0:af::/128 Ethernet1: ipv4: 10.0.1.93/31 ipv6: fc00::2ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.176/24 ipv6: fc0a::b0/64 ARISTA08T2: properties: - common + - spine bgp: asn: 65200 peers: @@ -4283,16 +4459,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.176/32 - ipv6: 2064:100::b0/128 + ipv6: 2064:100:0:b0::/128 Ethernet1: ipv4: 10.0.1.95/31 ipv6: fc00::2be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.177/24 ipv6: fc0a::b1/64 ARISTA169T0: properties: - common + - tor bgp: asn: 64169 peers: @@ -4302,16 +4479,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.177/32 - ipv6: 2064:100::b1/128 + ipv6: 2064:100:0:b1::/128 Ethernet1: ipv4: 10.0.1.97/31 ipv6: fc00::2c2/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.178/24 ipv6: fc0a::b2/64 ARISTA170T0: properties: - common + - tor bgp: asn: 64170 peers: @@ -4321,16 +4499,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.178/32 - ipv6: 2064:100::b2/128 + ipv6: 2064:100:0:b2::/128 Ethernet1: ipv4: 10.0.1.99/31 ipv6: fc00::2c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.179/24 ipv6: fc0a::b3/64 ARISTA171T0: properties: - common + - tor bgp: asn: 64171 peers: @@ -4340,16 +4519,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.179/32 - ipv6: 2064:100::b3/128 + ipv6: 2064:100:0:b3::/128 Ethernet1: ipv4: 10.0.1.101/31 ipv6: fc00::2ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.180/24 ipv6: fc0a::b4/64 ARISTA172T0: properties: - common + - tor bgp: asn: 64172 peers: @@ -4359,16 +4539,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.180/32 - ipv6: 2064:100::b4/128 + ipv6: 2064:100:0:b4::/128 Ethernet1: ipv4: 10.0.1.103/31 ipv6: fc00::2ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.181/24 ipv6: fc0a::b5/64 ARISTA173T0: properties: - common + - tor bgp: asn: 64173 peers: @@ -4378,16 +4559,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.181/32 - ipv6: 2064:100::b5/128 + ipv6: 2064:100:0:b5::/128 Ethernet1: ipv4: 10.0.1.105/31 ipv6: fc00::2d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.182/24 ipv6: fc0a::b6/64 ARISTA174T0: properties: - common + - tor bgp: asn: 64174 peers: @@ -4397,16 +4579,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.182/32 - ipv6: 2064:100::b6/128 + ipv6: 2064:100:0:b6::/128 Ethernet1: ipv4: 10.0.1.107/31 ipv6: fc00::2d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.183/24 ipv6: fc0a::b7/64 ARISTA175T0: properties: - common + - tor bgp: asn: 64175 peers: @@ -4416,16 +4599,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.183/32 - ipv6: 2064:100::b7/128 + ipv6: 2064:100:0:b7::/128 Ethernet1: ipv4: 10.0.1.109/31 ipv6: fc00::2da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.184/24 ipv6: fc0a::b8/64 
ARISTA176T0: properties: - common + - tor bgp: asn: 64176 peers: @@ -4435,16 +4619,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.184/32 - ipv6: 2064:100::b8/128 + ipv6: 2064:100:0:b8::/128 Ethernet1: ipv4: 10.0.1.111/31 ipv6: fc00::2de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.185/24 ipv6: fc0a::b9/64 ARISTA177T0: properties: - common + - tor bgp: asn: 64177 peers: @@ -4454,16 +4639,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.185/32 - ipv6: 2064:100::b9/128 + ipv6: 2064:100:0:b9::/128 Ethernet1: ipv4: 10.0.1.113/31 ipv6: fc00::2e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.186/24 ipv6: fc0a::ba/64 ARISTA178T0: properties: - common + - tor bgp: asn: 64178 peers: @@ -4473,16 +4659,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.186/32 - ipv6: 2064:100::ba/128 + ipv6: 2064:100:0:ba::/128 Ethernet1: ipv4: 10.0.1.115/31 ipv6: fc00::2e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.187/24 ipv6: fc0a::bb/64 ARISTA179T0: properties: - common + - tor bgp: asn: 64179 peers: @@ -4492,16 +4679,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.187/32 - ipv6: 2064:100::bb/128 + ipv6: 2064:100:0:bb::/128 Ethernet1: ipv4: 10.0.1.117/31 ipv6: fc00::2ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.188/24 ipv6: fc0a::bc/64 ARISTA180T0: properties: - common + - tor bgp: asn: 64180 peers: @@ -4511,16 +4699,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.188/32 - ipv6: 2064:100::bc/128 + ipv6: 2064:100:0:bc::/128 Ethernet1: ipv4: 10.0.1.119/31 ipv6: fc00::2ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.189/24 ipv6: fc0a::bd/64 ARISTA181T0: properties: - common + - tor bgp: asn: 64181 peers: @@ -4530,16 +4719,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.189/32 - ipv6: 2064:100::bd/128 + ipv6: 2064:100:0:bd::/128 Ethernet1: ipv4: 10.0.1.121/31 ipv6: fc00::2f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.190/24 ipv6: fc0a::be/64 ARISTA182T0: properties: - common + - tor 
bgp: asn: 64182 peers: @@ -4549,16 +4739,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.190/32 - ipv6: 2064:100::be/128 + ipv6: 2064:100:0:be::/128 Ethernet1: ipv4: 10.0.1.123/31 ipv6: fc00::2f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.191/24 ipv6: fc0a::bf/64 ARISTA183T0: properties: - common + - tor bgp: asn: 64183 peers: @@ -4568,16 +4759,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.191/32 - ipv6: 2064:100::bf/128 + ipv6: 2064:100:0:bf::/128 Ethernet1: ipv4: 10.0.1.125/31 ipv6: fc00::2fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.192/24 ipv6: fc0a::c0/64 ARISTA184T0: properties: - common + - tor bgp: asn: 64184 peers: @@ -4587,16 +4779,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.192/32 - ipv6: 2064:100::c0/128 + ipv6: 2064:100:0:c0::/128 Ethernet1: ipv4: 10.0.1.127/31 ipv6: fc00::2fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.193/24 ipv6: fc0a::c1/64 ARISTA185T0: properties: - common + - tor bgp: asn: 64185 peers: @@ -4606,16 +4799,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.193/32 - ipv6: 2064:100::c1/128 + ipv6: 2064:100:0:c1::/128 Ethernet1: ipv4: 10.0.1.129/31 ipv6: fc00::302/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.194/24 ipv6: fc0a::c2/64 ARISTA186T0: properties: - common + - tor bgp: asn: 64186 peers: @@ -4625,16 +4819,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.194/32 - ipv6: 2064:100::c2/128 + ipv6: 2064:100:0:c2::/128 Ethernet1: ipv4: 10.0.1.131/31 ipv6: fc00::306/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.195/24 ipv6: fc0a::c3/64 ARISTA187T0: properties: - common + - tor bgp: asn: 64187 peers: @@ -4644,16 +4839,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.195/32 - ipv6: 2064:100::c3/128 + ipv6: 2064:100:0:c3::/128 Ethernet1: ipv4: 10.0.1.133/31 ipv6: fc00::30a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.196/24 ipv6: fc0a::c4/64 ARISTA188T0: properties: - common + - tor bgp: asn: 64188 peers: @@ -4663,16 +4859,17 
@@ configuration: interfaces: Loopback0: ipv4: 100.1.0.196/32 - ipv6: 2064:100::c4/128 + ipv6: 2064:100:0:c4::/128 Ethernet1: ipv4: 10.0.1.135/31 ipv6: fc00::30e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.197/24 ipv6: fc0a::c5/64 ARISTA189T0: properties: - common + - tor bgp: asn: 64189 peers: @@ -4682,16 +4879,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.197/32 - ipv6: 2064:100::c5/128 + ipv6: 2064:100:0:c5::/128 Ethernet1: ipv4: 10.0.1.137/31 ipv6: fc00::312/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.198/24 ipv6: fc0a::c6/64 ARISTA190T0: properties: - common + - tor bgp: asn: 64190 peers: @@ -4701,16 +4899,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.198/32 - ipv6: 2064:100::c6/128 + ipv6: 2064:100:0:c6::/128 Ethernet1: ipv4: 10.0.1.139/31 ipv6: fc00::316/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.199/24 ipv6: fc0a::c7/64 ARISTA191T0: properties: - common + - tor bgp: asn: 64191 peers: @@ -4720,16 +4919,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.199/32 - ipv6: 2064:100::c7/128 + ipv6: 2064:100:0:c7::/128 Ethernet1: ipv4: 10.0.1.141/31 ipv6: fc00::31a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.200/24 ipv6: fc0a::c8/64 ARISTA192T0: properties: - common + - tor bgp: asn: 64192 peers: @@ -4739,16 +4939,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.200/32 - ipv6: 2064:100::c8/128 + ipv6: 2064:100:0:c8::/128 Ethernet1: ipv4: 10.0.1.143/31 ipv6: fc00::31e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.201/24 ipv6: fc0a::c9/64 ARISTA193T0: properties: - common + - tor bgp: asn: 64193 peers: @@ -4758,16 +4959,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.201/32 - ipv6: 2064:100::c9/128 + ipv6: 2064:100:0:c9::/128 Ethernet1: ipv4: 10.0.1.145/31 ipv6: fc00::322/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.202/24 ipv6: fc0a::ca/64 ARISTA194T0: properties: - common + - tor bgp: asn: 64194 peers: @@ -4777,16 +4979,17 @@ configuration: interfaces: Loopback0: 
ipv4: 100.1.0.202/32 - ipv6: 2064:100::ca/128 + ipv6: 2064:100:0:ca::/128 Ethernet1: ipv4: 10.0.1.147/31 ipv6: fc00::326/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.203/24 ipv6: fc0a::cb/64 ARISTA195T0: properties: - common + - tor bgp: asn: 64195 peers: @@ -4796,16 +4999,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.203/32 - ipv6: 2064:100::cb/128 + ipv6: 2064:100:0:cb::/128 Ethernet1: ipv4: 10.0.1.149/31 ipv6: fc00::32a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.204/24 ipv6: fc0a::cc/64 ARISTA196T0: properties: - common + - tor bgp: asn: 64196 peers: @@ -4815,16 +5019,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.204/32 - ipv6: 2064:100::cc/128 + ipv6: 2064:100:0:cc::/128 Ethernet1: ipv4: 10.0.1.151/31 ipv6: fc00::32e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.205/24 ipv6: fc0a::cd/64 ARISTA197T0: properties: - common + - tor bgp: asn: 64197 peers: @@ -4834,16 +5039,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.205/32 - ipv6: 2064:100::cd/128 + ipv6: 2064:100:0:cd::/128 Ethernet1: ipv4: 10.0.1.153/31 ipv6: fc00::332/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.206/24 ipv6: fc0a::ce/64 ARISTA198T0: properties: - common + - tor bgp: asn: 64198 peers: @@ -4853,16 +5059,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.206/32 - ipv6: 2064:100::ce/128 + ipv6: 2064:100:0:ce::/128 Ethernet1: ipv4: 10.0.1.155/31 ipv6: fc00::336/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.207/24 ipv6: fc0a::cf/64 ARISTA199T0: properties: - common + - tor bgp: asn: 64199 peers: @@ -4872,16 +5079,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.207/32 - ipv6: 2064:100::cf/128 + ipv6: 2064:100:0:cf::/128 Ethernet1: ipv4: 10.0.1.157/31 ipv6: fc00::33a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.208/24 ipv6: fc0a::d0/64 ARISTA200T0: properties: - common + - tor bgp: asn: 64200 peers: @@ -4891,16 +5099,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.208/32 - ipv6: 
2064:100::d0/128 + ipv6: 2064:100:0:d0::/128 Ethernet1: ipv4: 10.0.1.159/31 ipv6: fc00::33e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.209/24 ipv6: fc0a::d1/64 ARISTA201T0: properties: - common + - tor bgp: asn: 64201 peers: @@ -4910,16 +5119,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.209/32 - ipv6: 2064:100::d1/128 + ipv6: 2064:100:0:d1::/128 Ethernet1: ipv4: 10.0.1.161/31 ipv6: fc00::342/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.210/24 ipv6: fc0a::d2/64 ARISTA202T0: properties: - common + - tor bgp: asn: 64202 peers: @@ -4929,16 +5139,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.210/32 - ipv6: 2064:100::d2/128 + ipv6: 2064:100:0:d2::/128 Ethernet1: ipv4: 10.0.1.163/31 ipv6: fc00::346/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.211/24 ipv6: fc0a::d3/64 ARISTA203T0: properties: - common + - tor bgp: asn: 64203 peers: @@ -4948,16 +5159,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.211/32 - ipv6: 2064:100::d3/128 + ipv6: 2064:100:0:d3::/128 Ethernet1: ipv4: 10.0.1.165/31 ipv6: fc00::34a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.212/24 ipv6: fc0a::d4/64 ARISTA204T0: properties: - common + - tor bgp: asn: 64204 peers: @@ -4967,16 +5179,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.212/32 - ipv6: 2064:100::d4/128 + ipv6: 2064:100:0:d4::/128 Ethernet1: ipv4: 10.0.1.167/31 ipv6: fc00::34e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.213/24 ipv6: fc0a::d5/64 ARISTA205T0: properties: - common + - tor bgp: asn: 64205 peers: @@ -4986,16 +5199,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.213/32 - ipv6: 2064:100::d5/128 + ipv6: 2064:100:0:d5::/128 Ethernet1: ipv4: 10.0.1.169/31 ipv6: fc00::352/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.214/24 ipv6: fc0a::d6/64 ARISTA206T0: properties: - common + - tor bgp: asn: 64206 peers: @@ -5005,16 +5219,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.214/32 - ipv6: 2064:100::d6/128 + ipv6: 
2064:100:0:d6::/128 Ethernet1: ipv4: 10.0.1.171/31 ipv6: fc00::356/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.215/24 ipv6: fc0a::d7/64 ARISTA207T0: properties: - common + - tor bgp: asn: 64207 peers: @@ -5024,16 +5239,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.215/32 - ipv6: 2064:100::d7/128 + ipv6: 2064:100:0:d7::/128 Ethernet1: ipv4: 10.0.1.173/31 ipv6: fc00::35a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.216/24 ipv6: fc0a::d8/64 ARISTA208T0: properties: - common + - tor bgp: asn: 64208 peers: @@ -5043,16 +5259,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.216/32 - ipv6: 2064:100::d8/128 + ipv6: 2064:100:0:d8::/128 Ethernet1: ipv4: 10.0.1.175/31 ipv6: fc00::35e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.217/24 ipv6: fc0a::d9/64 ARISTA209T0: properties: - common + - tor bgp: asn: 64209 peers: @@ -5062,16 +5279,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.217/32 - ipv6: 2064:100::d9/128 + ipv6: 2064:100:0:d9::/128 Ethernet1: ipv4: 10.0.1.177/31 ipv6: fc00::362/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.218/24 ipv6: fc0a::da/64 ARISTA210T0: properties: - common + - tor bgp: asn: 64210 peers: @@ -5081,16 +5299,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.218/32 - ipv6: 2064:100::da/128 + ipv6: 2064:100:0:da::/128 Ethernet1: ipv4: 10.0.1.179/31 ipv6: fc00::366/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.219/24 ipv6: fc0a::db/64 ARISTA211T0: properties: - common + - tor bgp: asn: 64211 peers: @@ -5100,16 +5319,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.219/32 - ipv6: 2064:100::db/128 + ipv6: 2064:100:0:db::/128 Ethernet1: ipv4: 10.0.1.181/31 ipv6: fc00::36a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.220/24 ipv6: fc0a::dc/64 ARISTA212T0: properties: - common + - tor bgp: asn: 64212 peers: @@ -5119,16 +5339,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.220/32 - ipv6: 2064:100::dc/128 + ipv6: 2064:100:0:dc::/128 Ethernet1: ipv4: 
10.0.1.183/31 ipv6: fc00::36e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.221/24 ipv6: fc0a::dd/64 ARISTA213T0: properties: - common + - tor bgp: asn: 64213 peers: @@ -5138,16 +5359,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.221/32 - ipv6: 2064:100::dd/128 + ipv6: 2064:100:0:dd::/128 Ethernet1: ipv4: 10.0.1.185/31 ipv6: fc00::372/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.222/24 ipv6: fc0a::de/64 ARISTA214T0: properties: - common + - tor bgp: asn: 64214 peers: @@ -5157,16 +5379,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.222/32 - ipv6: 2064:100::de/128 + ipv6: 2064:100:0:de::/128 Ethernet1: ipv4: 10.0.1.187/31 ipv6: fc00::376/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.223/24 ipv6: fc0a::df/64 ARISTA215T0: properties: - common + - tor bgp: asn: 64215 peers: @@ -5176,16 +5399,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.223/32 - ipv6: 2064:100::df/128 + ipv6: 2064:100:0:df::/128 Ethernet1: ipv4: 10.0.1.189/31 ipv6: fc00::37a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.224/24 ipv6: fc0a::e0/64 ARISTA216T0: properties: - common + - tor bgp: asn: 64216 peers: @@ -5195,16 +5419,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.224/32 - ipv6: 2064:100::e0/128 + ipv6: 2064:100:0:e0::/128 Ethernet1: ipv4: 10.0.1.191/31 ipv6: fc00::37e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.225/24 ipv6: fc0a::e1/64 ARISTA217T0: properties: - common + - tor bgp: asn: 64217 peers: @@ -5214,16 +5439,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.225/32 - ipv6: 2064:100::e1/128 + ipv6: 2064:100:0:e1::/128 Ethernet1: ipv4: 10.0.1.193/31 ipv6: fc00::382/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.226/24 ipv6: fc0a::e2/64 ARISTA218T0: properties: - common + - tor bgp: asn: 64218 peers: @@ -5233,16 +5459,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.226/32 - ipv6: 2064:100::e2/128 + ipv6: 2064:100:0:e2::/128 Ethernet1: ipv4: 10.0.1.195/31 ipv6: fc00::386/126 - 
bp_interfaces: + bp_interface: ipv4: 10.10.246.227/24 ipv6: fc0a::e3/64 ARISTA219T0: properties: - common + - tor bgp: asn: 64219 peers: @@ -5252,16 +5479,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.227/32 - ipv6: 2064:100::e3/128 + ipv6: 2064:100:0:e3::/128 Ethernet1: ipv4: 10.0.1.197/31 ipv6: fc00::38a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.228/24 ipv6: fc0a::e4/64 ARISTA220T0: properties: - common + - tor bgp: asn: 64220 peers: @@ -5271,16 +5499,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.228/32 - ipv6: 2064:100::e4/128 + ipv6: 2064:100:0:e4::/128 Ethernet1: ipv4: 10.0.1.199/31 ipv6: fc00::38e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.229/24 ipv6: fc0a::e5/64 ARISTA221T0: properties: - common + - tor bgp: asn: 64221 peers: @@ -5290,16 +5519,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.229/32 - ipv6: 2064:100::e5/128 + ipv6: 2064:100:0:e5::/128 Ethernet1: ipv4: 10.0.1.201/31 ipv6: fc00::392/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.230/24 ipv6: fc0a::e6/64 ARISTA222T0: properties: - common + - tor bgp: asn: 64222 peers: @@ -5309,16 +5539,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.230/32 - ipv6: 2064:100::e6/128 + ipv6: 2064:100:0:e6::/128 Ethernet1: ipv4: 10.0.1.203/31 ipv6: fc00::396/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.231/24 ipv6: fc0a::e7/64 ARISTA223T0: properties: - common + - tor bgp: asn: 64223 peers: @@ -5328,16 +5559,17 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.231/32 - ipv6: 2064:100::e7/128 + ipv6: 2064:100:0:e7::/128 Ethernet1: ipv4: 10.0.1.205/31 ipv6: fc00::39a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.232/24 ipv6: fc0a::e8/64 ARISTA224T0: properties: - common + - tor bgp: asn: 64224 peers: @@ -5347,10 +5579,10 @@ configuration: interfaces: Loopback0: ipv4: 100.1.0.232/32 - ipv6: 2064:100::e8/128 + ipv6: 2064:100:0:e8::/128 Ethernet1: ipv4: 10.0.1.207/31 ipv6: fc00::39e/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.233/24 ipv6: fc0a::e9/64 From 1f31f953524006216319b090b73496db09128d9b Mon Sep 17 00:00:00 2001 From: Chuan Wu <103085864+echuawu@users.noreply.github.com> Date: Thu, 14 Nov 2024 14:51:21 +0800 Subject: [PATCH 095/175] Update fanout deploy yaml file to support using 2024 image (#14389) Update fanout deploy yaml file to support using 2024 image in fanout switch Change-Id: Id21693e0cfa8d317669e6b2ab7428b208aeaea1f --- ansible/roles/fanout/tasks/fanout_sonic.yml | 8 ++++++++ ansible/roles/fanout/tasks/sonic/fanout_sonic_202405.yml | 1 + 2 files changed, 9 insertions(+) create mode 120000 ansible/roles/fanout/tasks/sonic/fanout_sonic_202405.yml diff --git a/ansible/roles/fanout/tasks/fanout_sonic.yml b/ansible/roles/fanout/tasks/fanout_sonic.yml index cf0a1e161fd..74f07d4a780 100644 --- a/ansible/roles/fanout/tasks/fanout_sonic.yml +++ b/ansible/roles/fanout/tasks/fanout_sonic.yml @@ -64,3 +64,11 @@ sonic/fanout_sonic_202311.yml when: dry_run is not defined and incremental is not defined when: "'2023' in fanout_sonic_version['build_version']" + +- name: deploy SONiC fanout with image version 202405 + block: + - name: deploy SONiC fanout not incremental and not dry_run + include_tasks: + sonic/fanout_sonic_202405.yml + when: dry_run is not defined and incremental is not defined + when: "'2024' in fanout_sonic_version['build_version']" diff --git a/ansible/roles/fanout/tasks/sonic/fanout_sonic_202405.yml b/ansible/roles/fanout/tasks/sonic/fanout_sonic_202405.yml new file mode 120000 index 00000000000..2f92d838450 --- /dev/null +++ b/ansible/roles/fanout/tasks/sonic/fanout_sonic_202405.yml @@ -0,0 +1 @@ +fanout_sonic_202311.yml \ No newline at end of file From 4efbf3ef0da138beee72af144e712c6188befdee Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:03:07 +0800 Subject: [PATCH 096/175] Move get_bgp_speaker_runningconfig to common (#15528) What is the motivation for this PR? 
In script test_bgp_dual_asn.py, there is an import from the folder tests/generic_config_updater. To minimize cross-module dependencies, we have refactored this function to a common location. How did you do it? How did you verify/test it? --- tests/bgp/test_bgp_dual_asn.py | 2 +- tests/common/gu_utils.py | 22 +++++++++++++++++++ .../test_bgp_speaker.py | 22 +------------------ 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/tests/bgp/test_bgp_dual_asn.py b/tests/bgp/test_bgp_dual_asn.py index 9ad279d95f2..c5f6fadd4cf 100644 --- a/tests/bgp/test_bgp_dual_asn.py +++ b/tests/bgp/test_bgp_dual_asn.py @@ -12,7 +12,7 @@ from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert from bgp_helpers import update_routes -from tests.generic_config_updater.test_bgp_speaker import get_bgp_speaker_runningconfig +from tests.common.gu_utils import get_bgp_speaker_runningconfig from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile from tests.common.gu_utils import ( diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index e62ece315cf..1d6648e40ac 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -3,6 +3,7 @@ import pytest import os import time +import re from jsonpointer import JsonPointer from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -477,3 +478,24 @@ def expect_acl_rule_removed(duthost, rulename, setup): removed = len(output) == 0 pytest_assert(removed, "'{}' showed a rule, this following rule should have been removed".format(cmds)) + + +def get_bgp_speaker_runningconfig(duthost): + """ Get bgp speaker config that contains src_address and ip_range + + Sample output in t0: + ['\n neighbor BGPSLBPassive update-source 10.1.0.32', + '\n neighbor BGPVac update-source 10.1.0.32', + '\n bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive', + '\n bgp 
listen range 192.168.0.0/21 peer-group BGPVac'] + """ + cmds = "show runningconfiguration bgp" + output = duthost.shell(cmds) + pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) + + # Sample: + # neighbor BGPSLBPassive update-source 10.1.0.32 + # bgp listen range 192.168.0.0/21 peer-group BGPVac + bgp_speaker_pattern = r"\s+neighbor.*update-source.*|\s+bgp listen range.*" + bgp_speaker_config = re.findall(bgp_speaker_pattern, output['stdout']) + return bgp_speaker_config diff --git a/tests/generic_config_updater/test_bgp_speaker.py b/tests/generic_config_updater/test_bgp_speaker.py index 6dcdeda193d..79b8f7a762f 100644 --- a/tests/generic_config_updater/test_bgp_speaker.py +++ b/tests/generic_config_updater/test_bgp_speaker.py @@ -7,6 +7,7 @@ from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload +from tests.common.gu_utils import get_bgp_speaker_runningconfig pytestmark = [ pytest.mark.topology('t0'), # BGP Speaker is limited to t0 only @@ -56,27 +57,6 @@ def lo_intf_ips(rand_selected_dut, tbinfo): pytest_assert(True, "Required ipv4 and ipv6 to start the test") -def get_bgp_speaker_runningconfig(duthost): - """ Get bgp speaker config that contains src_address and ip_range - - Sample output in t0: - ['\n neighbor BGPSLBPassive update-source 10.1.0.32', - '\n neighbor BGPVac update-source 10.1.0.32', - '\n bgp listen range 10.255.0.0/25 peer-group BGPSLBPassive', - '\n bgp listen range 192.168.0.0/21 peer-group BGPVac'] - """ - cmds = "show runningconfiguration bgp" - output = duthost.shell(cmds) - pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) - - # Sample: - # neighbor BGPSLBPassive update-source 10.1.0.32 - # bgp listen range 192.168.0.0/21 peer-group BGPVac - bgp_speaker_pattern = r"\s+neighbor.*update-source.*|\s+bgp listen 
range.*" - bgp_speaker_config = re.findall(bgp_speaker_pattern, output['stdout']) - return bgp_speaker_config - - @pytest.fixture(autouse=True) def setup_env(duthosts, rand_one_dut_hostname): """ From 0c934e3ab781f35dfbe9f1e0e36e7df38f6da514 Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Thu, 14 Nov 2024 13:14:44 -0800 Subject: [PATCH 097/175] Add scope into JSON patch of existing GCU testcases for Multi ASIC. (#14098) ### Description of PR Summary: Improve existing GCU test cases for multi asic. ### Approach #### What is the motivation for this PR? In multi asic, by default, there is no namespace when we do replace or remove operation, which will fail due to the path is incomplete. #### How did you do it? When testcase is running, the apply-patch wrapper in test code will inject the localhost namespace into payload. --- tests/bgp/test_bgp_bbr.py | 3 ++ tests/bgp/test_bgp_bbr_default_state.py | 3 ++ tests/bgp/test_bgp_dual_asn.py | 3 ++ tests/common/gu_utils.py | 32 +++++++++++++++++++ tests/generic_config_updater/gu_utils.py | 2 ++ tests/generic_config_updater/test_aaa.py | 14 ++++++++ .../generic_config_updater/test_bgp_prefix.py | 5 +++ .../test_bgp_sentinel.py | 5 +++ .../test_bgp_speaker.py | 5 +++ tests/generic_config_updater/test_bgpl.py | 6 ++++ tests/generic_config_updater/test_cacl.py | 15 +++++++++ .../generic_config_updater/test_dhcp_relay.py | 5 +++ .../test_ecn_config_update.py | 2 ++ .../test_eth_interface.py | 11 +++++++ .../test_incremental_qos.py | 2 ++ tests/generic_config_updater/test_ip_bgp.py | 6 ++++ .../test_kubernetes_config.py | 2 ++ .../test_lo_interface.py | 8 +++++ .../test_mgmt_interface.py | 2 ++ ...est_mmu_dynamic_threshold_config_update.py | 2 ++ .../test_monitor_config.py | 2 ++ tests/generic_config_updater/test_ntp.py | 7 ++++ .../test_pfcwd_interval.py | 2 ++ .../test_pfcwd_status.py | 3 ++ .../test_pg_headroom_update.py | 2 ++ .../test_portchannel_interface.py | 6 ++++ 
tests/generic_config_updater/test_syslog.py | 6 ++++ .../test_vlan_interface.py | 7 ++++ 28 files changed, 168 insertions(+) diff --git a/tests/bgp/test_bgp_bbr.py b/tests/bgp/test_bgp_bbr.py index 281968d5648..a71beb2dbe1 100644 --- a/tests/bgp/test_bgp_bbr.py +++ b/tests/bgp/test_bgp_bbr.py @@ -22,6 +22,7 @@ from tests.common.utilities import wait_until, delete_running_config from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic pytestmark = [ @@ -75,6 +76,7 @@ def add_bbr_config_to_running_config(duthost, status): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -97,6 +99,7 @@ def config_bbr_by_gcu(duthost, status): "value": "{}".format(status) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/bgp/test_bgp_bbr_default_state.py b/tests/bgp/test_bgp_bbr_default_state.py index bf6019c671b..5e8e8b0181c 100644 --- a/tests/bgp/test_bgp_bbr_default_state.py +++ b/tests/bgp/test_bgp_bbr_default_state.py @@ -11,6 +11,7 @@ from tests.common.utilities import delete_running_config from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.config_reload import config_reload @@ -54,6 +55,7 @@ def add_bbr_config_to_running_config(duthost, status): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: @@ -73,6 +75,7 @@ def config_bbr_by_gcu(duthost, status): "value": "{}".format(status) } ] + json_patch = 
format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: diff --git a/tests/bgp/test_bgp_dual_asn.py b/tests/bgp/test_bgp_dual_asn.py index c5f6fadd4cf..45b1e1b5a00 100644 --- a/tests/bgp/test_bgp_dual_asn.py +++ b/tests/bgp/test_bgp_dual_asn.py @@ -15,6 +15,7 @@ from tests.common.gu_utils import get_bgp_speaker_runningconfig from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import ( create_checkpoint, delete_checkpoint, @@ -367,6 +368,7 @@ def bgp_peer_range_add_config( } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -402,6 +404,7 @@ def bgp_peer_range_delete_config( {"op": "remove", "path": "/BGP_PEER_RANGE/{}".format(ip_range_name)}, {"op": "remove", "path": "/BGP_PEER_RANGE/{}".format(ipv6_range_name)}, ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index 1d6648e40ac..07435568d5b 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -20,6 +20,8 @@ BASE_DIR = os.path.dirname(os.path.realpath(__file__)) FILES_DIR = os.path.join(BASE_DIR, "files") TMP_DIR = '/tmp' +HOST_NAME = "/localhost" +ASIC_PREFIX = "/asic" def generate_tmpfile(duthost): @@ -34,6 +36,36 @@ def delete_tmpfile(duthost, tmpfile): duthost.file(path=tmpfile, state='absent') +def format_json_patch_for_multiasic(duthost, json_data, is_asic_specific=False): + if is_asic_specific: + return json_data + + json_patch = [] + if duthost.is_multi_asic: + num_asic = duthost.facts.get('num_asic') + + for operation in 
json_data: + path = operation["path"] + if path.startswith(HOST_NAME) and ASIC_PREFIX in path: + json_patch.append(operation) + else: + template = { + "op": operation["op"], + "path": "{}{}".format(HOST_NAME, path) + } + + if operation["op"] in ["add", "replace", "test"]: + template["value"] = operation["value"] + json_patch.append(template.copy()) + for asic_index in range(num_asic): + asic_ns = "{}{}".format(ASIC_PREFIX, asic_index) + template["path"] = "{}{}".format(asic_ns, path) + json_patch.append(template.copy()) + json_data = json_patch + + return json_data + + def apply_patch(duthost, json_data, dest_file): """Run apply-patch on target duthost diff --git a/tests/generic_config_updater/gu_utils.py b/tests/generic_config_updater/gu_utils.py index 6032b26145f..6203adaaaf6 100644 --- a/tests/generic_config_updater/gu_utils.py +++ b/tests/generic_config_updater/gu_utils.py @@ -2,6 +2,7 @@ import logging import json from tests.common.gu_utils import apply_patch, generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic BASE_DIR = os.path.dirname(os.path.realpath(__file__)) @@ -39,6 +40,7 @@ def load_and_apply_json_patch(duthost, file_name, setup): with open(os.path.join(TEMPLATES_DIR, file_name)) as file: json_patch = json.load(file) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) duts_to_apply = [duthost] outputs = [] if setup["is_dualtor"]: diff --git a/tests/generic_config_updater/test_aaa.py b/tests/generic_config_updater/test_aaa.py index 52bdaeffa57..802895ab1ea 100644 --- a/tests/generic_config_updater/test_aaa.py +++ b/tests/generic_config_updater/test_aaa.py @@ -5,6 +5,7 @@ from tests.common.fixtures.tacacs import get_aaa_sub_options_value from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from 
tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ @@ -168,6 +169,7 @@ def aaa_tc1_add_config(duthost): "value": aaa_config } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -206,6 +208,7 @@ def aaa_tc1_replace(duthost): "value": "tacacs+" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -243,6 +246,7 @@ def aaa_tc1_add_duplicate(duthost): "value": "tacacs+" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -269,6 +273,7 @@ def aaa_tc1_remove(duthost): "path": "/AAA" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -313,6 +318,7 @@ def tacacs_global_tc2_add_config(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -352,6 +358,7 @@ def tacacs_global_tc2_invalid_input(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -375,6 +382,7 @@ def tacacs_global_tc2_duplicate_input(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -400,6 +408,7 @@ def tacacs_global_tc2_remove(duthost): "path": "/TACPLUS" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile 
{}".format(tmpfile)) @@ -443,6 +452,7 @@ def tacacs_server_tc3_add_init(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -481,6 +491,7 @@ def tacacs_server_tc3_add_max(duthost): } json_patch.append(patch) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -521,6 +532,7 @@ def tacacs_server_tc3_replace_invalid(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -542,6 +554,7 @@ def tacacs_server_tc3_add_duplicate(duthost): "value": TACACS_SERVER_OPTION } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -566,6 +579,7 @@ def tacacs_server_tc3_remove(duthost): "path": "/TACPLUS_SERVER" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_bgp_prefix.py b/tests/generic_config_updater/test_bgp_prefix.py index 3f40de54ed9..84f26560239 100644 --- a/tests/generic_config_updater/test_bgp_prefix.py +++ b/tests/generic_config_updater/test_bgp_prefix.py @@ -5,6 +5,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_failure, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ @@ -115,6 +116,7 @@ def bgp_prefix_tc1_add_config(duthost, community, 
community_table): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -155,6 +157,7 @@ def bgp_prefix_tc1_xfail(duthost, community_table): "value": prefixes_v4 } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -182,6 +185,7 @@ def bgp_prefix_tc1_replace(duthost, community, community_table): "value": PREFIXES_V4_DUMMY } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -215,6 +219,7 @@ def bgp_prefix_tc1_remove(duthost, community): "path": "/BGP_ALLOWED_PREFIXES" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_bgp_sentinel.py b/tests/generic_config_updater/test_bgp_sentinel.py index be35f1a13b4..bfcb14852c7 100644 --- a/tests/generic_config_updater/test_bgp_sentinel.py +++ b/tests/generic_config_updater/test_bgp_sentinel.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload @@ -129,6 +130,7 @@ def bgp_sentinel_tc1_add_config(duthost, lo_intf_ips): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -168,6 +170,7 @@ def bgp_sentinel_tc1_add_dummy_ip_range(duthost): "value": "{}".format(DUMMY_IP_RANGE_V6) } ] + json_patch = 
format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -201,6 +204,7 @@ def bgp_sentinel_tc1_rm_dummy_ip_range(duthost): "path": "/BGP_SENTINELS/{}/ip_range/1".format(BGPSENTINEL_V6) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -235,6 +239,7 @@ def bgp_sentinel_tc1_replace_src_address(duthost): "value": "{}".format(DUMMY_SRC_ADDRESS_V6) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_bgp_speaker.py b/tests/generic_config_updater/test_bgp_speaker.py index 79b8f7a762f..853e17d3e11 100644 --- a/tests/generic_config_updater/test_bgp_speaker.py +++ b/tests/generic_config_updater/test_bgp_speaker.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import get_bgp_speaker_runningconfig @@ -124,6 +125,7 @@ def bgp_speaker_tc1_add_config(duthost, lo_intf_ips, vlan_intf_ip_ranges): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -163,6 +165,7 @@ def bgp_speaker_tc1_add_dummy_ip_range(duthost): "value": "{}".format(DUMMY_IP_RANGE_V6) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -195,6 +198,7 @@ def 
bgp_speaker_tc1_rm_dummy_ip_range(duthost): "path": "/BGP_PEER_RANGE/{}/ip_range/1".format(BGPSPEAKER_V6) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -229,6 +233,7 @@ def bgp_speaker_tc1_replace_src_address(duthost): "value": "{}".format(DUMMY_SRC_ADDRESS_V6) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_bgpl.py b/tests/generic_config_updater/test_bgpl.py index b42ad26e662..3d8e164bcd8 100644 --- a/tests/generic_config_updater/test_bgpl.py +++ b/tests/generic_config_updater/test_bgpl.py @@ -7,6 +7,7 @@ from tests.common.helpers.generators import generate_ip_through_default_route from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ @@ -113,6 +114,7 @@ def bgpmon_tc1_add_init(duthost, bgpmon_setup_info): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -146,6 +148,7 @@ def bgpmon_tc1_add_duplicate(duthost, bgpmon_setup_info): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -170,6 +173,7 @@ def bgpmon_tc1_admin_change(duthost, bgpmon_setup_info): "value": "down" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -212,6 +216,7 @@ def bgpmon_tc1_ip_change(duthost, 
bgpmon_setup_info): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -234,6 +239,7 @@ def bgpmon_tc1_remove(duthost): "path": "/BGP_MONITORS" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_cacl.py b/tests/generic_config_updater/test_cacl.py index 6c4e3ec968d..f62953d576e 100644 --- a/tests/generic_config_updater/test_cacl.py +++ b/tests/generic_config_updater/test_cacl.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_res_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.utilities import wait_until @@ -165,6 +166,7 @@ def cacl_tc1_add_new_table(duthost, protocol): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -200,6 +202,7 @@ def cacl_tc1_add_duplicate_table(duthost, protocol): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -259,6 +262,7 @@ def cacl_tc1_replace_table_variable(duthost, protocol): } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -305,6 +309,7 @@ def cacl_tc1_add_invalid_table(duthost, protocol): tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) + 
json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) @@ -322,6 +327,7 @@ def cacl_tc1_remove_unexisted_table(duthost): "path": "/ACL_RULE/SSH_ONLY_UNEXISTED" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -345,6 +351,7 @@ def cacl_tc1_remove_table(duthost, protocol): "path": "/ACL_TABLE/{}".format(table_name) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -406,6 +413,7 @@ def cacl_tc2_add_init_rule(duthost, protocol): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -463,6 +471,7 @@ def cacl_tc2_add_duplicate_rule(duthost, protocol): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -502,6 +511,7 @@ def cacl_tc2_replace_rule(duthost, protocol): "value": "8.8.8.8/32" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -544,6 +554,7 @@ def cacl_tc2_add_rule_to_unexisted_table(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -572,6 +583,7 @@ def cacl_tc2_remove_table_before_rule(duthost, protocol): "path": "/ACL_TABLE/{}".format(table) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -600,6 +612,7 @@ def 
cacl_tc2_remove_unexist_rule(duthost, protocol): "path": "/ACL_RULE/{}|TEST_DROP2".format(table) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: @@ -618,6 +631,7 @@ def cacl_tc2_remove_rule(duthost): "path": "/ACL_RULE" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -658,6 +672,7 @@ def cacl_external_client_add_new_table(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_dhcp_relay.py b/tests/generic_config_updater/test_dhcp_relay.py index 9dca17b1cb4..32d0fd5f2e9 100644 --- a/tests/generic_config_updater/test_dhcp_relay.py +++ b/tests/generic_config_updater/test_dhcp_relay.py @@ -7,6 +7,7 @@ utils_vlan_intfs_dict_add, utils_create_test_vlans # noqa F401 from tests.common.gu_utils import apply_patch, expect_op_success, expect_res_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload, rollback pytestmark = [ @@ -257,6 +258,7 @@ def test_dhcp_relay_tc1_rm_nonexist(rand_selected_dut, vlan_intfs_list): "op": "remove", "path": "/VLAN/Vlan" + str(vlan_intfs_list[0]) + "/dhcp_servers/5" }] + dhcp_rm_nonexist_json = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=dhcp_rm_nonexist_json) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) @@ -277,6 +279,7 @@ def test_dhcp_relay_tc2_add_exist(rand_selected_dut, vlan_intfs_list): "path": "/VLAN/Vlan" + str(vlan_intfs_list[0]) + "/dhcp_servers/0", "value": 
"192.0." + str(vlan_intfs_list[0]) + ".1" }] + dhcp_add_exist_json = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=dhcp_add_exist_json) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) @@ -316,6 +319,7 @@ def test_dhcp_relay_tc3_add_and_rm(rand_selected_dut, vlan_intfs_list): "path": "/VLAN/Vlan" + str(vlan_intfs_list[0]) + "/dhcp_servers/4", "value": "192.0." + str(vlan_intfs_list[0]) + ".5" }] + dhcp_add_rm_json = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=dhcp_add_rm_json) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) @@ -360,6 +364,7 @@ def test_dhcp_relay_tc4_replace(rand_selected_dut, vlan_intfs_list): "path": "/VLAN/Vlan" + str(vlan_intfs_list[0]) + "/dhcp_servers/0", "value": "192.0." + str(vlan_intfs_list[0]) + ".8" }] + dhcp_replace_json = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=dhcp_replace_json) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_ecn_config_update.py b/tests/generic_config_updater/test_ecn_config_update.py index a3459253981..bdb2dbb56d5 100644 --- a/tests/generic_config_updater/test_ecn_config_update.py +++ b/tests/generic_config_updater/test_ecn_config_update.py @@ -7,6 +7,7 @@ from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import is_valid_platform_and_version @@ -107,6 +108,7 @@ def test_ecn_config_updates(duthost, ensure_dut_readiness, configdb_field, opera "path": "/WRED_PROFILE/AZURE_LOSSLESS/{}".format(field), "value": 
"{}".format(value)}) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) if is_valid_platform_and_version(duthost, "WRED_PROFILE", "ECN tuning", operation): diff --git a/tests/generic_config_updater/test_eth_interface.py b/tests/generic_config_updater/test_eth_interface.py index 7d63aaf8a95..c8a6a5a1525 100644 --- a/tests/generic_config_updater/test_eth_interface.py +++ b/tests/generic_config_updater/test_eth_interface.py @@ -7,6 +7,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.utilities import wait_until @@ -146,6 +147,7 @@ def test_remove_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): "path": "/PORT/Ethernet0/lanes" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -173,6 +175,7 @@ def test_replace_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): "value": "{}".format(update_lanes) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -198,6 +201,7 @@ def test_replace_mtu(duthosts, rand_one_dut_hostname, ensure_dut_readiness): "value": "{}".format(target_mtu) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -222,6 +226,7 @@ def test_toggle_pfc_asym(duthosts, rand_one_dut_hostname, ensure_dut_readiness, "value": "{}".format(pfc_asym) } ] + 
json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -247,6 +252,7 @@ def test_replace_fec(duthosts, rand_one_dut_hostname, ensure_dut_readiness, fec) "value": "{}".format(fec) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -277,6 +283,7 @@ def test_update_invalid_index(duthosts, rand_one_dut_hostname, ensure_dut_readin "value": "abc1" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -315,6 +322,7 @@ def test_update_valid_index(duthosts, rand_one_dut_hostname, ensure_dut_readines "value": "{}".format(list(interfaces.values())[0]) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -337,6 +345,7 @@ def test_update_speed(duthosts, rand_one_dut_hostname, ensure_dut_readiness): "value": "{}".format(speed) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -364,6 +373,7 @@ def test_update_description(duthosts, rand_one_dut_hostname, ensure_dut_readines "value": "Updated description" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -385,6 +395,7 @@ def test_eth_interface_admin_change(duthosts, rand_one_dut_hostname, admin_statu "value": "{}".format(admin_status) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git 
a/tests/generic_config_updater/test_incremental_qos.py b/tests/generic_config_updater/test_incremental_qos.py index 7856320fe53..0384793e005 100644 --- a/tests/generic_config_updater/test_incremental_qos.py +++ b/tests/generic_config_updater/test_incremental_qos.py @@ -9,6 +9,7 @@ from tests.common.gu_utils import apply_patch, expect_op_success, \ expect_op_failure # noqa F401 from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import is_valid_platform_and_version from tests.common.mellanox_data import is_mellanox_device @@ -236,6 +237,7 @@ def test_incremental_qos_config_updates(duthost, tbinfo, ensure_dut_readiness, c "path": "/BUFFER_POOL/{}".format(configdb_field), "value": "{}".format(value) }] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_ip_bgp.py b/tests/generic_config_updater/test_ip_bgp.py index 70b9f318b67..9e3ec8b1c44 100644 --- a/tests/generic_config_updater/test_ip_bgp.py +++ b/tests/generic_config_updater/test_ip_bgp.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload logger = logging.getLogger(__name__) @@ -76,6 +77,7 @@ def add_deleted_ip_neighbor(duthost, ip_version=6): "value": ip_neighbor_config } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) 
@@ -100,6 +102,7 @@ def add_duplicate_ip_neighbor(duthost, ip_version=6): "value": ip_neighbor_config } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -132,6 +135,7 @@ def invalid_ip_neighbor(duthost, ip_version=6): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -157,6 +161,7 @@ def ip_neighbor_admin_change(duthost, ip_version=6): "value": "down" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -183,6 +188,7 @@ def delete_ip_neighbor(duthost, ip_version=6): "path": "/BGP_NEIGHBOR/{}".format(ip_neighbor_address) } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_kubernetes_config.py b/tests/generic_config_updater/test_kubernetes_config.py index 51d4234141d..a36dfeba5ab 100644 --- a/tests/generic_config_updater/test_kubernetes_config.py +++ b/tests/generic_config_updater/test_kubernetes_config.py @@ -5,6 +5,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload @@ -265,6 +266,7 @@ def k8s_config_update(duthost, test_data): for num, (json_patch, target_config, target_table, expected_result) in enumerate(test_data): tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) + json_patch = 
format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_lo_interface.py b/tests/generic_config_updater/test_lo_interface.py index 2b04831e87f..04e56711132 100644 --- a/tests/generic_config_updater/test_lo_interface.py +++ b/tests/generic_config_updater/test_lo_interface.py @@ -5,6 +5,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import create_path, check_show_ip_intf, check_vrf_route_for_intf @@ -111,6 +112,7 @@ def lo_interface_tc1_add_init(duthost, lo_intf): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -156,6 +158,7 @@ def lo_interface_tc1_add_duplicate(duthost, lo_intf): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -205,6 +208,7 @@ def lo_interface_tc1_xfail(duthost, lo_intf): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -258,6 +262,7 @@ def lo_interface_tc1_replace(duthost, lo_intf): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -283,6 +288,7 @@ def lo_interface_tc1_remove(duthost, lo_intf): "path": "/LOOPBACK_INTERFACE" } ] + json_patch = 
format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -325,6 +331,7 @@ def setup_vrf_config(duthost, lo_intf): "value": "Vrf_01" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -377,6 +384,7 @@ def test_lo_interface_tc2_vrf_change(rand_selected_dut, lo_intf): "value": "Vrf_02" } ] + json_patch = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=json_patch) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_mgmt_interface.py b/tests/generic_config_updater/test_mgmt_interface.py index e5d9a220a55..cc31f4127b0 100644 --- a/tests/generic_config_updater/test_mgmt_interface.py +++ b/tests/generic_config_updater/test_mgmt_interface.py @@ -5,6 +5,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, create_path from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.utilities import wait_for_file_changed, FORCED_MGMT_ROUTE_PRIORITY @@ -56,6 +57,7 @@ def update_forced_mgmt_route(duthost, interface_address, interface_key, routes): else: json_patch[0]["op"] = "add" + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py b/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py index 0d80da1ed65..d9d38397f6a 100644 --- 
a/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py +++ b/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py @@ -7,6 +7,7 @@ from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert from tests.common.gu_utils import apply_patch, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ @@ -122,6 +123,7 @@ def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation, ski } json_patch.append(individual_patch) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {} created for json patch of updating dynamic threshold and operation: {}" .format(tmpfile, operation)) diff --git a/tests/generic_config_updater/test_monitor_config.py b/tests/generic_config_updater/test_monitor_config.py index 860a5676558..a184fe52d70 100644 --- a/tests/generic_config_updater/test_monitor_config.py +++ b/tests/generic_config_updater/test_monitor_config.py @@ -4,6 +4,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_res_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback, rollback_or_reload pytestmark = [ @@ -194,6 +195,7 @@ def monitor_config_add_config(duthost, get_valid_acl_ports): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_ntp.py b/tests/generic_config_updater/test_ntp.py index c54ef5a699e..9f8771ec35a 
100644 --- a/tests/generic_config_updater/test_ntp.py +++ b/tests/generic_config_updater/test_ntp.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_failure, expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.utilities import wait_until @@ -115,6 +116,7 @@ def ntp_server_tc1_add_config(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) json_patch_bc = [ { @@ -125,6 +127,7 @@ def ntp_server_tc1_add_config(duthost): } } ] + json_patch_bc = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch_bc) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -169,6 +172,7 @@ def ntp_server_tc1_xfail(duthost): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -199,6 +203,7 @@ def ntp_server_tc1_replace(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) json_patch_bc = [ { @@ -211,6 +216,7 @@ def ntp_server_tc1_replace(duthost): "value": {} } ] + json_patch_bc = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch_bc) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -245,6 +251,7 @@ def ntp_server_tc1_remove(duthost): "path": "/NTP_SERVER" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_pfcwd_interval.py b/tests/generic_config_updater/test_pfcwd_interval.py index fd6d8d16ec6..0a7e095aaef 100644 --- 
a/tests/generic_config_updater/test_pfcwd_interval.py +++ b/tests/generic_config_updater/test_pfcwd_interval.py @@ -6,6 +6,7 @@ from tests.common.utilities import wait_until from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import is_valid_platform_and_version @@ -164,6 +165,7 @@ def test_pfcwd_interval_config_updates(duthost, ensure_dut_readiness, oper, "path": "/PFC_WD/GLOBAL/POLL_INTERVAL", "value": "{}".format(value) }] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_pfcwd_status.py b/tests/generic_config_updater/test_pfcwd_status.py index 76f5828d6b5..c522c800ef4 100644 --- a/tests/generic_config_updater/test_pfcwd_status.py +++ b/tests/generic_config_updater/test_pfcwd_status.py @@ -9,6 +9,7 @@ from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import is_valid_platform_and_version @@ -213,6 +214,7 @@ def test_stop_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, port): exp_str = interface break + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: tmpfile = generate_tmpfile(duthost) output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) @@ -256,6 +258,7 @@ def test_start_pfcwd(duthost, 
extract_pfcwd_config, ensure_dut_readiness, stop_p exp_str = interface break + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: tmpfile = generate_tmpfile(duthost) output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_pg_headroom_update.py b/tests/generic_config_updater/test_pg_headroom_update.py index 16a0b2e6f0c..d72ab4b1fbc 100644 --- a/tests/generic_config_updater/test_pg_headroom_update.py +++ b/tests/generic_config_updater/test_pg_headroom_update.py @@ -7,6 +7,7 @@ from tests.common.helpers.dut_utils import verify_orchagent_running_or_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import is_valid_platform_and_version, get_asic_name @@ -103,6 +104,7 @@ def test_pg_headroom_update(duthost, ensure_dut_readiness, operation, skip_when_ "path": "/BUFFER_PROFILE/{}/xoff".format(profile_name), "value": "{}".format(value)}) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) if is_valid_platform_and_version(duthost, "BUFFER_PROFILE", "PG headroom modification", operation): diff --git a/tests/generic_config_updater/test_portchannel_interface.py b/tests/generic_config_updater/test_portchannel_interface.py index f7f8e0b29c4..a81021fc744 100644 --- a/tests/generic_config_updater/test_portchannel_interface.py +++ b/tests/generic_config_updater/test_portchannel_interface.py @@ -6,6 +6,7 @@ from tests.common.helpers.assertions import pytest_require from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import 
generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import create_path, check_show_ip_intf @@ -99,6 +100,7 @@ def portchannel_interface_tc1_add_duplicate(duthost, portchannel_table): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -143,6 +145,7 @@ def portchannel_interface_tc1_xfail(duthost): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -185,6 +188,7 @@ def portchannel_interface_tc1_add_and_rm(duthost, portchannel_table): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -263,6 +267,7 @@ def portchannel_interface_tc2_replace(duthost): } json_patch.append(patch) + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -287,6 +292,7 @@ def portchannel_interface_tc2_incremental(duthost): "value": "Description for PortChannel101" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_syslog.py b/tests/generic_config_updater/test_syslog.py index bcf7d54b2f2..565a1404ef7 100644 --- a/tests/generic_config_updater/test_syslog.py +++ b/tests/generic_config_updater/test_syslog.py @@ -4,6 +4,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_res_success, expect_op_failure, 
expect_op_success from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ @@ -125,6 +126,7 @@ def syslog_server_tc1_add_init(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -162,6 +164,7 @@ def syslog_server_tc1_add_duplicate(duthost): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -205,6 +208,7 @@ def syslog_server_tc1_xfail(duthost): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -245,6 +249,7 @@ def syslog_server_tc1_replace(duthost): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -276,6 +281,7 @@ def syslog_server_tc1_remove(duthost): "path": "/SYSLOG_SERVER" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) diff --git a/tests/generic_config_updater/test_vlan_interface.py b/tests/generic_config_updater/test_vlan_interface.py index b0f697534b4..1b4372c308f 100644 --- a/tests/generic_config_updater/test_vlan_interface.py +++ b/tests/generic_config_updater/test_vlan_interface.py @@ -7,6 +7,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import 
format_json_patch_for_multiasic from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload from tests.common.gu_utils import create_path, check_show_ip_intf @@ -148,6 +149,7 @@ def vlan_interface_tc1_add_duplicate(duthost, vlan_info): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) logger.info("json patch {}".format(json_patch)) @@ -233,6 +235,7 @@ def vlan_interface_tc1_xfail(duthost, vlan_info): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -304,6 +307,7 @@ def vlan_interface_tc1_add_new(duthost): } } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -362,6 +366,7 @@ def vlan_interface_tc1_replace(duthost, vlan_info): "value": {} } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -387,6 +392,7 @@ def vlan_interface_tc1_remove(duthost, vlan_info): "path": "/VLAN_INTERFACE" } ] + json_patch = format_json_patch_for_multiasic(duthost=duthost, json_data=json_patch) tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -430,6 +436,7 @@ def test_vlan_interface_tc2_incremental_change(rand_selected_dut): "value": "incremental test for Vlan{}".format(EXIST_VLAN_ID) } ] + json_patch = format_json_patch_for_multiasic(duthost=rand_selected_dut, json_data=json_patch) tmpfile = generate_tmpfile(rand_selected_dut) logger.info("tmpfile {}".format(tmpfile)) From e11e6dc057d868a51713cef7fee66fea3c470a8c Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Fri, 15 Nov 2024 10:35:28 +1100 Subject: [PATCH 098/175] fix flaky tests/autorestart/test_container_autorestart.py (#15526) Description of PR 
Summary: Fixes # (issue) Fixes 30114172 Approach What is the motivation for this PR? Increases the threshold timeout for container check for T2 since the BGP neighbor originally was setup to be 360 for T0. However the amount of BGP neighbor is much more comparing to T0. Upons investigation, this test case were flaky because our bgp were still in connecting status. Signed-off-by: Austin Pham --- tests/autorestart/test_container_autorestart.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/autorestart/test_container_autorestart.py b/tests/autorestart/test_container_autorestart.py index 591b2e8a75b..d7a983e35da 100644 --- a/tests/autorestart/test_container_autorestart.py +++ b/tests/autorestart/test_container_autorestart.py @@ -27,6 +27,7 @@ DHCP_SERVER = "dhcp_server" POST_CHECK_INTERVAL_SECS = 1 POST_CHECK_THRESHOLD_SECS = 360 +POST_CHECK_THRESHOLD_SECS_T2 = 600 PROGRAM_STATUS = "RUNNING" @@ -459,13 +460,16 @@ def postcheck_critical_processes_status(duthost, feature_autorestart_states, up_ if is_hiting_start_limit(duthost, feature_name): clear_failed_flag_and_restart(duthost, feature_name, feature_name) + post_check_threshold = POST_CHECK_THRESHOLD_SECS_T2 if duthost.get_facts().get("modular_chassis") \ + else POST_CHECK_THRESHOLD_SECS + critical_proceses = wait_until( - POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, 0, + post_check_threshold, POST_CHECK_INTERVAL_SECS, 0, check_all_critical_processes_status, duthost ) bgp_check = wait_until( - POST_CHECK_THRESHOLD_SECS, POST_CHECK_INTERVAL_SECS, 0, + post_check_threshold, POST_CHECK_INTERVAL_SECS, 0, duthost.check_bgp_session_state_all_asics, up_bgp_neighbors, "established" ) From 5d985b9f4e4108d98cee7986394c2e78ad4511a2 Mon Sep 17 00:00:00 2001 From: vkjammala-arista <152394203+vkjammala-arista@users.noreply.github.com> Date: Fri, 15 Nov 2024 06:37:57 +0530 Subject: [PATCH 099/175] [dualtor] Fix snmp/* tests failure on fixture teardown (#15529) Approach What is the motivation 
for this PR? #15359 has introduced a "yield" statement inside the for loop of duthosts which is causing fixture teardown to fail with Failed: fixture function has more than one 'yield' message. How did you do it? Move "yield" statement out of this for loop of duthosts and do config rollback in a seperate for loop of duthosts. How did you verify/test it? Ran tests under snmp folder and tests are passing on Arista-7260CX3-D108C8 platform. Any platform specific information? --- tests/snmp/conftest.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/snmp/conftest.py b/tests/snmp/conftest.py index 15d47cebd3c..88db6acec52 100644 --- a/tests/snmp/conftest.py +++ b/tests/snmp/conftest.py @@ -68,14 +68,15 @@ def setup_check_snmp_ready(duthosts, localhost): if 'LOCATION' not in snmp_location_redis_vals: duthost.shell(f'sudo config snmp location add {yaml_snmp_location}') # set snmp cli - yield + yield + for duthost in duthosts: # rollback configuration rollback(duthost, SETUP_ENV_CP) - # remove snmp files downloaded - local_command = "find ./snmp/ -type f -name 'snmp.yml' -exec rm -f {} +" - localhost.shell(local_command) + # remove snmp files downloaded + local_command = "find ./snmp/ -type f -name 'snmp.yml' -exec rm -f {} +" + localhost.shell(local_command) def extract_redis_keys(item): From e5df7c9d2a506b77cd0786ecc795fd79982d5bf6 Mon Sep 17 00:00:00 2001 From: AkeelAli <701916+AkeelAli@users.noreply.github.com> Date: Thu, 14 Nov 2024 20:20:49 -0500 Subject: [PATCH 100/175] Disable proxy for POST requests to PTF (#15067) What is the motivation for this PR? Tests that make HTTP POST requests to PTFIP:exabgpPort for bgp updates were failing when proxy variables were set in the environment (gateway timeout 504). Workaround had been to unset these variables before starting the tests. This PR fixes the test scripts such that they don't use the env proxy when making such HTTP requests. How did you do it? 
Explicitly set the proxies to None when making post requests to ignore the corresponding environment variables. Precedent for this change exists: https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/library/announce_routes.py#L163 How did you verify/test it? Following tests passed with the change despite the presence of proxy variables in the sonic-mgmt container environment (tested on DUT Cisco 8101): test_bgp_update_timer.py test_bgp_sentinel.py test_bgp_bbr.py test_bgp_speaker.py test_route_flap.py test_bgp_dual_asn.py --- tests/bgp/bgp_helpers.py | 2 +- tests/bgp/test_bgp_bbr.py | 2 +- tests/bgp/test_bgp_sentinel.py | 2 +- tests/bgp/test_bgp_speaker.py | 2 +- tests/bgp/test_bgp_suppress_fib.py | 2 +- tests/common/helpers/bgp.py | 6 +++--- tests/route/test_route_bgp_ecmp.py | 2 +- tests/route/test_route_flap.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/bgp/bgp_helpers.py b/tests/bgp/bgp_helpers.py index 2eb7e391f61..9ff615e4666 100644 --- a/tests/bgp/bgp_helpers.py +++ b/tests/bgp/bgp_helpers.py @@ -291,7 +291,7 @@ def update_routes(action, ptfip, port, route): url = 'http://%s:%d' % (ptfip, port) data = {'commands': msg} logging.info('Post url={}, data={}'.format(url, data)) - r = requests.post(url, data=data) + r = requests.post(url, data=data, proxies={"http": None, "https": None}) assert r.status_code == 200 diff --git a/tests/bgp/test_bgp_bbr.py b/tests/bgp/test_bgp_bbr.py index a71beb2dbe1..7fbbf91b5bb 100644 --- a/tests/bgp/test_bgp_bbr.py +++ b/tests/bgp/test_bgp_bbr.py @@ -274,7 +274,7 @@ def update_routes(action, ptfip, port, route): return url = 'http://%s:%d' % (ptfip, port) data = {'commands': msg} - r = requests.post(url, data=data) + r = requests.post(url, data=data, proxies={"http": None, "https": None}) assert r.status_code == 200 diff --git a/tests/bgp/test_bgp_sentinel.py b/tests/bgp/test_bgp_sentinel.py index 315bb3fb762..47d5500ca32 100644 --- a/tests/bgp/test_bgp_sentinel.py +++ 
b/tests/bgp/test_bgp_sentinel.py @@ -321,7 +321,7 @@ def change_route(operation, ptfip, neighbor, route, nexthop, port, community): url = "http://%s:%d" % (ptfip, port) data = {"command": "neighbor %s %s route %s next-hop %s local-preference 10000 community [%s]" % (neighbor, operation, route, nexthop, community)} - r = requests.post(url, data=data) + r = requests.post(url, data=data, proxies={"http": None, "https": None}) assert r.status_code == 200 diff --git a/tests/bgp/test_bgp_speaker.py b/tests/bgp/test_bgp_speaker.py index 28c6e26b1db..bd556a23c06 100644 --- a/tests/bgp/test_bgp_speaker.py +++ b/tests/bgp/test_bgp_speaker.py @@ -58,7 +58,7 @@ def withdraw_route(ptfip, neighbor, route, nexthop, port): def change_route(operation, ptfip, neighbor, route, nexthop, port): url = "http://%s:%d" % (ptfip, port) data = {"command": "neighbor %s %s route %s next-hop %s" % (neighbor, operation, route, nexthop)} - r = requests.post(url, data=data) + r = requests.post(url, data=data, proxies={"http": None, "https": None}) assert r.status_code == 200 diff --git a/tests/bgp/test_bgp_suppress_fib.py b/tests/bgp/test_bgp_suppress_fib.py index ea91f4ea461..4273e62517a 100644 --- a/tests/bgp/test_bgp_suppress_fib.py +++ b/tests/bgp/test_bgp_suppress_fib.py @@ -341,7 +341,7 @@ def install_route_from_exabgp(operation, ptfip, route_list, port): data = {"command": command} logger.info("url: {}".format(url)) logger.info("command: {}".format(data)) - r = requests.post(url, data=data, timeout=90) + r = requests.post(url, data=data, timeout=90, proxies={"http": None, "https": None}) assert r.status_code == 200 diff --git a/tests/common/helpers/bgp.py b/tests/common/helpers/bgp.py index 932fc9a11e9..4f14b1d3411 100644 --- a/tests/common/helpers/bgp.py +++ b/tests/common/helpers/bgp.py @@ -143,7 +143,7 @@ def teardown_session(self): msg = msg.format(self.peer_ip) logging.debug("teardown session: %s", msg) url = "http://%s:%d" % (self.ptfip, self.port) - resp = requests.post(url, 
data={"commands": msg}) + resp = requests.post(url, data={"commands": msg}, proxies={"http": None, "https": None}) logging.debug("teardown session return: %s" % resp) assert resp.status_code == 200 @@ -162,7 +162,7 @@ def announce_route(self, route): msg = msg.format(**route) logging.debug("announce route: %s", msg) url = "http://%s:%d" % (self.ptfip, self.port) - resp = requests.post(url, data={"commands": msg}) + resp = requests.post(url, data={"commands": msg}, proxies={"http": None, "https": None}) logging.debug("announce return: %s", resp) assert resp.status_code == 200 @@ -174,6 +174,6 @@ def withdraw_route(self, route): msg = msg.format(**route) logging.debug("withdraw route: %s", msg) url = "http://%s:%d" % (self.ptfip, self.port) - resp = requests.post(url, data={"commands": msg}) + resp = requests.post(url, data={"commands": msg}, proxies={"http": None, "https": None}) logging.debug("withdraw return: %s", resp) assert resp.status_code == 200 diff --git a/tests/route/test_route_bgp_ecmp.py b/tests/route/test_route_bgp_ecmp.py index 4b18b61aefe..aaa23a26a98 100644 --- a/tests/route/test_route_bgp_ecmp.py +++ b/tests/route/test_route_bgp_ecmp.py @@ -34,7 +34,7 @@ def change_route(operation, ptfip, route, nexthop, port, aspath): url = "http://%s:%d" % (ptfip, port) data = { "command": "%s route %s next-hop %s as-path [ %s ]" % (operation, route, nexthop, aspath)} - r = requests.post(url, data=data, timeout=30) + r = requests.post(url, data=data, timeout=30, proxies={"http": None, "https": None}) if r.status_code != 200: raise Exception( "Change routes failed: url={}, data={}, r.status_code={}, r.reason={}, r.headers={}, r.text={}".format( diff --git a/tests/route/test_route_flap.py b/tests/route/test_route_flap.py index 14b61b9f57f..b809ecf845c 100644 --- a/tests/route/test_route_flap.py +++ b/tests/route/test_route_flap.py @@ -77,7 +77,7 @@ def change_route(operation, ptfip, route, nexthop, port, aspath): url = "http://%s:%d" % (ptfip, port) data = { 
"command": "%s route %s next-hop %s as-path [ %s ]" % (operation, route, nexthop, aspath)} - r = requests.post(url, data=data) + r = requests.post(url, data=data, proxies={"http": None, "https": None}) assert r.status_code == 200 From ba00958b6df73718ffb4919a0d1f0b4f96544fdc Mon Sep 17 00:00:00 2001 From: Anant <127479400+AnantKishorSharma@users.noreply.github.com> Date: Fri, 15 Nov 2024 06:52:05 +0530 Subject: [PATCH 101/175] Skipping test_static_route on 8122 (#15272) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What is the motivation for this PR? All 3 tests in test_static_route are failing on 8122. Tests are failing because “show flowcnt-route stats“ does not show the 1 test pkt that the test has sent. “show flowcnt-route stats“ does not show the test pkt because counter config itself failed. Counter config failed because FLOW_COUNTER_CAPABILITY was enabled recently on ASIC/SDK side for 8122 but 'enable_forwarding_route_counter' is not enabled on SONiC/asic_cfg.json on 8122. 'enable_forwarding_route_counter' is not enabled on SONiC/asic_cfg.json on 8122 because of scale limits (cannot scale more than 50k with the current LPM profile). As the feature is not enabled for this platform, need to skip this testcase How did you do it? Added a skip condition for test_static_route for 8122 platform Type of change
 -Test modification Back port request
 -202311 -202405 How did you verify/test it? Ran test_static_router.py on 8122 and verified it was skipped. --- .../common/plugins/conditional_mark/tests_mark_conditions.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 14f6b68bc0c..72121f410e1 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1652,11 +1652,12 @@ route/test_route_perf.py: route/test_static_route.py: skip: - reason: "Test not supported for 201911 images or older. Does not apply to standalone topos." + reason: "Test not supported for 201911 images or older. Does not apply to standalone topos. Not supported on cisco-8122 platform" conditions_logical_operator: OR conditions: - "release in ['201811', '201911']" - "'standalone' in topo_name" + - "platform in ['x86_64-8122_64eh_o-r0', 'x86_64-8122_64ehf_o-r0']" route/test_static_route.py::test_static_route_ecmp_ipv6: # This test case may fail due to a known issue https://github.com/sonic-net/sonic-buildimage/issues/4930. From 1e86c382262213b8285a749ef2a417a342e24b02 Mon Sep 17 00:00:00 2001 From: Zhaohui Sun <94606222+ZhaohuiS@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:24:27 +0800 Subject: [PATCH 102/175] Skip srv6/test_srv6_basic_sanity.py for non cisco vs topologies (#15564) What is the motivation for this PR? PR introduced new srv6 script, it failed on other topologies, skip it for non cisco vs topologies. #13785 How did you do it? skip it for non cisco vs topologies. How did you verify/test it? Run srv6/test_srv6_basic_sanity.py on t0 testbed. 
Signed-off-by: Zhaohui Sun --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 72121f410e1..315189c4ed1 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1775,6 +1775,15 @@ span/test_port_mirroring.py: conditions: - "https://github.com/sonic-net/sonic-mgmt/issues/9647 and 'dualtor' in topo_name and asic_type in ['mellanox']" +####################################### +##### srv6 ##### +####################################### +srv6/test_srv6_basic_sanity.py: + skip: + reason: "It's a new test case, skip it for other topologies except cisco vs nodes." + conditions: + - topo_name not in ["ciscovs-7nodes", "ciscovs-5nodes"] + ####################################### ##### ssh ##### ####################################### From 70ef7edc5ab8e57c9f294440f597769dea4fd1b5 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Fri, 15 Nov 2024 10:20:59 +0800 Subject: [PATCH 103/175] [dualtor-io] Support using fix source IP for upstream packets (#15554) What is the motivation for this PR? Let's make the packets for each server belongs to same TCP flow, SONiC will use the same route to forward it. So any route change will forward/drop all the packets from the same server. How did you do it? Use fixed IP to generate packets for a single server. 
Signed-off-by: Longxiang --- tests/common/dualtor/data_plane_utils.py | 9 +++++---- tests/common/dualtor/dual_tor_io.py | 15 ++++++++++++--- tests/dualtor_io/test_tor_bgp_failure.py | 2 +- 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py index febaa97d841..1f7b00371b4 100644 --- a/tests/common/dualtor/data_plane_utils.py +++ b/tests/common/dualtor/data_plane_utils.py @@ -161,14 +161,15 @@ def verify_and_report(tor_IO, verify, delay, allowed_disruption, def run_test( duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, tor_vlan_port, send_interval, traffic_direction, - stop_after, cable_type=CableType.active_standby # noqa F811 + stop_after, cable_type=CableType.active_standby, random_dst=None # noqa F811 ): io_ready = threading.Event() peerhost = get_peerhost(duthosts, activehost) tor_IO = DualTorIO( activehost, peerhost, ptfhost, ptfadapter, vmhost, tbinfo, - io_ready, tor_vlan_port=tor_vlan_port, send_interval=send_interval, cable_type=cable_type + io_ready, tor_vlan_port=tor_vlan_port, send_interval=send_interval, cable_type=cable_type, + random_dst=random_dst ) tor_IO.generate_traffic(traffic_direction) @@ -330,7 +331,7 @@ def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo, def server_to_t1_io_test(activehost, tor_vlan_port=None, delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, - stop_after=None): + stop_after=None, random_dst=None): """ Helper method for `send_server_to_t1_with_action`. Starts sender and sniffer before performing the action on the tor host. 
@@ -357,7 +358,7 @@ def server_to_t1_io_test(activehost, tor_vlan_port=None, tor_IO = run_test(duthosts, activehost, ptfhost, ptfadapter, vmhost, action, tbinfo, tor_vlan_port, send_interval, traffic_direction="server_to_t1", stop_after=stop_after, - cable_type=cable_type) + cable_type=cable_type, random_dst=random_dst) # If a delay is allowed but no numebr of allowed disruptions # is specified, default to 1 allowed disruption diff --git a/tests/common/dualtor/dual_tor_io.py b/tests/common/dualtor/dual_tor_io.py index 5df1e7b99ea..978c53aa8a8 100644 --- a/tests/common/dualtor/dual_tor_io.py +++ b/tests/common/dualtor/dual_tor_io.py @@ -38,7 +38,8 @@ class DualTorIO: """Class to conduct IO over ports in `active-standby` mode.""" def __init__(self, activehost, standbyhost, ptfhost, ptfadapter, vmhost, tbinfo, - io_ready, tor_vlan_port=None, send_interval=0.01, cable_type=CableType.active_standby): + io_ready, tor_vlan_port=None, send_interval=0.01, cable_type=CableType.active_standby, + random_dst=None): self.tor_pc_intf = None self.tor_vlan_intf = tor_vlan_port self.duthost = activehost @@ -54,6 +55,12 @@ def __init__(self, activehost, standbyhost, ptfhost, ptfadapter, vmhost, tbinfo, self.cable_type = cable_type + if random_dst is None: + # if random_dst is not set, default to true for active standby dualtor. 
+ self.random_dst = (self.cable_type == CableType.active_standby) + else: + self.random_dst = random_dst + self.dataplane = self.ptfadapter.dataplane self.dataplane.flush() self.test_results = dict() @@ -390,8 +397,10 @@ def generate_upstream_traffic(self, src='server'): packet = tcp_tx_packet_orig.copy() packet[scapyall.Ether].src = eth_src packet[scapyall.IP].src = server_ip - packet[scapyall.IP].dst = dst_ips[vlan_intf] \ - if self.cable_type == CableType.active_active else self.random_host_ip() + if self.random_dst: + packet[scapyall.IP].dst = self.random_host_ip() + else: + packet[scapyall.IP].dst = dst_ips[vlan_intf] packet.load = payload packet[scapyall.TCP].chksum = None packet[scapyall.IP].chksum = None diff --git a/tests/dualtor_io/test_tor_bgp_failure.py b/tests/dualtor_io/test_tor_bgp_failure.py index c6643a08134..2ef58a13d52 100644 --- a/tests/dualtor_io/test_tor_bgp_failure.py +++ b/tests/dualtor_io/test_tor_bgp_failure.py @@ -182,7 +182,7 @@ def test_active_tor_shutdown_bgp_sessions_upstream( if cable_type == CableType.active_standby: send_server_to_t1_with_action( upper_tor_host, verify=True, delay=MUX_SIM_ALLOWED_DISRUPTION_SEC, - action=lambda: shutdown_bgp_sessions(upper_tor_host) + action=lambda: shutdown_bgp_sessions(upper_tor_host), random_dst=False ) if cable_type == CableType.active_active: From a7c567d8bb99b64ced3f893e5d509c138ef255d9 Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Fri, 15 Nov 2024 14:24:33 +1100 Subject: [PATCH 104/175] refactor: optimize BFD traffic test (#15550) Description of PR Optimize the BFD traffic test to reduce the running time. Summary: Fixes # (issue) Microsoft ADO 30056122 Approach What is the motivation for this PR? There are unnecessary setup steps in the BFD traffic test, which can be removed to reduce the running time. The running time will be decreased by at least 35 min after this change. How did you do it? How did you verify/test it? 
I ran the updated code and can confirm it's working well. co-authorized by: jianquanye@microsoft.com --- tests/bfd/bfd_base.py | 93 +--------------------------- tests/bfd/conftest.py | 3 - tests/bfd/test_bfd_traffic.py | 113 ++++++++++++++++++++++++++++------ 3 files changed, 95 insertions(+), 114 deletions(-) diff --git a/tests/bfd/bfd_base.py b/tests/bfd/bfd_base.py index e801cbfa870..b0f78a1868b 100644 --- a/tests/bfd/bfd_base.py +++ b/tests/bfd/bfd_base.py @@ -4,8 +4,7 @@ import pytest from tests.bfd.bfd_helpers import prepare_bfd_state, selecting_route_to_delete, \ - extract_ip_addresses_for_backend_portchannels, get_dut_asic_static_routes, extract_backend_portchannels, \ - get_src_dst_asic_next_hops + extract_ip_addresses_for_backend_portchannels, get_dut_asic_static_routes from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor logger = logging.getLogger(__name__) @@ -170,93 +169,3 @@ def select_src_dst_dut_with_asic(self, request, get_src_dst_asic_and_duts): "dst_prefix": dst_prefix, "version": version, } - - @pytest.fixture(scope="class") - def select_dut_and_src_dst_asic_index(self, duthosts): - if not duthosts.frontend_nodes: - pytest.skip("DUT does not have any frontend nodes") - - dut_index = random.choice(list(range(len(duthosts.frontend_nodes)))) - asic_namespace_list = duthosts.frontend_nodes[dut_index].get_asic_namespace_list() - if len(asic_namespace_list) < 2: - pytest.skip("DUT does not have more than one ASICs") - - # Random selection of src asic & dst asic on DUT - src_asic_namespace, dst_asic_namespace = random.sample(asic_namespace_list, 2) - src_asic_index = src_asic_namespace.split("asic")[1] - dst_asic_index = dst_asic_namespace.split("asic")[1] - - yield { - "dut_index": dut_index, - "src_asic_index": int(src_asic_index), - "dst_asic_index": int(dst_asic_index), - } - - @pytest.fixture(scope="class") - def get_src_dst_asic(self, request, duthosts, select_dut_and_src_dst_asic_index): - logger.info("Printing 
select_dut_and_src_dst_asic_index") - logger.info(select_dut_and_src_dst_asic_index) - - logger.info("Printing duthosts.frontend_nodes") - logger.info(duthosts.frontend_nodes) - dut = duthosts.frontend_nodes[select_dut_and_src_dst_asic_index["dut_index"]] - - logger.info("Printing dut asics") - logger.info(dut.asics) - - src_asic = dut.asics[select_dut_and_src_dst_asic_index["src_asic_index"]] - dst_asic = dut.asics[select_dut_and_src_dst_asic_index["dst_asic_index"]] - - request.config.src_asic = src_asic - request.config.dst_asic = dst_asic - request.config.dut = dut - - rtn_dict = { - "src_asic": src_asic, - "dst_asic": dst_asic, - "dut": dut, - } - - rtn_dict.update(select_dut_and_src_dst_asic_index) - yield rtn_dict - - @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) - def prepare_traffic_test_variables(self, get_src_dst_asic, request): - version = request.param - logger.info("Version: %s", version) - - dut = get_src_dst_asic["dut"] - src_asic = get_src_dst_asic["src_asic"] - src_asic_index = get_src_dst_asic["src_asic_index"] - dst_asic = get_src_dst_asic["dst_asic"] - dst_asic_index = get_src_dst_asic["dst_asic_index"] - logger.info( - "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, dst_asic_index) - ) - - backend_port_channels = extract_backend_portchannels(dut) - src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( - version, - dut, - src_asic, - dst_asic, - request, - backend_port_channels, - ) - - src_asic_router_mac = src_asic.get_router_mac() - - yield { - "dut": dut, - "src_asic": src_asic, - "src_asic_index": src_asic_index, - "dst_asic": dst_asic, - "dst_asic_index": dst_asic_index, - "src_asic_next_hops": src_asic_next_hops, - "dst_asic_next_hops": dst_asic_next_hops, - "src_prefix": src_prefix, - "dst_prefix": dst_prefix, - "src_asic_router_mac": src_asic_router_mac, - "backend_port_channels": backend_port_channels, - "version": version, - } diff --git 
a/tests/bfd/conftest.py b/tests/bfd/conftest.py index 7892b067991..f69f7170d31 100644 --- a/tests/bfd/conftest.py +++ b/tests/bfd/conftest.py @@ -64,9 +64,6 @@ def bfd_cleanup_db(request, duthosts, enum_supervisor_dut_hostname): if hasattr(request.config, "src_dut") and hasattr(request.config, "dst_dut"): clear_bfd_configs(request.config.src_dut, request.config.src_asic.asic_index, request.config.src_prefix) clear_bfd_configs(request.config.dst_dut, request.config.dst_asic.asic_index, request.config.dst_prefix) - elif hasattr(request.config, "dut"): - clear_bfd_configs(request.config.dut, request.config.src_asic.asic_index, request.config.src_prefix) - clear_bfd_configs(request.config.dut, request.config.dst_asic.asic_index, request.config.dst_prefix) logger.info("Bringing up portchannels or respective members") portchannels_on_dut = None diff --git a/tests/bfd/test_bfd_traffic.py b/tests/bfd/test_bfd_traffic.py index fd3aa77d614..67833573c79 100644 --- a/tests/bfd/test_bfd_traffic.py +++ b/tests/bfd/test_bfd_traffic.py @@ -1,11 +1,12 @@ import logging +import random import pytest -from tests.bfd.bfd_base import BfdBase from tests.bfd.bfd_helpers import get_ptf_src_port, get_backend_interface_in_use_by_counter, \ get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, get_port_channel_by_member, \ - wait_until_given_bfd_down, assert_traffic_switching, create_and_verify_bfd_state, verify_bfd_only + wait_until_given_bfd_down, assert_traffic_switching, verify_bfd_only, extract_backend_portchannels, \ + get_src_dst_asic_next_hops from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor pytestmark = [ @@ -16,9 +17,99 @@ logger = logging.getLogger(__name__) -class TestBfdTraffic(BfdBase): +class TestBfdTraffic: PACKET_COUNT = 10000 + @pytest.fixture(scope="class") + def select_dut_and_src_dst_asic_index(self, duthosts): + if not duthosts.frontend_nodes: + pytest.skip("DUT does not have any frontend nodes") + + dut_index = 
random.choice(list(range(len(duthosts.frontend_nodes)))) + asic_namespace_list = duthosts.frontend_nodes[dut_index].get_asic_namespace_list() + if len(asic_namespace_list) < 2: + pytest.skip("DUT does not have more than one ASICs") + + # Random selection of src asic & dst asic on DUT + src_asic_namespace, dst_asic_namespace = random.sample(asic_namespace_list, 2) + src_asic_index = src_asic_namespace.split("asic")[1] + dst_asic_index = dst_asic_namespace.split("asic")[1] + + yield { + "dut_index": dut_index, + "src_asic_index": int(src_asic_index), + "dst_asic_index": int(dst_asic_index), + } + + @pytest.fixture(scope="class") + def get_src_dst_asic(self, request, duthosts, select_dut_and_src_dst_asic_index): + logger.info("Printing select_dut_and_src_dst_asic_index") + logger.info(select_dut_and_src_dst_asic_index) + + logger.info("Printing duthosts.frontend_nodes") + logger.info(duthosts.frontend_nodes) + dut = duthosts.frontend_nodes[select_dut_and_src_dst_asic_index["dut_index"]] + + logger.info("Printing dut asics") + logger.info(dut.asics) + + src_asic = dut.asics[select_dut_and_src_dst_asic_index["src_asic_index"]] + dst_asic = dut.asics[select_dut_and_src_dst_asic_index["dst_asic_index"]] + + request.config.src_asic = src_asic + request.config.dst_asic = dst_asic + request.config.dut = dut + + rtn_dict = { + "src_asic": src_asic, + "dst_asic": dst_asic, + "dut": dut, + } + + rtn_dict.update(select_dut_and_src_dst_asic_index) + yield rtn_dict + + @pytest.fixture(scope="class", params=["ipv4", "ipv6"]) + def prepare_traffic_test_variables(self, get_src_dst_asic, request): + version = request.param + logger.info("Version: %s", version) + + dut = get_src_dst_asic["dut"] + src_asic = get_src_dst_asic["src_asic"] + src_asic_index = get_src_dst_asic["src_asic_index"] + dst_asic = get_src_dst_asic["dst_asic"] + dst_asic_index = get_src_dst_asic["dst_asic_index"] + logger.info( + "DUT: {}, src_asic_index: {}, dst_asic_index: {}".format(dut.hostname, src_asic_index, 
dst_asic_index) + ) + + backend_port_channels = extract_backend_portchannels(dut) + src_asic_next_hops, dst_asic_next_hops, src_prefix, dst_prefix = get_src_dst_asic_next_hops( + version, + dut, + src_asic, + dst_asic, + request, + backend_port_channels, + ) + + src_asic_router_mac = src_asic.get_router_mac() + + yield { + "dut": dut, + "src_asic": src_asic, + "src_asic_index": src_asic_index, + "dst_asic": dst_asic, + "dst_asic_index": dst_asic_index, + "src_asic_next_hops": src_asic_next_hops, + "dst_asic_next_hops": dst_asic_next_hops, + "src_prefix": src_prefix, + "dst_prefix": dst_prefix, + "src_asic_router_mac": src_asic_router_mac, + "backend_port_channels": backend_port_channels, + "version": version, + } + def test_bfd_traffic_remote_port_channel_shutdown( self, request, @@ -44,10 +135,6 @@ def test_bfd_traffic_remote_port_channel_shutdown( ("dst", dst_asic, dst_prefix, dst_asic_next_hops), ] - with SafeThreadPoolExecutor(max_workers=8) as executor: - for _, asic, prefix, next_hops in src_dst_context: - executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) - dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: pytest.skip("No BGP neighbor found on asic{} of dut {}".format(dst_asic_index, dut.hostname)) @@ -155,10 +242,6 @@ def test_bfd_traffic_local_port_channel_shutdown( ("dst", dst_asic, dst_prefix, dst_asic_next_hops), ] - with SafeThreadPoolExecutor(max_workers=8) as executor: - for _, asic, prefix, next_hops in src_dst_context: - executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) - dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: pytest.skip("No BGP neighbor found on asic{} of dut {}".format(dst_asic_index, dut.hostname)) @@ -266,10 +349,6 @@ def test_bfd_traffic_remote_port_channel_member_shutdown( ("dst", dst_asic, dst_prefix, dst_asic_next_hops), ] - with SafeThreadPoolExecutor(max_workers=8) 
as executor: - for _, asic, prefix, next_hops in src_dst_context: - executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) - dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: pytest.skip("No BGP neighbor found on asic{} of dut {}".format(dst_asic_index, dut.hostname)) @@ -377,10 +456,6 @@ def test_bfd_traffic_local_port_channel_member_shutdown( ("dst", dst_asic, dst_prefix, dst_asic_next_hops), ] - with SafeThreadPoolExecutor(max_workers=8) as executor: - for _, asic, prefix, next_hops in src_dst_context: - executor.submit(create_and_verify_bfd_state, asic, prefix, dut, next_hops) - dst_neighbor_ip = get_random_bgp_neighbor_ip_of_asic(dut, dst_asic_index, version) if not dst_neighbor_ip: pytest.skip("No BGP neighbor found on asic{} of dut {}".format(dst_asic_index, dut.hostname)) From 1ca2a25558c344af0a0c84689a575940477623af Mon Sep 17 00:00:00 2001 From: Justin Wong <51811017+justin-wong-ce@users.noreply.github.com> Date: Thu, 14 Nov 2024 21:43:50 -0800 Subject: [PATCH 105/175] Add wait between mock dualtor setup commands (#15399) On topologies with higher number of interfaces (i.e. >100), config commands take more time to run properly. Executing the subsequent commands too quickly may cause the config to not change properly, causing problems in the mock dualtor setup. Adding a wait_until and delays to give more time for config changes. 
--- tests/common/dualtor/dual_tor_mock.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tests/common/dualtor/dual_tor_mock.py b/tests/common/dualtor/dual_tor_mock.py index a5d1bb12181..b883d28e6c4 100644 --- a/tests/common/dualtor/dual_tor_mock.py +++ b/tests/common/dualtor/dual_tor_mock.py @@ -11,6 +11,7 @@ from tests.common.dualtor.dual_tor_utils import tor_mux_intfs # noqa F401 from tests.common.helpers.assertions import pytest_assert from tests.common.platform.processes_utils import wait_critical_processes +from tests.common.utilities import wait_until __all__ = [ 'apply_active_state_to_orchagent', @@ -67,6 +68,9 @@ def set_dual_tor_state_to_orchagent(dut, state, tor_mux_intfs): # noqa F """ Helper function for setting active/standby state to orchagent """ + def check_config_applied(num_tor_mux_intfs): + out = dut.shell('redis-cli -n 0 keys "MUX_CABLE_TABLE:*" | wc -l') + return out['stdout_lines'][0] == str(num_tor_mux_intfs) logger.info("Applying {} state to orchagent".format(state)) intf_configs = [] @@ -97,6 +101,7 @@ def set_dual_tor_state_to_orchagent(dut, state, tor_mux_intfs): # noqa F logger.debug('SWSS config string is {}'.format(swss_config_str)) swss_filename = '/mux{}.json'.format(state) _apply_config_to_swss(dut, swss_config_str, swss_filename) + wait_until(120, 5, 5, check_config_applied, len(tor_mux_intfs)) def del_dual_tor_state_from_orchagent(dut, state, tor_mux_intfs): # noqa F811 @@ -295,6 +300,7 @@ def apply_dual_tor_neigh_entries(cleanup_mocked_configs, rand_selected_dut, tbin for ipv6, mac in list(mock_server_ipv6_mac_map.items()): cmds.append('ip -6 neigh replace {} lladdr {} dev {}'.format(ipv6, mac, vlan)) dut.shell_cmds(cmds=cmds) + time.sleep(5) return @@ -323,6 +329,7 @@ def apply_dual_tor_peer_switch_route(cleanup_mocked_configs, rand_selected_dut, # Use `ip route replace` in case a rule already exists for this IP # If there are no pre-existing routes, equivalent to `ip route add` dut.shell('ip route 
replace {} {}'.format(mock_peer_switch_loopback_ip, nexthop_str)) + time.sleep(5) return @@ -333,6 +340,12 @@ def apply_peer_switch_table_to_dut(cleanup_mocked_configs, rand_selected_dut, mo Adds the PEER_SWITCH table to config DB and the peer_switch field to the device metadata Also adds the 'subtype' field in the device metadata table and sets it to 'DualToR' ''' + def check_config_applied(): + out = dut.shell('redis-cli -n 4 HGETALL "DEVICE_METADATA|localhost"')['stdout_lines'][-1] + device_metadata_done = 'DualToR' in out + out = dut.shell('redis-cli -n 4 HGETALL "PEER_SWITCH|switch_hostname"')['stdout_lines'][0] + peerswitch_done = 'ipv4_address' in out + return device_metadata_done and peerswitch_done logger.info("Applying PEER_SWITCH table") dut = rand_selected_dut peer_switch_hostname = 'switch_hostname' @@ -359,6 +372,7 @@ def apply_peer_switch_table_to_dut(cleanup_mocked_configs, rand_selected_dut, mo logger.info("Restarting swss service") dut.shell('systemctl reset-failed swss; systemctl restart swss') wait_critical_processes(dut) + wait_until(120, 5, 5, check_config_applied) @pytest.fixture(scope='module') @@ -366,6 +380,11 @@ def apply_tunnel_table_to_dut(cleanup_mocked_configs, rand_selected_dut, mock_pe ''' Adds the TUNNEL table to config DB ''' + def check_config_applied(tunnel_params): + out = dut.shell('redis-cli -n 4 HGETALL "TUNNEL|MuxTunnel0" | wc -l')['stdout_lines'][0] + + # *2 because each key value pair is represented with 2 rows in redis-cli + return out == str(len(tunnel_params['TUNNEL']['MuxTunnel0'])*2) logger.info("Applying TUNNEL table") dut = rand_selected_dut @@ -389,6 +408,7 @@ def apply_tunnel_table_to_dut(cleanup_mocked_configs, rand_selected_dut, mock_pe dut.copy(content=json.dumps(tunnel_params, indent=2), dest="/tmp/tunnel_params.json") dut.shell("sonic-cfggen -j /tmp/tunnel_params.json --write-to-db") + wait_until(120, 5, 5, check_config_applied, tunnel_params) return @@ -399,6 +419,9 @@ def 
apply_mux_cable_table_to_dut(cleanup_mocked_configs, rand_selected_dut, ''' Adds the MUX_CABLE table to config DB ''' + def check_config_applied(num_tor_mux_intfs): + out = dut.shell('redis-cli -n 4 keys "MUX_CABLE|*" | wc -l') + return out['stdout_lines'][0] == str(num_tor_mux_intfs) logger.info("Applying MUX_CABLE table") dut = rand_selected_dut @@ -420,6 +443,7 @@ def apply_mux_cable_table_to_dut(cleanup_mocked_configs, rand_selected_dut, mux_cable_params = {'MUX_CABLE': mux_cable_params} dut.copy(content=json.dumps(mux_cable_params, indent=2), dest="/tmp/mux_cable_params.json") dut.shell("sonic-cfggen -j /tmp/mux_cable_params.json --write-to-db") + wait_until(120, 5, 5, check_config_applied, len(tor_mux_intfs)) return From c9b9d6acec47d77937a67c3441a74f4fb2859dce Mon Sep 17 00:00:00 2001 From: Javier Tan <47554099+Javier-Tan@users.noreply.github.com> Date: Fri, 15 Nov 2024 17:06:55 +1100 Subject: [PATCH 106/175] [tests/common/reboot.py]: Correct REBOOT_TYPE_SUPERVISOR "check" value (#15577) Description of PR Summary: Fixes #15444 Approach What is the motivation for this PR? Bad regex in supervisor reboot check is causing unwarranted test failures How did you do it? Remove bad regex flags in supervisor reboot checks How did you verify/test it? 
Ran affected tests to ensure behaviour was correct Signed-off-by: Javier Tan javiertan@microsoft.com --- tests/common/reboot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/reboot.py b/tests/common/reboot.py index 904f22c6c08..d6956646177 100644 --- a/tests/common/reboot.py +++ b/tests/common/reboot.py @@ -116,7 +116,7 @@ "timeout": 300, "wait": 120, # When linecards are rebooted due to supervisor cold reboot - "cause": r"^Reboot from Supervisor$|^reboot from Supervisor$", + "cause": r"Reboot from Supervisor|reboot from Supervisor", "test_reboot_cause_only": False }, REBOOT_TYPE_SUPERVISOR_HEARTBEAT_LOSS: { From eb0081706b5166d4fa36255e7eb9504eedb32901 Mon Sep 17 00:00:00 2001 From: arista-nwolfe <94405414+arista-nwolfe@users.noreply.github.com> Date: Fri, 15 Nov 2024 01:10:08 -0500 Subject: [PATCH 107/175] Fix condition intended to skip iBGP neighbors to work on single-asic (#15411) Fixes #13662 added support for running bgp/test_bgp_session_flap.py on T2 topology. 
However, the condition it added to skip iBGP neighbors only works on multi-asic LCs: if 'asic' not in v['description'].lower(): The better solution is to check the BGP session's peer group which will indicate if it's internal or not regardless of single-asic or multi-asic --- tests/bgp/test_bgp_session_flap.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/bgp/test_bgp_session_flap.py b/tests/bgp/test_bgp_session_flap.py index f41fafc6894..3ed3d564ee9 100644 --- a/tests/bgp/test_bgp_session_flap.py +++ b/tests/bgp/test_bgp_session_flap.py @@ -66,7 +66,8 @@ def setup(tbinfo, nbrhosts, duthosts, enum_frontend_dut_hostname, enum_rand_one_ tor_neighbors = dict() neigh_asn = dict() for k, v in bgp_facts['bgp_neighbors'].items(): - if 'asic' not in v['description'].lower(): + # Skip iBGP neighbors + if "INTERNAL" not in v["peer group"] and "VOQ_CHASSIS" not in v["peer group"]: neigh_keys.append(v['description']) neigh_asn[v['description']] = v['remote AS'] tor_neighbors[v['description']] = nbrhosts[v['description']]["host"] From 3869cad918884365324c80b2b401755c191b5f7f Mon Sep 17 00:00:00 2001 From: ansrajpu-git <113939367+ansrajpu-git@users.noreply.github.com> Date: Fri, 15 Nov 2024 01:36:49 -0500 Subject: [PATCH 108/175] [Chassis][voq] Skip sonic-mgmt HdrmPoolSizeTest for Nokia-IXR7250E hwsku (#13513) Below test cases are skipped for hwsku in Nokia-IXR7250E-36x400G & platform asic in ['x86_64-nokia_ixr7250e_36x400g-r0'] -testQosSaiHeadroomPoolSize -testQosSaiHeadroomPoolWatermark Issue : #13503 What is the motivation for this PR? How did you do it? set skip for hwsku in Nokia-IXR7250E-36x400G & platform asic in ['x86_64-nokia_ixr7250e_36x400g-r0'] How did you verify/test it? 
Execute the qos test cases --- tests/common/plugins/conditional_mark/tests_mark_conditions.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 315189c4ed1..be3300b0241 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1481,6 +1481,7 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: and topo_type in ['t1-64-lag'] and hwsku not in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64', 'Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8', 'Arista-7060CX-32S-D48C8'] and asic_type not in ['mellanox'] and asic_type in ['cisco-8000']" - "topo_type in ['m0', 'mx']" + - "'t2' in topo_name and asic_subtype in ['broadcom-dnx']" qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolWatermark: skip: From d565dcdbdee84e440f3abf0be1036fd217be8132 Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Fri, 15 Nov 2024 17:51:55 +1100 Subject: [PATCH 109/175] chore: update comments on skipped test (#15581) Description of PR Summary: Fixes # (issue) 29946125 Approach What is the motivation for this PR? Updated comments for skipped test cases on cisco 8800 platform as discussed with cisco. 
Signed-off-by: Austin Pham --- tests/qos/test_qos_sai.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 03ee7986cec..3463fc09800 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -327,6 +327,7 @@ def testQosSaiPfcXoffLimit( ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, egressLosslessProfile ): + # NOTE: this test will be skipped for t2 cisco 8800 if it's not xoff_1 or xoff_2 """ Test QoS SAI XOFF limits @@ -430,6 +431,7 @@ def testPfcStormWithSharedHeadroomOccupancy( Raises: RunAnsibleModuleFail if ptf test fails """ + # NOTE: this is a mellanox test only and will be skipped for cisco 8800 normal_profile = ["xon_1", "xon_2"] if not dutConfig["dualTor"] and xonProfile not in normal_profile: pytest.skip( @@ -590,6 +592,7 @@ def testQosSaiPfcXonLimit( self, get_src_dst_asic_and_duts, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile ): + # NOTE: cisco 8800 will skip this test if it's not xon_1 or xon_2 """ Test QoS SAI XON limits @@ -763,6 +766,7 @@ def testQosSaiHeadroomPoolSize( self, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile ): + # NOTE: cisco-8800 will skip this test since there is no headroom pool """ Test QoS SAI Headroom pool size @@ -875,6 +879,7 @@ def testQosSaiSharedReservationSize( self, sharedResSizeKey, ptfhost, dutTestParams, dutConfig, dutQosConfig, get_src_dst_asic_and_duts, check_skip_shared_res_test ): + # NOTE: Cisco T2 skip due to reduced number of ports in multi-asic """ Test QoS SAI shared reservation size Args: @@ -892,9 +897,11 @@ def testQosSaiSharedReservationSize( if ('modular_chassis' in get_src_dst_asic_and_duts['src_dut'].facts and get_src_dst_asic_and_duts['src_dut'].facts["modular_chassis"]): if dutConfig['dstDutAsic'] != "pac": + # Skipped due to reduced number of ports in multi-asic platforms pytest.skip("This test 
is skipped since not enough ports on cisco-8000 " "T2 Q200.") if "shared_res_size_2" in sharedResSizeKey: + # Skipped due to reduced number of ports in multi-asic platforms pytest.skip("This test is skipped since on cisco-8000 Q100, " "SQG thresholds have no impact on XOFF thresholds.") @@ -948,6 +955,7 @@ def testQosSaiHeadroomPoolWatermark( dutConfig, dutQosConfig, ingressLosslessProfile, sharedHeadroomPoolSize, resetWatermark ): + # NOTE: cisco 8800 will skip this test since there is no headroom pool """ Test QoS SAI Headroom pool watermark @@ -1214,6 +1222,7 @@ def testQosSaiLossyQueueVoq( ingressLossyProfile, duthost, localhost, get_src_dst_asic_and_duts, skip_src_dst_different_asic, dut_qos_maps # noqa: F811 ): + # NOTE: cisco 8800 will skip this test, this test is only for single asic with long link """ Test QoS SAI Lossy queue with non_default voq and default voq Args: @@ -1368,6 +1377,7 @@ def testQosSaiDscpQueueMapping( @pytest.mark.parametrize("direction", ["downstream", "upstream"]) def testQosSaiSeparatedDscpQueueMapping(self, duthost, ptfhost, dutTestParams, dutConfig, direction, dut_qos_maps): # noqa F811 + # NOTE: cisco t2 8800 will skip this test because of the topology """ Test QoS SAI DSCP to queue mapping. We will have separated DSCP_TO_TC_MAP for uplink/downlink ports on T1 if PCBB enabled. 
@@ -1430,6 +1440,7 @@ def testQosSaiSeparatedDscpQueueMapping(self, duthost, ptfhost, dutTestParams, def testQosSaiDot1pQueueMapping( self, ptfhost, dutTestParams, dutConfig ): + # NOTE: cisco 8800 will skip this test since Dot1p-PG mapping is only supported on backend """ Test QoS SAI Dot1p to queue mapping @@ -1468,6 +1479,7 @@ def testQosSaiDot1pQueueMapping( def testQosSaiDot1pPgMapping( self, ptfhost, dutTestParams, dutConfig ): + # NOTE: cisco 8800 will skip this test since Dot1p-PG mapping is only supported on backend """ Test QoS SAI Dot1p to PG mapping Args: @@ -1989,6 +2001,7 @@ def testIPIPQosSaiDscpToPgMapping( @pytest.mark.parametrize("direction", ["downstream", "upstream"]) def testQosSaiSeparatedDscpToPgMapping(self, duthost, request, ptfhost, dutTestParams, dutConfig, direction, dut_qos_maps): # noqa F811 + # NOTE: cisco 8800 will skip this test for both upstream and downstream """ Test QoS SAI DSCP to PG mapping ptf test. Since we are using different DSCP_TO_TC_MAP on uplink/downlink port, the test case also need to @@ -2207,6 +2220,7 @@ def testQosSaiLossyQueueVoqMultiSrc( self, ptfhost, dutTestParams, dutConfig, dutQosConfig, get_src_dst_asic_and_duts, skip_longlink ): + # NOTE: testQosSaiLossyQueueVoqMultiSrc[lossy_queue_voq_3] will be skipped for t2 cisco since it's multi-asic """ Test QoS SAI Lossy queue with multiple source ports, applicable for fair-voq and split-voq Args: @@ -2282,6 +2296,7 @@ def testQosSaiFullMeshTrafficSanity( get_src_dst_asic_and_duts, dut_qos_maps, # noqa F811 set_static_route_ptf64 ): + # NOTE: this test will be skipped for t2 cisco 8800 since it requires ptf64 topo """ Test QoS SAI traffic sanity Args: From 129959cbb22a2caa091f9ee3416f8f7674bf48fd Mon Sep 17 00:00:00 2001 From: "Austin (Thang Pham)" Date: Fri, 15 Nov 2024 20:18:23 +1100 Subject: [PATCH 110/175] fix: fix failure pfcwd_multiport (#15562) Description of PR Summary: Fixes # (issue) 30115858 Approach What is the motivation for this PR? 
From the original PR #10198 these changes were left out. After adding it back in it passed all the tests Signed-off-by: Austin Pham --- tests/pfcwd/test_pfcwd_function.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py index 22a082b4fde..b9f60a65a59 100644 --- a/tests/pfcwd/test_pfcwd_function.py +++ b/tests/pfcwd/test_pfcwd_function.py @@ -518,6 +518,7 @@ def __init__(self, ptf, router_mac, tx_mac, pfc_params, is_dualtor): self.pfc_wd_rx_port_vlan_id = pfc_params['rx_port_vlan_id'] self.port_id_to_type_map = pfc_params['port_id_to_type_map'] self.port_type = pfc_params['port_type'] + self.is_dualtor = is_dualtor if is_dualtor: self.vlan_mac = "00:aa:bb:cc:dd:ee" else: @@ -569,7 +570,7 @@ def verify_rx_ingress(self, action): else: dst_port = "[ " + str(self.pfc_wd_rx_port_id) + " ]" ptf_params = {'router_mac': self.tx_mac, - 'vlan_mac': self.vlan_mac, + 'vlan_mac': self.vlan_mac if self.is_dualtor else self.tx_mac, 'queue_index': self.pfc_queue_index, 'pkt_count': self.pfc_wd_test_pkt_count, 'port_src': self.pfc_wd_test_port_id, @@ -635,7 +636,7 @@ def verify_other_pfc_pg(self): other_pg = self.pfc_queue_index + 1 ptf_params = {'router_mac': self.tx_mac, - 'vlan_mac': self.vlan_mac, + 'vlan_mac': self.vlan_mac if self.is_dualtor else self.tx_mac, 'queue_index': other_pg, 'pkt_count': self.pfc_wd_test_pkt_count, 'port_src': self.pfc_wd_test_port_id, From 32e7e9d4911466410ab6985fa28848d684cb4ba5 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Fri, 15 Nov 2024 09:46:09 -0800 Subject: [PATCH 111/175] Wait BGP sessions after changing mgmt IP (#15570) --- tests/common/fixtures/duthost_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/common/fixtures/duthost_utils.py b/tests/common/fixtures/duthost_utils.py index d2913307fdf..f23ec06b152 100644 --- a/tests/common/fixtures/duthost_utils.py +++ 
b/tests/common/fixtures/duthost_utils.py @@ -754,7 +754,7 @@ def convert_and_restore_config_db_to_ipv6_only(duthosts): if config_db_modified[duthost.hostname]: logger.info(f"config changed. Doing config reload for {duthost.hostname}") try: - config_reload(duthost, wait=120) + config_reload(duthost, wait=120, wait_for_bgp=True) except AnsibleConnectionFailure as e: # IPV4 mgmt interface been deleted by config reload # In latest SONiC, config reload command will exit after mgmt interface restart From f992360638c718a3e22bfa5b87bda18d6bfedbeb Mon Sep 17 00:00:00 2001 From: Chris <156943338+ccroy-arista@users.noreply.github.com> Date: Sun, 17 Nov 2024 16:41:52 -0800 Subject: [PATCH 112/175] sonic-mgmt: fix t0-isolated-d128u128s2 topo (#15542) Description of PR This PR contains the following changes, in order to achieve a functional t0-isolated-d128u128s2 topo: The field 'bp_interfaces' in the t0-isolated-d128u128s2 yml file is corrected to 'bp_interface' i.e. w/o the 's'. Added more VMs, to support the larger number of VMs needed for the topo. Added the missing leaf template file for the topo. Fixed the synthesis of the MACs used in ansible roles so that it does not error out after 256 interfaces. Additionally, this PR fixed the 'bp_interfaces' for t0-isolated-d128u128s1 yml file. 
--- .../templates/t0-isolated-d128u128s2-leaf.j2 | 1 + .../roles/test/files/helpers/change_mac.sh | 5 +- ansible/testbed-new.yaml | 138 +++++++++- ansible/vars/topo_t0-isolated-d128u128s1.yml | 258 ++++++++--------- ansible/vars/topo_t0-isolated-d128u128s2.yml | 260 +++++++++--------- ansible/veos | 138 +++++++++- 6 files changed, 525 insertions(+), 275 deletions(-) create mode 100644 ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 diff --git a/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 b/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 new file mode 100644 index 00000000000..a60cf79c0e0 --- /dev/null +++ b/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 @@ -0,0 +1 @@ +t0-leaf.j2 diff --git a/ansible/roles/test/files/helpers/change_mac.sh b/ansible/roles/test/files/helpers/change_mac.sh index 05f8dd66778..0952f51f38c 100644 --- a/ansible/roles/test/files/helpers/change_mac.sh +++ b/ansible/roles/test/files/helpers/change_mac.sh @@ -6,8 +6,9 @@ INTF_LIST=$(ls /sys/class/net | grep -E "^eth[0-9]+$") for INTF in ${INTF_LIST}; do ADDR="$(cat /sys/class/net/${INTF}/address)" - PREFIX="$(cut -c1-15 <<< ${ADDR})" - SUFFIX="$(printf "%02x" ${INTF##eth})" + PREFIX="$(cut -c1-13 <<< ${ADDR})" + INTF_ID=${INTF##eth} + SUFFIX="$(printf "%x:%02x" $(expr ${INTF_ID} / 256) $(expr ${INTF_ID} % 256))" MAC="${PREFIX}${SUFFIX}" echo "Update ${INTF} MAC address: ${ADDR}->$MAC" diff --git a/ansible/testbed-new.yaml b/ansible/testbed-new.yaml index d02fd2b1227..06f1f380877 100644 --- a/ansible/testbed-new.yaml +++ b/ansible/testbed-new.yaml @@ -242,9 +242,9 @@ veos_groups: eos: children: [vms_1, vms_2] # source: sonic-mgmt/veos vms_2: - host: [VM0200, VM0201, VM0202, VM0203] # source: sonic-mgmt/veos + host: [VM0300, VM0301, VM0302, VM0203] # source: sonic-mgmt/veos vms_1: - host: [VM0100, VM0101, VM0102, VM0103, VM0104, VM0105, VM0106, VM0107, VM0108, VM0109, VM0110, VM0111, VM0112, VM0113, VM0114, VM0115, VM0116, VM0117, VM0118, VM0119, 
VM0120, VM0121, VM0122, VM0123, VM0124, VM0125, VM0126, VM0127, VM0128, VM0129, VM0130, VM0131, VM0132, VM0133, VM0134, VM0135, VM0136, VM0137, VM0138, VM0139, VM0140, VM0141, VM0142, VM0143, VM0144, VM0145, VM0146, VM0147, VM0148, VM0149, VM0150, VM0151, VM0152, VM0153, VM0154, VM0155, VM0156, VM0157, VM0158, VM0159, VM0160, VM0161, VM0162, VM0163, VM0164, VM0165, VM0166, VM0167] # source: sonic-mgmt/veos + host: [VM0100, VM0101, VM0102, VM0103, VM0104, VM0105, VM0106, VM0107, VM0108, VM0109, VM0110, VM0111, VM0112, VM0113, VM0114, VM0115, VM0116, VM0117, VM0118, VM0119, VM0120, VM0121, VM0122, VM0123, VM0124, VM0125, VM0126, VM0127, VM0128, VM0129, VM0130, VM0131, VM0132, VM0133, VM0134, VM0135, VM0136, VM0137, VM0138, VM0139, VM0140, VM0141, VM0142, VM0143, VM0144, VM0145, VM0146, VM0147, VM0148, VM0149, VM0150, VM0151, VM0152, VM0153, VM0154, VM0155, VM0156, VM0157, VM0158, VM0159, VM0160, VM0161, VM0162, VM0163, VM0164, VM0165, VM0166, VM0167, VM0168, VM0169, VM0170, VM0171, VM0172, VM0173, VM0174, VM0175, VM0176, VM0177, VM0178, VM0179, VM0180, VM0181, VM0182, VM0183, VM0184, VM0185, VM0186, VM0187, VM0188, VM0189, VM0190, VM0191, VM0192, VM0193, VM0194, VM0195, VM0196, VM0197, VM0198, VM0199, VM0200, VM0201, VM0202, VM0203, VM0204, VM0205, VM0206, VM0207, VM0208, VM0209, VM0210, VM0211, VM0212, VM0213, VM0214, VM0215, VM0216, VM0217, VM0218, VM0219, VM0220, VM0221, VM0222, VM0223, VM0224, VM0225, VM0226, VM0227, VM0228, VM0229] # source: sonic-mgmt/veos vm_host: children: [vm_host_1, vm_host_2] # source: sonic-mgmt/veos vm_host_2: @@ -414,15 +414,139 @@ veos: ansible_host: 10.250.0.68 VM0167: ansible_host: 10.250.0.69 - vms_2: + VM0168: + ansible_host: 10.250.0.70 + VM0169: + ansible_host: 10.250.0.71 + VM0170: + ansible_host: 10.250.0.72 + VM0171: + ansible_host: 10.250.0.73 + VM0172: + ansible_host: 10.250.0.74 + VM0173: + ansible_host: 10.250.0.75 + VM0174: + ansible_host: 10.250.0.76 + VM0175: + ansible_host: 10.250.0.77 + VM0176: + ansible_host: 
10.250.0.78 + VM0177: + ansible_host: 10.250.0.79 + VM0178: + ansible_host: 10.250.0.80 + VM0179: + ansible_host: 10.250.0.81 + VM0180: + ansible_host: 10.250.0.82 + VM0181: + ansible_host: 10.250.0.83 + VM0182: + ansible_host: 10.250.0.84 + VM0183: + ansible_host: 10.250.0.85 + VM0184: + ansible_host: 10.250.0.86 + VM0185: + ansible_host: 10.250.0.87 + VM0186: + ansible_host: 10.250.0.88 + VM0187: + ansible_host: 10.250.0.89 + VM0188: + ansible_host: 10.250.0.90 + VM0189: + ansible_host: 10.250.0.91 + VM0190: + ansible_host: 10.250.0.92 + VM0191: + ansible_host: 10.250.0.93 + VM0192: + ansible_host: 10.250.0.94 + VM0193: + ansible_host: 10.250.0.95 + VM0194: + ansible_host: 10.250.0.96 + VM0195: + ansible_host: 10.250.0.97 + VM0196: + ansible_host: 10.250.0.98 + VM0197: + ansible_host: 10.250.0.99 + VM0198: + ansible_host: 10.250.0.100 + VM0199: + ansible_host: 10.250.0.101 VM0200: - ansible_host: 10.250.0.51 + ansible_host: 10.250.0.102 VM0201: - ansible_host: 10.250.0.52 + ansible_host: 10.250.0.103 VM0202: - ansible_host: 10.250.0.53 + ansible_host: 10.250.0.104 VM0203: - ansible_host: 10.250.0.54 + ansible_host: 10.250.0.105 + VM0204: + ansible_host: 10.250.0.106 + VM0205: + ansible_host: 10.250.0.107 + VM0206: + ansible_host: 10.250.0.108 + VM0207: + ansible_host: 10.250.0.109 + VM0208: + ansible_host: 10.250.0.110 + VM0209: + ansible_host: 10.250.0.111 + VM0210: + ansible_host: 10.250.0.112 + VM0211: + ansible_host: 10.250.0.113 + VM0212: + ansible_host: 10.250.0.114 + VM0213: + ansible_host: 10.250.0.115 + VM0214: + ansible_host: 10.250.0.116 + VM0215: + ansible_host: 10.250.0.117 + VM0216: + ansible_host: 10.250.0.118 + VM0217: + ansible_host: 10.250.0.119 + VM0218: + ansible_host: 10.250.0.120 + VM0219: + ansible_host: 10.250.0.121 + VM0220: + ansible_host: 10.250.0.122 + VM0221: + ansible_host: 10.250.0.123 + VM0222: + ansible_host: 10.250.0.124 + VM0223: + ansible_host: 10.250.0.125 + VM0224: + ansible_host: 10.250.0.126 + VM0225: + ansible_host: 
10.250.0.127 + VM0226: + ansible_host: 10.250.0.128 + VM0227: + ansible_host: 10.250.0.129 + VM0228: + ansible_host: 10.250.0.130 + VM0229: + ansible_host: 10.250.0.131 + vms_2: + VM0300: + ansible_host: 10.250.0.252 + VM0301: + ansible_host: 10.250.0.253 + VM0302: + ansible_host: 10.250.0.254 + VM0303: + ansible_host: 10.250.0.255 # testbed dictionary contains information about the testbed # testbed is used to generate testbed.csv diff --git a/ansible/vars/topo_t0-isolated-d128u128s1.yml b/ansible/vars/topo_t0-isolated-d128u128s1.yml index b187d8d09c3..75ae5f25da8 100644 --- a/ansible/vars/topo_t0-isolated-d128u128s1.yml +++ b/ansible/vars/topo_t0-isolated-d128u128s1.yml @@ -825,7 +825,7 @@ configuration: Ethernet1: ipv4: 10.0.0.65/31 ipv6: fc00::82/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.2/24 ipv6: fc0a::2/64 ARISTA02T1: @@ -844,7 +844,7 @@ configuration: Ethernet1: ipv4: 10.0.0.67/31 ipv6: fc00::86/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.3/24 ipv6: fc0a::3/64 ARISTA03T1: @@ -863,7 +863,7 @@ configuration: Ethernet1: ipv4: 10.0.0.69/31 ipv6: fc00::8a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.4/24 ipv6: fc0a::4/64 ARISTA04T1: @@ -882,7 +882,7 @@ configuration: Ethernet1: ipv4: 10.0.0.71/31 ipv6: fc00::8e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.5/24 ipv6: fc0a::5/64 ARISTA05T1: @@ -901,7 +901,7 @@ configuration: Ethernet1: ipv4: 10.0.0.73/31 ipv6: fc00::92/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.6/24 ipv6: fc0a::6/64 ARISTA06T1: @@ -920,7 +920,7 @@ configuration: Ethernet1: ipv4: 10.0.0.75/31 ipv6: fc00::96/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.7/24 ipv6: fc0a::7/64 ARISTA07T1: @@ -939,7 +939,7 @@ configuration: Ethernet1: ipv4: 10.0.0.77/31 ipv6: fc00::9a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.8/24 ipv6: fc0a::8/64 ARISTA08T1: @@ -958,7 +958,7 @@ configuration: Ethernet1: ipv4: 10.0.0.79/31 ipv6: fc00::9e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.9/24 ipv6: 
fc0a::9/64 ARISTA09T1: @@ -977,7 +977,7 @@ configuration: Ethernet1: ipv4: 10.0.0.81/31 ipv6: fc00::a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.10/24 ipv6: fc0a::a/64 ARISTA10T1: @@ -996,7 +996,7 @@ configuration: Ethernet1: ipv4: 10.0.0.83/31 ipv6: fc00::a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.11/24 ipv6: fc0a::b/64 ARISTA11T1: @@ -1015,7 +1015,7 @@ configuration: Ethernet1: ipv4: 10.0.0.85/31 ipv6: fc00::aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.12/24 ipv6: fc0a::c/64 ARISTA12T1: @@ -1034,7 +1034,7 @@ configuration: Ethernet1: ipv4: 10.0.0.87/31 ipv6: fc00::ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.13/24 ipv6: fc0a::d/64 ARISTA13T1: @@ -1053,7 +1053,7 @@ configuration: Ethernet1: ipv4: 10.0.0.89/31 ipv6: fc00::b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.14/24 ipv6: fc0a::e/64 ARISTA14T1: @@ -1072,7 +1072,7 @@ configuration: Ethernet1: ipv4: 10.0.0.91/31 ipv6: fc00::b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.15/24 ipv6: fc0a::f/64 ARISTA15T1: @@ -1091,7 +1091,7 @@ configuration: Ethernet1: ipv4: 10.0.0.93/31 ipv6: fc00::ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.16/24 ipv6: fc0a::10/64 ARISTA16T1: @@ -1110,7 +1110,7 @@ configuration: Ethernet1: ipv4: 10.0.0.95/31 ipv6: fc00::be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.17/24 ipv6: fc0a::11/64 ARISTA17T1: @@ -1129,7 +1129,7 @@ configuration: Ethernet1: ipv4: 10.0.0.97/31 ipv6: fc00::c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.18/24 ipv6: fc0a::12/64 ARISTA18T1: @@ -1148,7 +1148,7 @@ configuration: Ethernet1: ipv4: 10.0.0.99/31 ipv6: fc00::c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.19/24 ipv6: fc0a::13/64 ARISTA19T1: @@ -1167,7 +1167,7 @@ configuration: Ethernet1: ipv4: 10.0.0.101/31 ipv6: fc00::ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.20/24 ipv6: fc0a::14/64 ARISTA20T1: @@ -1186,7 +1186,7 @@ configuration: Ethernet1: ipv4: 10.0.0.103/31 ipv6: fc00::ce/126 - bp_interfaces: 
+ bp_interface: ipv4: 10.10.246.21/24 ipv6: fc0a::15/64 ARISTA21T1: @@ -1205,7 +1205,7 @@ configuration: Ethernet1: ipv4: 10.0.0.105/31 ipv6: fc00::d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.22/24 ipv6: fc0a::16/64 ARISTA22T1: @@ -1224,7 +1224,7 @@ configuration: Ethernet1: ipv4: 10.0.0.107/31 ipv6: fc00::d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.23/24 ipv6: fc0a::17/64 ARISTA23T1: @@ -1243,7 +1243,7 @@ configuration: Ethernet1: ipv4: 10.0.0.109/31 ipv6: fc00::da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.24/24 ipv6: fc0a::18/64 ARISTA24T1: @@ -1262,7 +1262,7 @@ configuration: Ethernet1: ipv4: 10.0.0.111/31 ipv6: fc00::de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.25/24 ipv6: fc0a::19/64 ARISTA25T1: @@ -1281,7 +1281,7 @@ configuration: Ethernet1: ipv4: 10.0.0.113/31 ipv6: fc00::e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.26/24 ipv6: fc0a::1a/64 ARISTA26T1: @@ -1300,7 +1300,7 @@ configuration: Ethernet1: ipv4: 10.0.0.115/31 ipv6: fc00::e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.27/24 ipv6: fc0a::1b/64 ARISTA27T1: @@ -1319,7 +1319,7 @@ configuration: Ethernet1: ipv4: 10.0.0.117/31 ipv6: fc00::ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.28/24 ipv6: fc0a::1c/64 ARISTA28T1: @@ -1338,7 +1338,7 @@ configuration: Ethernet1: ipv4: 10.0.0.119/31 ipv6: fc00::ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.29/24 ipv6: fc0a::1d/64 ARISTA29T1: @@ -1357,7 +1357,7 @@ configuration: Ethernet1: ipv4: 10.0.0.121/31 ipv6: fc00::f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.30/24 ipv6: fc0a::1e/64 ARISTA30T1: @@ -1376,7 +1376,7 @@ configuration: Ethernet1: ipv4: 10.0.0.123/31 ipv6: fc00::f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.31/24 ipv6: fc0a::1f/64 ARISTA31T1: @@ -1395,7 +1395,7 @@ configuration: Ethernet1: ipv4: 10.0.0.125/31 ipv6: fc00::fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.32/24 ipv6: fc0a::20/64 ARISTA32T1: @@ -1414,7 +1414,7 @@ configuration: 
Ethernet1: ipv4: 10.0.0.127/31 ipv6: fc00::fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.33/24 ipv6: fc0a::21/64 ARISTA33T1: @@ -1433,7 +1433,7 @@ configuration: Ethernet1: ipv4: 10.0.0.129/31 ipv6: fc00::102/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.34/24 ipv6: fc0a::22/64 ARISTA34T1: @@ -1452,7 +1452,7 @@ configuration: Ethernet1: ipv4: 10.0.0.131/31 ipv6: fc00::106/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.35/24 ipv6: fc0a::23/64 ARISTA35T1: @@ -1471,7 +1471,7 @@ configuration: Ethernet1: ipv4: 10.0.0.133/31 ipv6: fc00::10a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.36/24 ipv6: fc0a::24/64 ARISTA36T1: @@ -1490,7 +1490,7 @@ configuration: Ethernet1: ipv4: 10.0.0.135/31 ipv6: fc00::10e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.37/24 ipv6: fc0a::25/64 ARISTA37T1: @@ -1509,7 +1509,7 @@ configuration: Ethernet1: ipv4: 10.0.0.137/31 ipv6: fc00::112/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.38/24 ipv6: fc0a::26/64 ARISTA38T1: @@ -1528,7 +1528,7 @@ configuration: Ethernet1: ipv4: 10.0.0.139/31 ipv6: fc00::116/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.39/24 ipv6: fc0a::27/64 ARISTA39T1: @@ -1547,7 +1547,7 @@ configuration: Ethernet1: ipv4: 10.0.0.141/31 ipv6: fc00::11a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.40/24 ipv6: fc0a::28/64 ARISTA40T1: @@ -1566,7 +1566,7 @@ configuration: Ethernet1: ipv4: 10.0.0.143/31 ipv6: fc00::11e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.41/24 ipv6: fc0a::29/64 ARISTA41T1: @@ -1585,7 +1585,7 @@ configuration: Ethernet1: ipv4: 10.0.0.145/31 ipv6: fc00::122/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.42/24 ipv6: fc0a::2a/64 ARISTA42T1: @@ -1604,7 +1604,7 @@ configuration: Ethernet1: ipv4: 10.0.0.147/31 ipv6: fc00::126/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.43/24 ipv6: fc0a::2b/64 ARISTA43T1: @@ -1623,7 +1623,7 @@ configuration: Ethernet1: ipv4: 10.0.0.149/31 ipv6: fc00::12a/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.44/24 ipv6: fc0a::2c/64 ARISTA44T1: @@ -1642,7 +1642,7 @@ configuration: Ethernet1: ipv4: 10.0.0.151/31 ipv6: fc00::12e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.45/24 ipv6: fc0a::2d/64 ARISTA45T1: @@ -1661,7 +1661,7 @@ configuration: Ethernet1: ipv4: 10.0.0.153/31 ipv6: fc00::132/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.46/24 ipv6: fc0a::2e/64 ARISTA46T1: @@ -1680,7 +1680,7 @@ configuration: Ethernet1: ipv4: 10.0.0.155/31 ipv6: fc00::136/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.47/24 ipv6: fc0a::2f/64 ARISTA47T1: @@ -1699,7 +1699,7 @@ configuration: Ethernet1: ipv4: 10.0.0.157/31 ipv6: fc00::13a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.48/24 ipv6: fc0a::30/64 ARISTA48T1: @@ -1718,7 +1718,7 @@ configuration: Ethernet1: ipv4: 10.0.0.159/31 ipv6: fc00::13e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.49/24 ipv6: fc0a::31/64 ARISTA49T1: @@ -1737,7 +1737,7 @@ configuration: Ethernet1: ipv4: 10.0.0.161/31 ipv6: fc00::142/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 ARISTA50T1: @@ -1756,7 +1756,7 @@ configuration: Ethernet1: ipv4: 10.0.0.163/31 ipv6: fc00::146/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 ARISTA51T1: @@ -1775,7 +1775,7 @@ configuration: Ethernet1: ipv4: 10.0.0.165/31 ipv6: fc00::14a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.52/24 ipv6: fc0a::34/64 ARISTA52T1: @@ -1794,7 +1794,7 @@ configuration: Ethernet1: ipv4: 10.0.0.167/31 ipv6: fc00::14e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 ARISTA53T1: @@ -1813,7 +1813,7 @@ configuration: Ethernet1: ipv4: 10.0.0.169/31 ipv6: fc00::152/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.54/24 ipv6: fc0a::36/64 ARISTA54T1: @@ -1832,7 +1832,7 @@ configuration: Ethernet1: ipv4: 10.0.0.171/31 ipv6: fc00::156/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 ARISTA55T1: @@ -1851,7 +1851,7 @@ configuration: Ethernet1: 
ipv4: 10.0.0.173/31 ipv6: fc00::15a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 ARISTA56T1: @@ -1870,7 +1870,7 @@ configuration: Ethernet1: ipv4: 10.0.0.175/31 ipv6: fc00::15e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 ARISTA57T1: @@ -1889,7 +1889,7 @@ configuration: Ethernet1: ipv4: 10.0.0.177/31 ipv6: fc00::162/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 ARISTA58T1: @@ -1908,7 +1908,7 @@ configuration: Ethernet1: ipv4: 10.0.0.179/31 ipv6: fc00::166/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 ARISTA59T1: @@ -1927,7 +1927,7 @@ configuration: Ethernet1: ipv4: 10.0.0.181/31 ipv6: fc00::16a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 ARISTA60T1: @@ -1946,7 +1946,7 @@ configuration: Ethernet1: ipv4: 10.0.0.183/31 ipv6: fc00::16e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 ARISTA61T1: @@ -1965,7 +1965,7 @@ configuration: Ethernet1: ipv4: 10.0.0.185/31 ipv6: fc00::172/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 ARISTA62T1: @@ -1984,7 +1984,7 @@ configuration: Ethernet1: ipv4: 10.0.0.187/31 ipv6: fc00::176/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 ARISTA63T1: @@ -2003,7 +2003,7 @@ configuration: Ethernet1: ipv4: 10.0.0.189/31 ipv6: fc00::17a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 ARISTA64T1: @@ -2022,7 +2022,7 @@ configuration: Ethernet1: ipv4: 10.0.0.191/31 ipv6: fc00::17e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 ARISTA65T1: @@ -2041,7 +2041,7 @@ configuration: Ethernet1: ipv4: 10.0.1.65/31 ipv6: fc00::282/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 ARISTA66T1: @@ -2060,7 +2060,7 @@ configuration: Ethernet1: ipv4: 10.0.1.67/31 ipv6: fc00::286/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.67/24 
ipv6: fc0a::43/64 ARISTA67T1: @@ -2079,7 +2079,7 @@ configuration: Ethernet1: ipv4: 10.0.1.69/31 ipv6: fc00::28a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 ARISTA68T1: @@ -2098,7 +2098,7 @@ configuration: Ethernet1: ipv4: 10.0.1.71/31 ipv6: fc00::28e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 ARISTA69T1: @@ -2117,7 +2117,7 @@ configuration: Ethernet1: ipv4: 10.0.1.73/31 ipv6: fc00::292/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 ARISTA70T1: @@ -2136,7 +2136,7 @@ configuration: Ethernet1: ipv4: 10.0.1.75/31 ipv6: fc00::296/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.71/24 ipv6: fc0a::47/64 ARISTA71T1: @@ -2155,7 +2155,7 @@ configuration: Ethernet1: ipv4: 10.0.1.77/31 ipv6: fc00::29a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 ARISTA72T1: @@ -2174,7 +2174,7 @@ configuration: Ethernet1: ipv4: 10.0.1.79/31 ipv6: fc00::29e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 ARISTA73T1: @@ -2193,7 +2193,7 @@ configuration: Ethernet1: ipv4: 10.0.1.81/31 ipv6: fc00::2a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 ARISTA74T1: @@ -2212,7 +2212,7 @@ configuration: Ethernet1: ipv4: 10.0.1.83/31 ipv6: fc00::2a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 ARISTA75T1: @@ -2231,7 +2231,7 @@ configuration: Ethernet1: ipv4: 10.0.1.85/31 ipv6: fc00::2aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 ARISTA76T1: @@ -2250,7 +2250,7 @@ configuration: Ethernet1: ipv4: 10.0.1.87/31 ipv6: fc00::2ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 ARISTA77T1: @@ -2269,7 +2269,7 @@ configuration: Ethernet1: ipv4: 10.0.1.89/31 ipv6: fc00::2b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 ARISTA78T1: @@ -2288,7 +2288,7 @@ configuration: Ethernet1: ipv4: 10.0.1.91/31 ipv6: 
fc00::2b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 ARISTA79T1: @@ -2307,7 +2307,7 @@ configuration: Ethernet1: ipv4: 10.0.1.93/31 ipv6: fc00::2ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 ARISTA80T1: @@ -2326,7 +2326,7 @@ configuration: Ethernet1: ipv4: 10.0.1.95/31 ipv6: fc00::2be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 ARISTA81T1: @@ -2345,7 +2345,7 @@ configuration: Ethernet1: ipv4: 10.0.1.97/31 ipv6: fc00::2c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 ARISTA82T1: @@ -2364,7 +2364,7 @@ configuration: Ethernet1: ipv4: 10.0.1.99/31 ipv6: fc00::2c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 ARISTA83T1: @@ -2383,7 +2383,7 @@ configuration: Ethernet1: ipv4: 10.0.1.101/31 ipv6: fc00::2ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 ARISTA84T1: @@ -2402,7 +2402,7 @@ configuration: Ethernet1: ipv4: 10.0.1.103/31 ipv6: fc00::2ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 ARISTA85T1: @@ -2421,7 +2421,7 @@ configuration: Ethernet1: ipv4: 10.0.1.105/31 ipv6: fc00::2d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 ARISTA86T1: @@ -2440,7 +2440,7 @@ configuration: Ethernet1: ipv4: 10.0.1.107/31 ipv6: fc00::2d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.87/24 ipv6: fc0a::57/64 ARISTA87T1: @@ -2459,7 +2459,7 @@ configuration: Ethernet1: ipv4: 10.0.1.109/31 ipv6: fc00::2da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 ARISTA88T1: @@ -2478,7 +2478,7 @@ configuration: Ethernet1: ipv4: 10.0.1.111/31 ipv6: fc00::2de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 ARISTA89T1: @@ -2497,7 +2497,7 @@ configuration: Ethernet1: ipv4: 10.0.1.113/31 ipv6: fc00::2e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 ARISTA90T1: 
@@ -2516,7 +2516,7 @@ configuration: Ethernet1: ipv4: 10.0.1.115/31 ipv6: fc00::2e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 ARISTA91T1: @@ -2535,7 +2535,7 @@ configuration: Ethernet1: ipv4: 10.0.1.117/31 ipv6: fc00::2ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 ARISTA92T1: @@ -2554,7 +2554,7 @@ configuration: Ethernet1: ipv4: 10.0.1.119/31 ipv6: fc00::2ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 ARISTA93T1: @@ -2573,7 +2573,7 @@ configuration: Ethernet1: ipv4: 10.0.1.121/31 ipv6: fc00::2f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 ARISTA94T1: @@ -2592,7 +2592,7 @@ configuration: Ethernet1: ipv4: 10.0.1.123/31 ipv6: fc00::2f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 ARISTA95T1: @@ -2611,7 +2611,7 @@ configuration: Ethernet1: ipv4: 10.0.1.125/31 ipv6: fc00::2fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 ARISTA96T1: @@ -2630,7 +2630,7 @@ configuration: Ethernet1: ipv4: 10.0.1.127/31 ipv6: fc00::2fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 ARISTA97T1: @@ -2649,7 +2649,7 @@ configuration: Ethernet1: ipv4: 10.0.1.129/31 ipv6: fc00::302/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 ARISTA98T1: @@ -2668,7 +2668,7 @@ configuration: Ethernet1: ipv4: 10.0.1.131/31 ipv6: fc00::306/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 ARISTA99T1: @@ -2687,7 +2687,7 @@ configuration: Ethernet1: ipv4: 10.0.1.133/31 ipv6: fc00::30a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 ARISTA100T1: @@ -2706,7 +2706,7 @@ configuration: Ethernet1: ipv4: 10.0.1.135/31 ipv6: fc00::30e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 ARISTA101T1: @@ -2725,7 +2725,7 @@ configuration: Ethernet1: ipv4: 10.0.1.137/31 ipv6: fc00::312/126 - 
bp_interfaces: + bp_interface: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 ARISTA102T1: @@ -2744,7 +2744,7 @@ configuration: Ethernet1: ipv4: 10.0.1.139/31 ipv6: fc00::316/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 ARISTA103T1: @@ -2763,7 +2763,7 @@ configuration: Ethernet1: ipv4: 10.0.1.141/31 ipv6: fc00::31a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 ARISTA104T1: @@ -2782,7 +2782,7 @@ configuration: Ethernet1: ipv4: 10.0.1.143/31 ipv6: fc00::31e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 ARISTA105T1: @@ -2801,7 +2801,7 @@ configuration: Ethernet1: ipv4: 10.0.1.145/31 ipv6: fc00::322/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 ARISTA106T1: @@ -2820,7 +2820,7 @@ configuration: Ethernet1: ipv4: 10.0.1.147/31 ipv6: fc00::326/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 ARISTA107T1: @@ -2839,7 +2839,7 @@ configuration: Ethernet1: ipv4: 10.0.1.149/31 ipv6: fc00::32a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 ARISTA108T1: @@ -2858,7 +2858,7 @@ configuration: Ethernet1: ipv4: 10.0.1.151/31 ipv6: fc00::32e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 ARISTA109T1: @@ -2877,7 +2877,7 @@ configuration: Ethernet1: ipv4: 10.0.1.153/31 ipv6: fc00::332/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 ARISTA110T1: @@ -2896,7 +2896,7 @@ configuration: Ethernet1: ipv4: 10.0.1.155/31 ipv6: fc00::336/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 ARISTA111T1: @@ -2915,7 +2915,7 @@ configuration: Ethernet1: ipv4: 10.0.1.157/31 ipv6: fc00::33a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 ARISTA112T1: @@ -2934,7 +2934,7 @@ configuration: Ethernet1: ipv4: 10.0.1.159/31 ipv6: fc00::33e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 
ARISTA113T1: @@ -2953,7 +2953,7 @@ configuration: Ethernet1: ipv4: 10.0.1.161/31 ipv6: fc00::342/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 ARISTA114T1: @@ -2972,7 +2972,7 @@ configuration: Ethernet1: ipv4: 10.0.1.163/31 ipv6: fc00::346/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 ARISTA115T1: @@ -2991,7 +2991,7 @@ configuration: Ethernet1: ipv4: 10.0.1.165/31 ipv6: fc00::34a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 ARISTA116T1: @@ -3010,7 +3010,7 @@ configuration: Ethernet1: ipv4: 10.0.1.167/31 ipv6: fc00::34e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 ARISTA117T1: @@ -3029,7 +3029,7 @@ configuration: Ethernet1: ipv4: 10.0.1.169/31 ipv6: fc00::352/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 ARISTA118T1: @@ -3048,7 +3048,7 @@ configuration: Ethernet1: ipv4: 10.0.1.171/31 ipv6: fc00::356/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 ARISTA119T1: @@ -3067,7 +3067,7 @@ configuration: Ethernet1: ipv4: 10.0.1.173/31 ipv6: fc00::35a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 ARISTA120T1: @@ -3086,7 +3086,7 @@ configuration: Ethernet1: ipv4: 10.0.1.175/31 ipv6: fc00::35e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 ARISTA121T1: @@ -3105,7 +3105,7 @@ configuration: Ethernet1: ipv4: 10.0.1.177/31 ipv6: fc00::362/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 ARISTA122T1: @@ -3124,7 +3124,7 @@ configuration: Ethernet1: ipv4: 10.0.1.179/31 ipv6: fc00::366/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 ARISTA123T1: @@ -3143,7 +3143,7 @@ configuration: Ethernet1: ipv4: 10.0.1.181/31 ipv6: fc00::36a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 ARISTA124T1: @@ -3162,7 +3162,7 @@ configuration: Ethernet1: ipv4: 
10.0.1.183/31 ipv6: fc00::36e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 ARISTA125T1: @@ -3181,7 +3181,7 @@ configuration: Ethernet1: ipv4: 10.0.1.185/31 ipv6: fc00::372/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 ARISTA126T1: @@ -3200,7 +3200,7 @@ configuration: Ethernet1: ipv4: 10.0.1.187/31 ipv6: fc00::376/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 ARISTA127T1: @@ -3219,7 +3219,7 @@ configuration: Ethernet1: ipv4: 10.0.1.189/31 ipv6: fc00::37a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 ARISTA128T1: @@ -3238,7 +3238,7 @@ configuration: Ethernet1: ipv4: 10.0.1.191/31 ipv6: fc00::37e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.129/24 ipv6: fc0a::81/64 ARISTA01PT0: @@ -3257,6 +3257,6 @@ configuration: Ethernet1: ipv4: 10.0.2.1/31 ipv6: fc00::402/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.130/24 ipv6: fc0a::82/64 diff --git a/ansible/vars/topo_t0-isolated-d128u128s2.yml b/ansible/vars/topo_t0-isolated-d128u128s2.yml index 5830728fee7..201b68672e5 100644 --- a/ansible/vars/topo_t0-isolated-d128u128s2.yml +++ b/ansible/vars/topo_t0-isolated-d128u128s2.yml @@ -829,7 +829,7 @@ configuration: Ethernet1: ipv4: 10.0.0.65/31 ipv6: fc00::82/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.2/24 ipv6: fc0a::2/64 ARISTA02T1: @@ -848,7 +848,7 @@ configuration: Ethernet1: ipv4: 10.0.0.67/31 ipv6: fc00::86/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.3/24 ipv6: fc0a::3/64 ARISTA03T1: @@ -867,7 +867,7 @@ configuration: Ethernet1: ipv4: 10.0.0.69/31 ipv6: fc00::8a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.4/24 ipv6: fc0a::4/64 ARISTA04T1: @@ -886,7 +886,7 @@ configuration: Ethernet1: ipv4: 10.0.0.71/31 ipv6: fc00::8e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.5/24 ipv6: fc0a::5/64 ARISTA05T1: @@ -905,7 +905,7 @@ configuration: Ethernet1: ipv4: 10.0.0.73/31 ipv6: fc00::92/126 - bp_interfaces: + 
bp_interface: ipv4: 10.10.246.6/24 ipv6: fc0a::6/64 ARISTA06T1: @@ -924,7 +924,7 @@ configuration: Ethernet1: ipv4: 10.0.0.75/31 ipv6: fc00::96/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.7/24 ipv6: fc0a::7/64 ARISTA07T1: @@ -943,7 +943,7 @@ configuration: Ethernet1: ipv4: 10.0.0.77/31 ipv6: fc00::9a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.8/24 ipv6: fc0a::8/64 ARISTA08T1: @@ -962,7 +962,7 @@ configuration: Ethernet1: ipv4: 10.0.0.79/31 ipv6: fc00::9e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.9/24 ipv6: fc0a::9/64 ARISTA09T1: @@ -981,7 +981,7 @@ configuration: Ethernet1: ipv4: 10.0.0.81/31 ipv6: fc00::a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.10/24 ipv6: fc0a::a/64 ARISTA10T1: @@ -1000,7 +1000,7 @@ configuration: Ethernet1: ipv4: 10.0.0.83/31 ipv6: fc00::a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.11/24 ipv6: fc0a::b/64 ARISTA11T1: @@ -1019,7 +1019,7 @@ configuration: Ethernet1: ipv4: 10.0.0.85/31 ipv6: fc00::aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.12/24 ipv6: fc0a::c/64 ARISTA12T1: @@ -1038,7 +1038,7 @@ configuration: Ethernet1: ipv4: 10.0.0.87/31 ipv6: fc00::ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.13/24 ipv6: fc0a::d/64 ARISTA13T1: @@ -1057,7 +1057,7 @@ configuration: Ethernet1: ipv4: 10.0.0.89/31 ipv6: fc00::b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.14/24 ipv6: fc0a::e/64 ARISTA14T1: @@ -1076,7 +1076,7 @@ configuration: Ethernet1: ipv4: 10.0.0.91/31 ipv6: fc00::b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.15/24 ipv6: fc0a::f/64 ARISTA15T1: @@ -1095,7 +1095,7 @@ configuration: Ethernet1: ipv4: 10.0.0.93/31 ipv6: fc00::ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.16/24 ipv6: fc0a::10/64 ARISTA16T1: @@ -1114,7 +1114,7 @@ configuration: Ethernet1: ipv4: 10.0.0.95/31 ipv6: fc00::be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.17/24 ipv6: fc0a::11/64 ARISTA17T1: @@ -1133,7 +1133,7 @@ configuration: Ethernet1: ipv4: 10.0.0.97/31 ipv6: 
fc00::c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.18/24 ipv6: fc0a::12/64 ARISTA18T1: @@ -1152,7 +1152,7 @@ configuration: Ethernet1: ipv4: 10.0.0.99/31 ipv6: fc00::c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.19/24 ipv6: fc0a::13/64 ARISTA19T1: @@ -1171,7 +1171,7 @@ configuration: Ethernet1: ipv4: 10.0.0.101/31 ipv6: fc00::ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.20/24 ipv6: fc0a::14/64 ARISTA20T1: @@ -1190,7 +1190,7 @@ configuration: Ethernet1: ipv4: 10.0.0.103/31 ipv6: fc00::ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.21/24 ipv6: fc0a::15/64 ARISTA21T1: @@ -1209,7 +1209,7 @@ configuration: Ethernet1: ipv4: 10.0.0.105/31 ipv6: fc00::d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.22/24 ipv6: fc0a::16/64 ARISTA22T1: @@ -1228,7 +1228,7 @@ configuration: Ethernet1: ipv4: 10.0.0.107/31 ipv6: fc00::d6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.23/24 ipv6: fc0a::17/64 ARISTA23T1: @@ -1247,7 +1247,7 @@ configuration: Ethernet1: ipv4: 10.0.0.109/31 ipv6: fc00::da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.24/24 ipv6: fc0a::18/64 ARISTA24T1: @@ -1266,7 +1266,7 @@ configuration: Ethernet1: ipv4: 10.0.0.111/31 ipv6: fc00::de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.25/24 ipv6: fc0a::19/64 ARISTA25T1: @@ -1285,7 +1285,7 @@ configuration: Ethernet1: ipv4: 10.0.0.113/31 ipv6: fc00::e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.26/24 ipv6: fc0a::1a/64 ARISTA26T1: @@ -1304,7 +1304,7 @@ configuration: Ethernet1: ipv4: 10.0.0.115/31 ipv6: fc00::e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.27/24 ipv6: fc0a::1b/64 ARISTA27T1: @@ -1323,7 +1323,7 @@ configuration: Ethernet1: ipv4: 10.0.0.117/31 ipv6: fc00::ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.28/24 ipv6: fc0a::1c/64 ARISTA28T1: @@ -1342,7 +1342,7 @@ configuration: Ethernet1: ipv4: 10.0.0.119/31 ipv6: fc00::ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.29/24 ipv6: fc0a::1d/64 ARISTA29T1: @@ -1361,7 
+1361,7 @@ configuration: Ethernet1: ipv4: 10.0.0.121/31 ipv6: fc00::f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.30/24 ipv6: fc0a::1e/64 ARISTA30T1: @@ -1380,7 +1380,7 @@ configuration: Ethernet1: ipv4: 10.0.0.123/31 ipv6: fc00::f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.31/24 ipv6: fc0a::1f/64 ARISTA31T1: @@ -1399,7 +1399,7 @@ configuration: Ethernet1: ipv4: 10.0.0.125/31 ipv6: fc00::fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.32/24 ipv6: fc0a::20/64 ARISTA32T1: @@ -1418,7 +1418,7 @@ configuration: Ethernet1: ipv4: 10.0.0.127/31 ipv6: fc00::fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.33/24 ipv6: fc0a::21/64 ARISTA33T1: @@ -1437,7 +1437,7 @@ configuration: Ethernet1: ipv4: 10.0.0.129/31 ipv6: fc00::102/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.34/24 ipv6: fc0a::22/64 ARISTA34T1: @@ -1456,7 +1456,7 @@ configuration: Ethernet1: ipv4: 10.0.0.131/31 ipv6: fc00::106/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.35/24 ipv6: fc0a::23/64 ARISTA35T1: @@ -1475,7 +1475,7 @@ configuration: Ethernet1: ipv4: 10.0.0.133/31 ipv6: fc00::10a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.36/24 ipv6: fc0a::24/64 ARISTA36T1: @@ -1494,7 +1494,7 @@ configuration: Ethernet1: ipv4: 10.0.0.135/31 ipv6: fc00::10e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.37/24 ipv6: fc0a::25/64 ARISTA37T1: @@ -1513,7 +1513,7 @@ configuration: Ethernet1: ipv4: 10.0.0.137/31 ipv6: fc00::112/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.38/24 ipv6: fc0a::26/64 ARISTA38T1: @@ -1532,7 +1532,7 @@ configuration: Ethernet1: ipv4: 10.0.0.139/31 ipv6: fc00::116/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.39/24 ipv6: fc0a::27/64 ARISTA39T1: @@ -1551,7 +1551,7 @@ configuration: Ethernet1: ipv4: 10.0.0.141/31 ipv6: fc00::11a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.40/24 ipv6: fc0a::28/64 ARISTA40T1: @@ -1570,7 +1570,7 @@ configuration: Ethernet1: ipv4: 10.0.0.143/31 ipv6: fc00::11e/126 - bp_interfaces: + 
bp_interface: ipv4: 10.10.246.41/24 ipv6: fc0a::29/64 ARISTA41T1: @@ -1589,7 +1589,7 @@ configuration: Ethernet1: ipv4: 10.0.0.145/31 ipv6: fc00::122/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.42/24 ipv6: fc0a::2a/64 ARISTA42T1: @@ -1608,7 +1608,7 @@ configuration: Ethernet1: ipv4: 10.0.0.147/31 ipv6: fc00::126/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.43/24 ipv6: fc0a::2b/64 ARISTA43T1: @@ -1627,7 +1627,7 @@ configuration: Ethernet1: ipv4: 10.0.0.149/31 ipv6: fc00::12a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.44/24 ipv6: fc0a::2c/64 ARISTA44T1: @@ -1646,7 +1646,7 @@ configuration: Ethernet1: ipv4: 10.0.0.151/31 ipv6: fc00::12e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.45/24 ipv6: fc0a::2d/64 ARISTA45T1: @@ -1665,7 +1665,7 @@ configuration: Ethernet1: ipv4: 10.0.0.153/31 ipv6: fc00::132/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.46/24 ipv6: fc0a::2e/64 ARISTA46T1: @@ -1684,7 +1684,7 @@ configuration: Ethernet1: ipv4: 10.0.0.155/31 ipv6: fc00::136/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.47/24 ipv6: fc0a::2f/64 ARISTA47T1: @@ -1703,7 +1703,7 @@ configuration: Ethernet1: ipv4: 10.0.0.157/31 ipv6: fc00::13a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.48/24 ipv6: fc0a::30/64 ARISTA48T1: @@ -1722,7 +1722,7 @@ configuration: Ethernet1: ipv4: 10.0.0.159/31 ipv6: fc00::13e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.49/24 ipv6: fc0a::31/64 ARISTA49T1: @@ -1741,7 +1741,7 @@ configuration: Ethernet1: ipv4: 10.0.0.161/31 ipv6: fc00::142/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.50/24 ipv6: fc0a::32/64 ARISTA50T1: @@ -1760,7 +1760,7 @@ configuration: Ethernet1: ipv4: 10.0.0.163/31 ipv6: fc00::146/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.51/24 ipv6: fc0a::33/64 ARISTA51T1: @@ -1779,7 +1779,7 @@ configuration: Ethernet1: ipv4: 10.0.0.165/31 ipv6: fc00::14a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.52/24 ipv6: fc0a::34/64 ARISTA52T1: @@ -1798,7 +1798,7 @@ 
configuration: Ethernet1: ipv4: 10.0.0.167/31 ipv6: fc00::14e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.53/24 ipv6: fc0a::35/64 ARISTA53T1: @@ -1817,7 +1817,7 @@ configuration: Ethernet1: ipv4: 10.0.0.169/31 ipv6: fc00::152/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.54/24 ipv6: fc0a::36/64 ARISTA54T1: @@ -1836,7 +1836,7 @@ configuration: Ethernet1: ipv4: 10.0.0.171/31 ipv6: fc00::156/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.55/24 ipv6: fc0a::37/64 ARISTA55T1: @@ -1855,7 +1855,7 @@ configuration: Ethernet1: ipv4: 10.0.0.173/31 ipv6: fc00::15a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.56/24 ipv6: fc0a::38/64 ARISTA56T1: @@ -1874,7 +1874,7 @@ configuration: Ethernet1: ipv4: 10.0.0.175/31 ipv6: fc00::15e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.57/24 ipv6: fc0a::39/64 ARISTA57T1: @@ -1893,7 +1893,7 @@ configuration: Ethernet1: ipv4: 10.0.0.177/31 ipv6: fc00::162/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.58/24 ipv6: fc0a::3a/64 ARISTA58T1: @@ -1912,7 +1912,7 @@ configuration: Ethernet1: ipv4: 10.0.0.179/31 ipv6: fc00::166/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.59/24 ipv6: fc0a::3b/64 ARISTA59T1: @@ -1931,7 +1931,7 @@ configuration: Ethernet1: ipv4: 10.0.0.181/31 ipv6: fc00::16a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.60/24 ipv6: fc0a::3c/64 ARISTA60T1: @@ -1950,7 +1950,7 @@ configuration: Ethernet1: ipv4: 10.0.0.183/31 ipv6: fc00::16e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.61/24 ipv6: fc0a::3d/64 ARISTA61T1: @@ -1969,7 +1969,7 @@ configuration: Ethernet1: ipv4: 10.0.0.185/31 ipv6: fc00::172/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.62/24 ipv6: fc0a::3e/64 ARISTA62T1: @@ -1988,7 +1988,7 @@ configuration: Ethernet1: ipv4: 10.0.0.187/31 ipv6: fc00::176/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.63/24 ipv6: fc0a::3f/64 ARISTA63T1: @@ -2007,7 +2007,7 @@ configuration: Ethernet1: ipv4: 10.0.0.189/31 ipv6: fc00::17a/126 - bp_interfaces: + 
bp_interface: ipv4: 10.10.246.64/24 ipv6: fc0a::40/64 ARISTA64T1: @@ -2026,7 +2026,7 @@ configuration: Ethernet1: ipv4: 10.0.0.191/31 ipv6: fc00::17e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.65/24 ipv6: fc0a::41/64 ARISTA65T1: @@ -2045,7 +2045,7 @@ configuration: Ethernet1: ipv4: 10.0.1.65/31 ipv6: fc00::282/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.66/24 ipv6: fc0a::42/64 ARISTA66T1: @@ -2064,7 +2064,7 @@ configuration: Ethernet1: ipv4: 10.0.1.67/31 ipv6: fc00::286/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.67/24 ipv6: fc0a::43/64 ARISTA67T1: @@ -2083,7 +2083,7 @@ configuration: Ethernet1: ipv4: 10.0.1.69/31 ipv6: fc00::28a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.68/24 ipv6: fc0a::44/64 ARISTA68T1: @@ -2102,7 +2102,7 @@ configuration: Ethernet1: ipv4: 10.0.1.71/31 ipv6: fc00::28e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.69/24 ipv6: fc0a::45/64 ARISTA69T1: @@ -2121,7 +2121,7 @@ configuration: Ethernet1: ipv4: 10.0.1.73/31 ipv6: fc00::292/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.70/24 ipv6: fc0a::46/64 ARISTA70T1: @@ -2140,7 +2140,7 @@ configuration: Ethernet1: ipv4: 10.0.1.75/31 ipv6: fc00::296/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.71/24 ipv6: fc0a::47/64 ARISTA71T1: @@ -2159,7 +2159,7 @@ configuration: Ethernet1: ipv4: 10.0.1.77/31 ipv6: fc00::29a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.72/24 ipv6: fc0a::48/64 ARISTA72T1: @@ -2178,7 +2178,7 @@ configuration: Ethernet1: ipv4: 10.0.1.79/31 ipv6: fc00::29e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.73/24 ipv6: fc0a::49/64 ARISTA73T1: @@ -2197,7 +2197,7 @@ configuration: Ethernet1: ipv4: 10.0.1.81/31 ipv6: fc00::2a2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.74/24 ipv6: fc0a::4a/64 ARISTA74T1: @@ -2216,7 +2216,7 @@ configuration: Ethernet1: ipv4: 10.0.1.83/31 ipv6: fc00::2a6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.75/24 ipv6: fc0a::4b/64 ARISTA75T1: @@ -2235,7 +2235,7 @@ configuration: 
Ethernet1: ipv4: 10.0.1.85/31 ipv6: fc00::2aa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.76/24 ipv6: fc0a::4c/64 ARISTA76T1: @@ -2254,7 +2254,7 @@ configuration: Ethernet1: ipv4: 10.0.1.87/31 ipv6: fc00::2ae/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.77/24 ipv6: fc0a::4d/64 ARISTA77T1: @@ -2273,7 +2273,7 @@ configuration: Ethernet1: ipv4: 10.0.1.89/31 ipv6: fc00::2b2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.78/24 ipv6: fc0a::4e/64 ARISTA78T1: @@ -2292,7 +2292,7 @@ configuration: Ethernet1: ipv4: 10.0.1.91/31 ipv6: fc00::2b6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.79/24 ipv6: fc0a::4f/64 ARISTA79T1: @@ -2311,7 +2311,7 @@ configuration: Ethernet1: ipv4: 10.0.1.93/31 ipv6: fc00::2ba/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.80/24 ipv6: fc0a::50/64 ARISTA80T1: @@ -2330,7 +2330,7 @@ configuration: Ethernet1: ipv4: 10.0.1.95/31 ipv6: fc00::2be/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.81/24 ipv6: fc0a::51/64 ARISTA81T1: @@ -2349,7 +2349,7 @@ configuration: Ethernet1: ipv4: 10.0.1.97/31 ipv6: fc00::2c2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.82/24 ipv6: fc0a::52/64 ARISTA82T1: @@ -2368,7 +2368,7 @@ configuration: Ethernet1: ipv4: 10.0.1.99/31 ipv6: fc00::2c6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.83/24 ipv6: fc0a::53/64 ARISTA83T1: @@ -2387,7 +2387,7 @@ configuration: Ethernet1: ipv4: 10.0.1.101/31 ipv6: fc00::2ca/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.84/24 ipv6: fc0a::54/64 ARISTA84T1: @@ -2406,7 +2406,7 @@ configuration: Ethernet1: ipv4: 10.0.1.103/31 ipv6: fc00::2ce/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.85/24 ipv6: fc0a::55/64 ARISTA85T1: @@ -2425,7 +2425,7 @@ configuration: Ethernet1: ipv4: 10.0.1.105/31 ipv6: fc00::2d2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.86/24 ipv6: fc0a::56/64 ARISTA86T1: @@ -2444,7 +2444,7 @@ configuration: Ethernet1: ipv4: 10.0.1.107/31 ipv6: fc00::2d6/126 - bp_interfaces: + bp_interface: ipv4: 
10.10.246.87/24 ipv6: fc0a::57/64 ARISTA87T1: @@ -2463,7 +2463,7 @@ configuration: Ethernet1: ipv4: 10.0.1.109/31 ipv6: fc00::2da/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.88/24 ipv6: fc0a::58/64 ARISTA88T1: @@ -2482,7 +2482,7 @@ configuration: Ethernet1: ipv4: 10.0.1.111/31 ipv6: fc00::2de/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.89/24 ipv6: fc0a::59/64 ARISTA89T1: @@ -2501,7 +2501,7 @@ configuration: Ethernet1: ipv4: 10.0.1.113/31 ipv6: fc00::2e2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.90/24 ipv6: fc0a::5a/64 ARISTA90T1: @@ -2520,7 +2520,7 @@ configuration: Ethernet1: ipv4: 10.0.1.115/31 ipv6: fc00::2e6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.91/24 ipv6: fc0a::5b/64 ARISTA91T1: @@ -2539,7 +2539,7 @@ configuration: Ethernet1: ipv4: 10.0.1.117/31 ipv6: fc00::2ea/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.92/24 ipv6: fc0a::5c/64 ARISTA92T1: @@ -2558,7 +2558,7 @@ configuration: Ethernet1: ipv4: 10.0.1.119/31 ipv6: fc00::2ee/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.93/24 ipv6: fc0a::5d/64 ARISTA93T1: @@ -2577,7 +2577,7 @@ configuration: Ethernet1: ipv4: 10.0.1.121/31 ipv6: fc00::2f2/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.94/24 ipv6: fc0a::5e/64 ARISTA94T1: @@ -2596,7 +2596,7 @@ configuration: Ethernet1: ipv4: 10.0.1.123/31 ipv6: fc00::2f6/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.95/24 ipv6: fc0a::5f/64 ARISTA95T1: @@ -2615,7 +2615,7 @@ configuration: Ethernet1: ipv4: 10.0.1.125/31 ipv6: fc00::2fa/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.96/24 ipv6: fc0a::60/64 ARISTA96T1: @@ -2634,7 +2634,7 @@ configuration: Ethernet1: ipv4: 10.0.1.127/31 ipv6: fc00::2fe/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.97/24 ipv6: fc0a::61/64 ARISTA97T1: @@ -2653,7 +2653,7 @@ configuration: Ethernet1: ipv4: 10.0.1.129/31 ipv6: fc00::302/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.98/24 ipv6: fc0a::62/64 ARISTA98T1: @@ -2672,7 +2672,7 @@ configuration: Ethernet1: 
ipv4: 10.0.1.131/31 ipv6: fc00::306/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.99/24 ipv6: fc0a::63/64 ARISTA99T1: @@ -2691,7 +2691,7 @@ configuration: Ethernet1: ipv4: 10.0.1.133/31 ipv6: fc00::30a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.100/24 ipv6: fc0a::64/64 ARISTA100T1: @@ -2710,7 +2710,7 @@ configuration: Ethernet1: ipv4: 10.0.1.135/31 ipv6: fc00::30e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.101/24 ipv6: fc0a::65/64 ARISTA101T1: @@ -2729,7 +2729,7 @@ configuration: Ethernet1: ipv4: 10.0.1.137/31 ipv6: fc00::312/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.102/24 ipv6: fc0a::66/64 ARISTA102T1: @@ -2748,7 +2748,7 @@ configuration: Ethernet1: ipv4: 10.0.1.139/31 ipv6: fc00::316/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.103/24 ipv6: fc0a::67/64 ARISTA103T1: @@ -2767,7 +2767,7 @@ configuration: Ethernet1: ipv4: 10.0.1.141/31 ipv6: fc00::31a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.104/24 ipv6: fc0a::68/64 ARISTA104T1: @@ -2786,7 +2786,7 @@ configuration: Ethernet1: ipv4: 10.0.1.143/31 ipv6: fc00::31e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.105/24 ipv6: fc0a::69/64 ARISTA105T1: @@ -2805,7 +2805,7 @@ configuration: Ethernet1: ipv4: 10.0.1.145/31 ipv6: fc00::322/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.106/24 ipv6: fc0a::6a/64 ARISTA106T1: @@ -2824,7 +2824,7 @@ configuration: Ethernet1: ipv4: 10.0.1.147/31 ipv6: fc00::326/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.107/24 ipv6: fc0a::6b/64 ARISTA107T1: @@ -2843,7 +2843,7 @@ configuration: Ethernet1: ipv4: 10.0.1.149/31 ipv6: fc00::32a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.108/24 ipv6: fc0a::6c/64 ARISTA108T1: @@ -2862,7 +2862,7 @@ configuration: Ethernet1: ipv4: 10.0.1.151/31 ipv6: fc00::32e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.109/24 ipv6: fc0a::6d/64 ARISTA109T1: @@ -2881,7 +2881,7 @@ configuration: Ethernet1: ipv4: 10.0.1.153/31 ipv6: fc00::332/126 - bp_interfaces: + bp_interface: 
ipv4: 10.10.246.110/24 ipv6: fc0a::6e/64 ARISTA110T1: @@ -2900,7 +2900,7 @@ configuration: Ethernet1: ipv4: 10.0.1.155/31 ipv6: fc00::336/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.111/24 ipv6: fc0a::6f/64 ARISTA111T1: @@ -2919,7 +2919,7 @@ configuration: Ethernet1: ipv4: 10.0.1.157/31 ipv6: fc00::33a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.112/24 ipv6: fc0a::70/64 ARISTA112T1: @@ -2938,7 +2938,7 @@ configuration: Ethernet1: ipv4: 10.0.1.159/31 ipv6: fc00::33e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.113/24 ipv6: fc0a::71/64 ARISTA113T1: @@ -2957,7 +2957,7 @@ configuration: Ethernet1: ipv4: 10.0.1.161/31 ipv6: fc00::342/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.114/24 ipv6: fc0a::72/64 ARISTA114T1: @@ -2976,7 +2976,7 @@ configuration: Ethernet1: ipv4: 10.0.1.163/31 ipv6: fc00::346/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.115/24 ipv6: fc0a::73/64 ARISTA115T1: @@ -2995,7 +2995,7 @@ configuration: Ethernet1: ipv4: 10.0.1.165/31 ipv6: fc00::34a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.116/24 ipv6: fc0a::74/64 ARISTA116T1: @@ -3014,7 +3014,7 @@ configuration: Ethernet1: ipv4: 10.0.1.167/31 ipv6: fc00::34e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.117/24 ipv6: fc0a::75/64 ARISTA117T1: @@ -3033,7 +3033,7 @@ configuration: Ethernet1: ipv4: 10.0.1.169/31 ipv6: fc00::352/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.118/24 ipv6: fc0a::76/64 ARISTA118T1: @@ -3052,7 +3052,7 @@ configuration: Ethernet1: ipv4: 10.0.1.171/31 ipv6: fc00::356/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.119/24 ipv6: fc0a::77/64 ARISTA119T1: @@ -3071,7 +3071,7 @@ configuration: Ethernet1: ipv4: 10.0.1.173/31 ipv6: fc00::35a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.120/24 ipv6: fc0a::78/64 ARISTA120T1: @@ -3090,7 +3090,7 @@ configuration: Ethernet1: ipv4: 10.0.1.175/31 ipv6: fc00::35e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.121/24 ipv6: fc0a::79/64 ARISTA121T1: @@ -3109,7 +3109,7 
@@ configuration: Ethernet1: ipv4: 10.0.1.177/31 ipv6: fc00::362/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.122/24 ipv6: fc0a::7a/64 ARISTA122T1: @@ -3128,7 +3128,7 @@ configuration: Ethernet1: ipv4: 10.0.1.179/31 ipv6: fc00::366/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.123/24 ipv6: fc0a::7b/64 ARISTA123T1: @@ -3147,7 +3147,7 @@ configuration: Ethernet1: ipv4: 10.0.1.181/31 ipv6: fc00::36a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.124/24 ipv6: fc0a::7c/64 ARISTA124T1: @@ -3166,7 +3166,7 @@ configuration: Ethernet1: ipv4: 10.0.1.183/31 ipv6: fc00::36e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.125/24 ipv6: fc0a::7d/64 ARISTA125T1: @@ -3185,7 +3185,7 @@ configuration: Ethernet1: ipv4: 10.0.1.185/31 ipv6: fc00::372/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.126/24 ipv6: fc0a::7e/64 ARISTA126T1: @@ -3204,7 +3204,7 @@ configuration: Ethernet1: ipv4: 10.0.1.187/31 ipv6: fc00::376/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.127/24 ipv6: fc0a::7f/64 ARISTA127T1: @@ -3223,7 +3223,7 @@ configuration: Ethernet1: ipv4: 10.0.1.189/31 ipv6: fc00::37a/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.128/24 ipv6: fc0a::80/64 ARISTA128T1: @@ -3242,7 +3242,7 @@ configuration: Ethernet1: ipv4: 10.0.1.191/31 ipv6: fc00::37e/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.129/24 ipv6: fc0a::81/64 ARISTA01PT0: @@ -3261,7 +3261,7 @@ configuration: Ethernet1: ipv4: 10.0.2.1/31 ipv6: fc00::402/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.130/24 ipv6: fc0a::82/64 ARISTA02PT0: @@ -3280,6 +3280,6 @@ configuration: Ethernet1: ipv4: 10.0.2.3/31 ipv6: fc00::406/126 - bp_interfaces: + bp_interface: ipv4: 10.10.246.131/24 ipv6: fc0a::83/64 diff --git a/ansible/veos b/ansible/veos index 6d545542b13..634c7eaa421 100644 --- a/ansible/veos +++ b/ansible/veos @@ -208,17 +208,141 @@ vms_1: ansible_host: 10.250.0.68 VM0167: ansible_host: 10.250.0.69 - -vms_2: - hosts: + VM0168: + ansible_host: 10.250.0.70 + VM0169: + 
ansible_host: 10.250.0.71 + VM0170: + ansible_host: 10.250.0.72 + VM0171: + ansible_host: 10.250.0.73 + VM0172: + ansible_host: 10.250.0.74 + VM0173: + ansible_host: 10.250.0.75 + VM0174: + ansible_host: 10.250.0.76 + VM0175: + ansible_host: 10.250.0.77 + VM0176: + ansible_host: 10.250.0.78 + VM0177: + ansible_host: 10.250.0.79 + VM0178: + ansible_host: 10.250.0.80 + VM0179: + ansible_host: 10.250.0.81 + VM0180: + ansible_host: 10.250.0.82 + VM0181: + ansible_host: 10.250.0.83 + VM0182: + ansible_host: 10.250.0.84 + VM0183: + ansible_host: 10.250.0.85 + VM0184: + ansible_host: 10.250.0.86 + VM0185: + ansible_host: 10.250.0.87 + VM0186: + ansible_host: 10.250.0.88 + VM0187: + ansible_host: 10.250.0.89 + VM0188: + ansible_host: 10.250.0.90 + VM0189: + ansible_host: 10.250.0.91 + VM0190: + ansible_host: 10.250.0.92 + VM0191: + ansible_host: 10.250.0.93 + VM0192: + ansible_host: 10.250.0.94 + VM0193: + ansible_host: 10.250.0.95 + VM0194: + ansible_host: 10.250.0.96 + VM0195: + ansible_host: 10.250.0.97 + VM0196: + ansible_host: 10.250.0.98 + VM0197: + ansible_host: 10.250.0.99 + VM0198: + ansible_host: 10.250.0.100 + VM0199: + ansible_host: 10.250.0.101 VM0200: - ansible_host: 10.250.0.51 + ansible_host: 10.250.0.102 VM0201: - ansible_host: 10.250.0.52 + ansible_host: 10.250.0.103 VM0202: - ansible_host: 10.250.0.53 + ansible_host: 10.250.0.104 VM0203: - ansible_host: 10.250.0.54 + ansible_host: 10.250.0.105 + VM0204: + ansible_host: 10.250.0.106 + VM0205: + ansible_host: 10.250.0.107 + VM0206: + ansible_host: 10.250.0.108 + VM0207: + ansible_host: 10.250.0.109 + VM0208: + ansible_host: 10.250.0.110 + VM0209: + ansible_host: 10.250.0.111 + VM0210: + ansible_host: 10.250.0.112 + VM0211: + ansible_host: 10.250.0.113 + VM0212: + ansible_host: 10.250.0.114 + VM0213: + ansible_host: 10.250.0.115 + VM0214: + ansible_host: 10.250.0.116 + VM0215: + ansible_host: 10.250.0.117 + VM0216: + ansible_host: 10.250.0.118 + VM0217: + ansible_host: 10.250.0.119 + VM0218: + ansible_host: 
10.250.0.120 + VM0219: + ansible_host: 10.250.0.121 + VM0220: + ansible_host: 10.250.0.122 + VM0221: + ansible_host: 10.250.0.123 + VM0222: + ansible_host: 10.250.0.124 + VM0223: + ansible_host: 10.250.0.125 + VM0224: + ansible_host: 10.250.0.126 + VM0225: + ansible_host: 10.250.0.127 + VM0226: + ansible_host: 10.250.0.128 + VM0227: + ansible_host: 10.250.0.129 + VM0228: + ansible_host: 10.250.0.130 + VM0229: + ansible_host: 10.250.0.131 + +vms_2: + hosts: + VM0300: + ansible_host: 10.250.0.252 + VM0301: + ansible_host: 10.250.0.253 + VM0302: + ansible_host: 10.250.0.254 + VM0303: + ansible_host: 10.250.0.255 # The groups below are helper to limit running playbooks to specific server(s) only server_1: From 946c6d8ba888d8e6c433e58995de5dbfb7816f5b Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Mon, 18 Nov 2024 09:23:05 +0800 Subject: [PATCH 113/175] Install azure-cli step by step to fix dpkg lock failure (#15558) What is the motivation for this PR? PR/nightly test have a chance to fail in installing azure-cli for unable to acquire dpkg lock, which means there are 2 or more processes are running apt install at the same time How did you do it? Install azure-cli step by step, and add a timeout for all apt action to acquire dpkg lock Ref: https://learn.microsoft.com/en-us/cli/azure/install-azure-cli-linux?pivots=apt How did you verify/test it? --- .../run-test-elastictest-template.yml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 595a6cb3136..740bbc8db7b 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -178,7 +178,29 @@ steps: # Check if azure cli is installed. If not, try to install it if ! command -v az; then echo "Azure CLI is not installed. Trying to install it..." 
- curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + + echo "Get packages needed for the installation process" + sudo apt-get -o DPkg::Lock::Timeout=600 update + sudo apt-get -o DPkg::Lock::Timeout=600 -y install apt-transport-https ca-certificates curl gnupg lsb-release + + echo "Download and install the Microsoft signing key" + sudo mkdir -p /etc/apt/keyrings + curl -sLS https://packages.microsoft.com/keys/microsoft.asc | + gpg --dearmor | sudo tee /etc/apt/keyrings/microsoft.gpg > /dev/null + sudo chmod go+r /etc/apt/keyrings/microsoft.gpg + + echo "Add the Azure CLI software repository" + AZ_DIST=$(lsb_release -cs) + echo "Types: deb + URIs: https://packages.microsoft.com/repos/azure-cli/ + Suites: ${AZ_DIST} + Components: main + Architectures: $(dpkg --print-architecture) + Signed-by: /etc/apt/keyrings/microsoft.gpg" | sudo tee /etc/apt/sources.list.d/azure-cli.sources + + echo "Update repository information and install the azure-cli package" + sudo apt-get -o DPkg::Lock::Timeout=600 update + sudo apt-get -o DPkg::Lock::Timeout=600 -y install azure-cli else echo "Azure CLI is already installed" fi From 4146d8a5dca845b1caca2ca31b88aa498b3669ac Mon Sep 17 00:00:00 2001 From: vincentpcng <129542523+vincentpcng@users.noreply.github.com> Date: Sun, 17 Nov 2024 19:13:09 -0800 Subject: [PATCH 114/175] Add the port FEC BER mgmt test (#15481) * Add the port FEC BER mgmt test Signed-off-by: vincent ng * Add the port FEC BER mgmt test Signed-off-by: vincent ng * Add the port FEC BER mgmt test Signed-off-by: vincent ng --------- Signed-off-by: vincent ng --- tests/platform_tests/test_intf_fec.py | 34 ++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/tests/platform_tests/test_intf_fec.py b/tests/platform_tests/test_intf_fec.py index 88b6fcdc557..7447ff13e56 100644 --- a/tests/platform_tests/test_intf_fec.py +++ b/tests/platform_tests/test_intf_fec.py @@ -11,7 +11,8 @@ SUPPORTED_PLATFORMS = [ "mlnx_msn", "8101_32fh", - 
"8111_32eh" + "8111_32eh", + "arista" ] SUPPORTED_SPEEDS = [ @@ -35,6 +36,9 @@ def test_verify_fec_oper_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostnam """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + if "broadcom" in duthost.facts.get('platform_asic'): + pytest.skip("Skipping this test on platforms with Broadcom ASICs") + logging.info("Get output of '{}'".format("show interface status")) intf_status = duthost.show_and_parse("show interface status") @@ -63,6 +67,9 @@ def test_config_fec_oper_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostnam """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + if "broadcom" in duthost.facts.get('platform_asic'): + pytest.skip("Skipping this test on platforms with Broadcom ASICs") + logging.info("Get output of '{}'".format("show interface status")) intf_status = duthost.show_and_parse("show interface status") @@ -119,6 +126,17 @@ def test_verify_fec_stats_counters(duthosts, enum_rand_one_per_hwsku_frontend_ho logging.info("Get output of 'show interfaces counters fec-stats'") intf_status = duthost.show_and_parse("show interfaces counters fec-stats") + def skip_ber_counters_test(intf_status: dict) -> bool: + """ + Check whether the BER fields (Pre-FEC and Post-FEC BER) + exists in the "show interfaces counters fec-stats" + CLI output + """ + if intf_status.get('fec_pre_ber') is None or intf_status.get('fec_post_ber') is None: + pytest.fail("Pre-FEC and Port-FEC BER fields missing on interface. 
intf_status: {}".format(intf_status)) + return True + return False + for intf in intf_status: intf_name = intf['iface'] speed = get_interface_speed(duthost, intf_name) @@ -147,3 +165,17 @@ def test_verify_fec_stats_counters(duthosts, enum_rand_one_per_hwsku_frontend_ho if fec_symbol_err_int > fec_corr_int: pytest.fail("FEC symbol errors:{} are higher than FEC correctable errors:{} for interface {}" .format(intf_name, fec_symbol_err_int, fec_corr_int)) + + if skip_ber_counters_test(intf): + continue + fec_pre_ber = intf.get('fec_pre_ber', '').lower() + fec_post_ber = intf.get('fec_post_ber', '').lower() + try: + if fec_pre_ber != "n/a": + float(fec_pre_ber) + if fec_post_ber != "n/a": + float(fec_post_ber) + except ValueError: + pytest.fail("Pre-FEC and Post-FEC BER are not valid floats for interface {}, \ + fec_pre_ber: {} fec_post_ber: {}" + .format(intf_name, fec_pre_ber, fec_post_ber)) From 3f7b7def527133b54d9683f07507b6fe015791a2 Mon Sep 17 00:00:00 2001 From: Dashuai Zhang <164845223+sdszhang@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:28:33 +1100 Subject: [PATCH 115/175] add back T0/T1 merge (#15598) Description of PR Summary: T0/T1 support in snappi multidut is being overwritten by recent commit. Adding it back in. Approach What is the motivation for this PR? Add back T0/T1 support for multidut snappi. How did you do it? 
Use T0/T1 specific function to get snappi ports instead of using variables.py co-authorized by: jianquanye@microsoft.com --- tests/snappi_tests/files/helper.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/snappi_tests/files/helper.py b/tests/snappi_tests/files/helper.py index 60be345f6d3..44b86b2c5ec 100644 --- a/tests/snappi_tests/files/helper.py +++ b/tests/snappi_tests/files/helper.py @@ -9,7 +9,7 @@ from tests.common.helpers.parallel import parallel_run from tests.common.utilities import wait_until from tests.common.snappi_tests.snappi_fixtures import get_snappi_ports_for_rdma, \ - snappi_dut_base_config + snappi_dut_base_config, is_snappi_multidut logger = logging.getLogger(__name__) @@ -101,12 +101,15 @@ def setup_ports_and_dut( "testbed {}, subtype {} in variables.py".format( MULTIDUT_TESTBED, testbed_subtype)) logger.info('Running test for testbed subtype: {}'.format(testbed_subtype)) - snappi_ports = get_snappi_ports_for_rdma( - get_snappi_ports, - rdma_ports, - tx_port_count, - rx_port_count, - MULTIDUT_TESTBED) + if is_snappi_multidut(duthosts): + snappi_ports = get_snappi_ports_for_rdma( + get_snappi_ports, + rdma_ports, + tx_port_count, + rx_port_count, + MULTIDUT_TESTBED) + else: + snappi_ports = get_snappi_ports testbed_config, port_config_list, snappi_ports = snappi_dut_base_config( duthosts, snappi_ports, snappi_api, setup=True) From 66228088abb90be622fa9f3f73d9b4ab722de395 Mon Sep 17 00:00:00 2001 From: Zhijian Li Date: Mon, 18 Nov 2024 22:31:13 -0800 Subject: [PATCH 116/175] [M0][test_acl] Wait BGP fully establish after reboot (#15616) We see TestAclWithReboot L3_Scenario test fail on Nokia-7215 M0 platform. The root cause is test start before BGP fully established. Update this testcase to fix M0 L3 scenario. 
--- tests/acl/test_acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index 2908cfc2038..d9650ee5be3 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -1307,7 +1307,7 @@ def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo, con # We need some additional delay on e1031 if dut.facts["platform"] == "x86_64-cel_e1031-r0": time.sleep(240) - if 't1' in tbinfo["topo"]["name"]: + if 't1' in tbinfo["topo"]["name"] or 'm0' in tbinfo["topo"]["name"]: # Wait BGP sessions up on T1 as we saw BGP sessions to T0 # established later than T2 bgp_neighbors = dut.get_bgp_neighbors() From 5cab7e6292ee01c10b99af20f03e59bb25648c4d Mon Sep 17 00:00:00 2001 From: rbpittman Date: Tue, 19 Nov 2024 03:53:19 -0500 Subject: [PATCH 117/175] Rename drop-checking iterative variables. (#15571) --- tests/saitests/py3/sai_qos_tests.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 30212910941..8e47b276935 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -3478,22 +3478,23 @@ def get_pfc_tx_cnt(src_port_id, pg_cntr_idx): # Verify no ingress/egress drops for all ports pg_drop_counters = {port_id: sai_thrift_read_pg_drop_counters( self.src_client, port_list['src'][port_id]) for port_id in uniq_srcs} - for src_port_id in uniq_srcs: - for pg in range(len(pg_drop_counters[src_port_id])): - drops = pg_drop_counters[src_port_id][pg] - pg_drop_counters_bases[src_port_id][pg] + for uniq_src_port_id in uniq_srcs: + for pg in range(len(pg_drop_counters[uniq_src_port_id])): + drops = pg_drop_counters[uniq_src_port_id][pg] - pg_drop_counters_bases[uniq_src_port_id][pg] if pg in [3, 4]: - assert drops == 0, "Detected %d lossless drops on PG %d src port %d" % (drops, pg, src_port_id) + assert drops == 0, \ + "Detected %d lossless drops on PG %d src 
port %d" % (drops, pg, uniq_src_port_id) elif drops > 0: # When memory is full, any new lossy background traffic is dropped. print("Observed lossy drops %d on PG %d src port %d, expected." % - (drops, pg, src_port_id), file=sys.stderr) + (drops, pg, uniq_src_port_id), file=sys.stderr) xmit_counters_list = {port_id: sai_thrift_read_port_counters( self.dst_client, self.asic_type, port_list['dst'][port_id])[0] for port_id in uniq_dsts} - for dst_port_id in uniq_dsts: + for uniq_dst_port_id in uniq_dsts: for cntr in self.egress_counters: - drops = xmit_counters_list[dst_port_id][cntr] - \ - xmit_counters_bases[dst_port_id][cntr] - assert drops == 0, "Detected %d egress drops on dst port id %d" % (drops, dst_port_id) + drops = xmit_counters_list[uniq_dst_port_id][cntr] - \ + xmit_counters_bases[uniq_dst_port_id][cntr] + assert drops == 0, "Detected %d egress drops on dst port id %d" % (drops, uniq_dst_port_id) first_port_id = self.dst_port_ids[0] last_port_id = self.dst_port_ids[-1] From c1af3455f949ca4ed4ac9bf7dc98f31d606a8829 Mon Sep 17 00:00:00 2001 From: rbpittman Date: Tue, 19 Nov 2024 03:54:11 -0500 Subject: [PATCH 118/175] Xfail qos/test_qos_dscp_mapping.py for Cisco-8122 (#15509) * Skip qos/test_qos_dscp_mapping.py * Change skip to xfail with strict checking. 
--- .../plugins/conditional_mark/tests_mark_conditions.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index be3300b0241..ca8047fbfe3 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1366,6 +1366,13 @@ qos/test_pfc_pause.py::test_pfc_pause_lossless: skip: reason: "Fanout needs to send PFC frames fast enough to completely pause the queue" +qos/test_qos_dscp_mapping.py: + xfail: + reason: "ECN marking in combination with tunnel decap not yet supported" + strict: True + conditions: + - "asic_type in ['cisco-8000'] and platform in ['x86_64-8122_64eh_o-r0']" + qos/test_qos_dscp_mapping.py::TestQoSSaiDSCPQueueMapping_IPIP_Base::test_dscp_to_queue_mapping_pipe_mode: skip: reason: "Pipe decap mode not supported due to either SAI or platform limitation / M0/MX topo does not support qos" From 75952dbd48e68654f0c8a185a80b9e85e9ed1d6a Mon Sep 17 00:00:00 2001 From: Vivek Verma <137406113+vivekverma-arista@users.noreply.github.com> Date: Tue, 19 Nov 2024 14:39:17 +0530 Subject: [PATCH 119/175] Fix testQosSaiDscpQueueMapping (#15109) What is the motivation for this PR? Regression introduced by #14232 14232 06:34:12 __init__._fixture_generator_decorator L0088 ERROR | KeyError(8) Traceback (most recent call last): File "/data/tests/common/plugins/log_section_start/__init__.py", line 84, in _fixture_generator_decorator res = next(it) File "/data/tests/qos/qos_sai_base.py", line 2455, in tc_to_dscp_count for dscp, tc in dscp_to_tc_map.items(): KeyError: 8 How did you do it? Get rid of assumption of 8TCs from the code. How did you verify/test it? Ran the test on Arista 7260X3 platform. 
--- tests/qos/qos_sai_base.py | 3 +-- tests/saitests/py3/sai_qos_tests.py | 9 ++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 92e315d128f..574dbc3c2a9 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -2556,11 +2556,10 @@ def skip_longlink(self, dutQosConfig): def tc_to_dscp_count(self, get_src_dst_asic_and_duts): duthost = get_src_dst_asic_and_duts['src_dut'] tc_to_dscp_count_map = {} - for tc in range(8): - tc_to_dscp_count_map[tc] = 0 config_facts = duthost.asic_instance().config_facts(source="running")["ansible_facts"] dscp_to_tc_map = config_facts['DSCP_TO_TC_MAP']['AZURE'] for dscp, tc in dscp_to_tc_map.items(): + tc_to_dscp_count_map.setdefault(int(tc), 0) tc_to_dscp_count_map[int(tc)] += 1 yield tc_to_dscp_count_map diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 8e47b276935..e7ef618eb49 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -941,9 +941,12 @@ def runTest(self): # queue 7 0 1 1 1 1 # noqa E501 if tc_to_dscp_count_map: - for tc in range(7): - assert (queue_results[tc] == tc_to_dscp_count_map[tc] + queue_results_base[tc]) - assert (queue_results[7] >= tc_to_dscp_count_map[7] + queue_results_base[7]) + for tc in tc_to_dscp_count_map.keys(): + if tc == 7: + # LAG ports can have LACP packets on queue 7, hence using >= comparison + assert (queue_results[tc] >= tc_to_dscp_count_map[tc] + queue_results_base[tc]) + else: + assert (queue_results[tc] == tc_to_dscp_count_map[tc] + queue_results_base[tc]) else: assert (queue_results[QUEUE_0] == 1 + queue_results_base[QUEUE_0]) assert (queue_results[QUEUE_3] == 1 + queue_results_base[QUEUE_3]) From 28633395c903f1d1970a64b55a4506e36977bca1 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Tue, 19 Nov 2024 17:11:05 +0800 Subject: [PATCH 120/175] Add conditions 
into qos/test_qos_sai.py::TestQosSai:: (#15563) What is the motivation for this PR? In #14912, we added conditions for longer matching entries in conditional marks. However, some conditions were missed under the entry qos/test_qos_sai.py::TestQosSai:. This PR adds these missing conditions to entries that start with and extend beyond qos/test_qos_sai.py::TestQosSai:. How did you do it? This PR adds these missing conditions to entries that start with and extend beyond qos/test_qos_sai.py::TestQosSai:. How did you verify/test it? --- .../tests_mark_conditions.yaml | 58 ++++++++++++------- 1 file changed, 38 insertions(+), 20 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index ca8047fbfe3..b6c9edab036 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1408,97 +1408,108 @@ qos/test_qos_sai.py::TestQosSai: qos/test_qos_sai.py::TestQosSai::testIPIPQosSaiDscpToPgMapping: skip: - reason: "For DSCP to PG mapping on IPinIP traffic , mellanox device has different behavior to community. For mellanox device, testQosSaiDscpToPgMapping can cover the scenarios / M0/MX topo does not support qos" + reason: "For DSCP to PG mapping on IPinIP traffic , mellanox device has different behavior to community. For mellanox device, testQosSaiDscpToPgMapping can cover the scenarios / Unsupported testbed type." 
conditions_logical_operator: or conditions: - "asic_type in ['mellanox']" - https://github.com/sonic-net/sonic-mgmt/issues/12906 - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testPfcStormWithSharedHeadroomOccupancy: skip: - reason: "This test is only for Mellanox. / M0/MX topo does not support qos" + reason: "This test is only for Mellanox." conditions_logical_operator: or conditions: - "asic_type in ['cisco-8000']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiBufferPoolWatermark: skip: - reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX / M0/MX topo does not support qos" + reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX / Unsupported testbed type." 
conditions_logical_operator: or conditions: - "platform in ['x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-arista_7800r3_48cq2_lc', 'x86_64-arista_7800r3_48cqm2_lc', 'x86_64-arista_7800r3a_36d2_lc', 'x86_64-arista_7800r3a_36dm2_lc','x86_64-arista_7800r3ak_36dm2_lc']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiDot1pPgMapping: skip: - reason: "Dot1p-PG mapping is only supported on backend. / M0/MX topo does not support qos" + reason: "Dot1p-PG mapping is only supported on backend." conditions_logical_operator: or conditions: - "'backend' not in topo_name" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiDot1pQueueMapping: skip: - reason: "Dot1p-queue mapping is only supported on backend. / M0/MX topo does not support qos" + reason: "Dot1p-queue mapping is only supported on backend." 
conditions_logical_operator: or conditions: - "'backend' not in topo_name" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiDscpQueueMapping: skip: - reason: "Dscp-queue mapping is not supported on backend. / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "'backend' in topo_name" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiDscpToPgMapping: skip: - reason: "Dscp-PG mapping is not supported on backend. / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "'backend' in topo_name" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiDwrrWeightChange: skip: - reason: "Skip DWRR weight change test on Mellanox platform. / M0/MX topo does not support qos" + reason: "Skip DWRR weight change test on Mellanox platform. / Unsupported testbed type." 
conditions_logical_operator: or conditions: - "asic_type in ['mellanox']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiFullMeshTrafficSanity: skip: - reason: "Unsupported platform or testbed type. / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or topo_name not in ['ptf64']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: skip: - reason: "Headroom pool size not supported. / M0/MX topo does not support qos" + reason: "Unsupported testbed type." 
conditions_logical_operator: or conditions: - "https://github.com/sonic-net/sonic-mgmt/issues/12292 and hwsku in ['Force10-S6100'] and topo_type in ['t1-64-lag'] and hwsku not in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64', 'Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8', 'Arista-7060CX-32S-D48C8'] and asic_type not in ['mellanox'] and asic_type in ['cisco-8000']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" - "'t2' in topo_name and asic_subtype in ['broadcom-dnx']" qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolWatermark: skip: - reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "platform in ['x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-arista_7800r3_48cq2_lc', 'x86_64-arista_7800r3_48cqm2_lc', 'x86_64-arista_7800r3a_36d2_lc', 'x86_64-arista_7800r3a_36dm2_lc', 'x86_64-arista_7800r3ak_36dm2_lc'] or asic_type in ['mellanox'] and asic_type in ['cisco-8000'] and https://github.com/sonic-net/sonic-mgmt/issues/12292 and hwsku in ['Force10-S6100'] and topo_type in ['t1-64-lag']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" xfail: reason: "Headroom pool size not supported." 
conditions: @@ -1506,66 +1517,73 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolWatermark: qos/test_qos_sai.py::TestQosSai::testQosSaiLosslessVoq: skip: - reason: "Lossless Voq test is not supported / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiLossyQueueVoq: skip: - reason: "Lossy Queue Voq test is not supported / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiLossyQueueVoqMultiSrc: skip: - reason: "Lossy Queue Voq multiple source test is not supported / M0/MX topo does not support qos" + reason: "Unsupported testbed type." 
conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiPGDrop: skip: - reason: "PG drop size test is not supported. / M0/MX topo does not support qos" + reason: "Unsupported testbed type." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiPgHeadroomWatermark: skip: - reason: "Priority Group Headroom Watermark is not supported on cisco asic. PG drop counter stat is covered as a part of testQosSaiPfcXoffLimit - / M0/MX topo does not support qos" + reason: "Unsupported testbed type." 
conditions_logical_operator: or conditions: - "asic_type in ['cisco-8000'] and platform not in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiPgSharedWatermark[None-wm_pg_shared_lossy]: xfail: - reason: "Image issue on Arista platforms" + reason: "Image issue on Arista platforms / Unsupported testbed type." conditions: - "platform in ['x86_64-arista_7050cx3_32s']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiQWatermarkAllPorts: skip: - reason: "All Port Watermark test is verified only on Cisco Platforms. / M0/MX topo does not support qos" + reason: "All Port Watermark test is verified only on Cisco Platforms." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiSharedReservationSize: skip: - reason: "Shared reservation size test is not supported. 
/ M0/MX topo does not support qos" + reason: "Shared reservation size test is not supported." conditions_logical_operator: or conditions: - "asic_type not in ['cisco-8000'] or platform in ['x86_64-8122_64eh_o-r0']" - "topo_type in ['m0', 'mx']" + - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_tunnel_qos_remap.py::test_pfc_watermark_extra_lossless_active: xfail: From 4cfeeacee2b69da0815184df4a2fddded951828f Mon Sep 17 00:00:00 2001 From: Dawei Huang Date: Tue, 19 Nov 2024 15:30:37 -0600 Subject: [PATCH 121/175] Fix test_l2_configure failure (#15608) Description of PR Fix test_l2_configure failure by removing minigraph.xml temporarily Approach What is the motivation for this PR? Fix test failure How did you do it? Temporary remove minigraph.xml during config_reload How did you verify/test it? on physical and virtual switch, latest image Any platform specific information? 
no --- tests/l2/test_l2_configure.py | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/tests/l2/test_l2_configure.py b/tests/l2/test_l2_configure.py index bdb392b9e7f..5c3609a9a2b 100644 --- a/tests/l2/test_l2_configure.py +++ b/tests/l2/test_l2_configure.py @@ -4,15 +4,14 @@ import logging import pytest +import tempfile from tests.common import config_reload from tests.common.platform.processes_utils import wait_critical_processes from tests.common.helpers.assertions import pytest_assert CONFIG_DB = "/etc/sonic/config_db.json" -CONFIG_DB_BAK = "/etc/sonic/config_db.json.bak" -DUT_IMG_PATH = "/tmp/dut-sonic-img.bin" -LOCALHOST_IMG_PATH = "/tmp/localhost-sonic-img.bin" +MINIGRAPH = "/etc/sonic/minigraph.xml" logger = logging.getLogger(__name__) @@ -24,6 +23,20 @@ ] +def generate_backup_filename(prefix): + """ + @summary: Generate a backup filename. + + Args: + prefix: Prefix of the backup filename. + + Returns: + A backup filename. + """ + with tempfile.NamedTemporaryFile(prefix=prefix, suffix=".bak", delete=False) as f: + return f.name + + @pytest.fixture(autouse=True) def setup_env(duthosts, rand_one_dut_hostname): """ @@ -35,6 +48,7 @@ def setup_env(duthosts, rand_one_dut_hostname): rand_selected_dut: The fixture returns a randomly selected DuT. """ duthost = duthosts[rand_one_dut_hostname] + CONFIG_DB_BAK = generate_backup_filename("config_db.json") duthost.shell("sudo cp {} {}".format(CONFIG_DB, CONFIG_DB_BAK)) yield @@ -80,9 +94,10 @@ def get_db_version(duthost): return "" -def test_no_hardcoded_minigraph(duthosts, rand_one_dut_hostname, tbinfo): +def test_no_hardcoded_tables(duthosts, rand_one_dut_hostname, tbinfo): """ - @summary: A testcase asserts no hardcoded minigraph config is imported to config_db during L2 configuration. + @summary: A test case asserting no hardcoded tables (such as TELEMETRY and RESTAPI) + is migrated to config_db during L2 configuration. Args: duthosts: list of DUTs. 
@@ -99,8 +114,6 @@ def test_no_hardcoded_minigraph(duthosts, rand_one_dut_hostname, tbinfo): mgmt_fact = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_mgmt_interface"] # Step 2: Configure DUT into L2 mode. - # Save original config - duthost.shell("sudo cp {} {}".format(CONFIG_DB, CONFIG_DB_BAK)) # Perform L2 configuration L2_INIT_CFG_FILE = "/tmp/init_l2_cfg.json" MGMT_CFG_FILE = "/tmp/mgmt_cfg.json" @@ -147,12 +160,17 @@ def test_no_hardcoded_minigraph(duthosts, rand_one_dut_hostname, tbinfo): logger.info( "Database version before L2 configuration reload: {}".format(db_version_before) ) + # Move minigraph away to avoid config coming from minigraph. + MINIGRAPH_BAK = generate_backup_filename("minigraph.xml") + duthost.shell("sudo mv {} {}".format(MINIGRAPH, MINIGRAPH_BAK)) config_reload(duthost) wait_critical_processes(duthost) db_version_after = get_db_version(duthost) logger.info( "Database version after L2 configuration reload: {}".format(db_version_after) ) + # Move minigraph back. + duthost.shell("sudo mv {} {}".format(MINIGRAPH_BAK, MINIGRAPH)) # Verify no minigraph config is present. for table in ["TELEMETRY", "RESTAPI"]: From 174c6bf76535d1022f7a652daa251d605a5b3d21 Mon Sep 17 00:00:00 2001 From: Riff Date: Tue, 19 Nov 2024 17:49:14 -0800 Subject: [PATCH 122/175] Update j2cli to jinjanator. (#15600) j2cli is not being maintained anymore and will start to fail to work on ubuntu 24.04, because it is trying to load the imp module, which is deprecated now. However, j2cli is being archived not being maintained anymore. The author recommends using other alternatives that is actively being maintained, such as jinjanator. Hence, making this change in order to support the latest OS. 
--- ansible/setup-management-network.sh | 8 ++++---- docs/testbed/README.testbed.Setup.md | 2 +- docs/testbed/README.testbed.VsSetup.md | 2 +- setup-container.sh | 12 +++++++++--- 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/ansible/setup-management-network.sh b/ansible/setup-management-network.sh index 3347d216b6d..fd2eae5892b 100755 --- a/ansible/setup-management-network.sh +++ b/ansible/setup-management-network.sh @@ -33,10 +33,10 @@ echo "Refreshing apt package lists..." apt-get update echo -echo "STEP 1: Checking for j2cli package..." -if ! command -v j2; then - echo "j2cli not found, installing j2cli" - cmd="install --user j2cli==0.3.10" +echo "STEP 1: Checking for jinjanator package..." +if ! command -v jinjanate; then + echo "jinjanator not found, installing jinjanator" + cmd="install --user jinjanator==24.4.0" if ! command -v pip &> /dev/null; then pip3 $cmd else diff --git a/docs/testbed/README.testbed.Setup.md b/docs/testbed/README.testbed.Setup.md index f26f162befa..c6dcf6431fb 100644 --- a/docs/testbed/README.testbed.Setup.md +++ b/docs/testbed/README.testbed.Setup.md @@ -20,7 +20,7 @@ This document describes the steps to setup the testbed and deploy a topology. ``` - Install Python prerequisites ``` - sudo pip3 install j2cli + sudo pip3 install jinjanator ``` - Install Docker (all credits to https://docs.docker.com/engine/install/ubuntu/ ) ``` diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index f6eea3fab0e..daa38c6fbca 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -22,7 +22,7 @@ First, we need to prepare the host where we will be configuring the virtual test ``` sudo apt install python python-pip openssh-server # v0.3.10 Jinja2 is required, lower version may cause uncompatible issue - sudo pip install j2cli==0.3.10 + sudo pip install jinjanate==24.4.0 ``` 3. 
Run the host setup script to install required packages and initialize the management bridge network diff --git a/setup-container.sh b/setup-container.sh index 90bae4ef4f8..5318aa806e9 100755 --- a/setup-container.sh +++ b/setup-container.sh @@ -275,7 +275,7 @@ ROOT_PASS=${ROOT_PASS} EOF log_info "generate a Dockerfile: ${TMP_DIR}/Dockerfile" - j2 -o "${TMP_DIR}/Dockerfile" "${TMP_DIR}/Dockerfile.j2" "${TMP_DIR}/data.env" || \ + jinjanate -o "${TMP_DIR}/Dockerfile" "${TMP_DIR}/Dockerfile.j2" "${TMP_DIR}/data.env" || \ log_error "failed to generate a Dockerfile: ${TMP_DIR}/Dockerfile" log_info "building docker image from ${TMP_DIR}: ${LOCAL_IMAGE} ..." @@ -445,8 +445,14 @@ if docker ps -a --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then fi fi -if ! which j2 &> /dev/null; then - exit_failure "missing Jinja2 templates support: make sure j2cli package is installed" +if ! which jinjanate &> /dev/null; then + echo "jinjanator not found, installing jinjanator" + cmd="install --user jinjanator==24.4.0" + if ! command -v pip &> /dev/null; then + pip3 $cmd + else + pip $cmd + fi fi pull_sonic_mgmt_docker_image From 2c0cb9f3947df4db5a1a6e18d3a3469320a21afa Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Tue, 19 Nov 2024 18:15:38 -0800 Subject: [PATCH 123/175] Skip dhcp test events for mx (#15541) What is the motivation for this PR? isc-dhcpv4 process is not expected to run for MX topologies so skipping dhcp_relay events testing How did you do it? Check for switch type BmcMgmtToRRouter How did you verify/test it? 
Manual test/pipeline --- tests/telemetry/events/dhcp-relay_events.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/telemetry/events/dhcp-relay_events.py b/tests/telemetry/events/dhcp-relay_events.py index 331cdeaece2..dd2fdf8bfcc 100644 --- a/tests/telemetry/events/dhcp-relay_events.py +++ b/tests/telemetry/events/dhcp-relay_events.py @@ -18,6 +18,10 @@ def test_event(duthost, gnxi_path, ptfhost, ptfadapter, data_dir, validate_yang) features_states, succeeded = duthost.get_feature_status() if not succeeded or features_states["dhcp_relay"] != "enabled": pytest.skip("dhcp_relay is not enabled, skipping dhcp_relay events") + device_metadata = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']['DEVICE_METADATA'] + switch_role = device_metadata['localhost'].get('type', '') + if switch_role == 'BmcMgmtToRRouter': + pytest.skip("Skipping dhcp_relay events for mx topologies") logger.info("Beginning to test dhcp-relay events") run_test(duthost, gnxi_path, ptfhost, data_dir, validate_yang, trigger_dhcp_relay_discard, "dhcp_relay_discard.json", "sonic-events-dhcp-relay:dhcp-relay-discard", tag, False, 30, ptfadapter) From 302332457e19e68ff40c39bf8520ae4d9e498a9a Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:27:12 +0800 Subject: [PATCH 124/175] Eliminate cross-feature dependency from macsec module (#15617) What is the motivation for this PR? Previously, the common script tests/conftest.py relied on importing a module from the feature-specific macsec folder, creating a cross-feature dependency. To eliminate this dependency and improve code organization, we created a Python package named macsec under the common path tests/common. The shared scripts were refactored and relocated into this new package, ensuring a cleaner and more modular structure. How did you do it? 
To eliminate this dependency and improve code organization, we created a Python package named macsec under the common path tests/common. The shared scripts were refactored and relocated into this new package, ensuring a cleaner and more modular structure. How did you verify/test it? --- tests/common/devices/ptf.py | 2 +- tests/common/macsec/__init__.py | 266 ++++++++++++++++++ .../macsec/macsec_config_helper.py | 4 +- tests/{ => common}/macsec/macsec_helper.py | 29 +- .../macsec/macsec_platform_helper.py | 0 tests/{ => common}/macsec/profile.json | 0 tests/conftest.py | 2 +- tests/macsec/__init__.py | 266 ------------------ tests/macsec/conftest.py | 2 +- tests/macsec/test_controlplane.py | 4 +- tests/macsec/test_dataplane.py | 4 +- tests/macsec/test_deployment.py | 2 +- tests/macsec/test_docker_restart.py | 2 +- tests/macsec/test_fault_handling.py | 7 +- tests/macsec/test_interop_protocol.py | 7 +- tests/macsec/test_interop_wan_isis.py | 6 +- 16 files changed, 307 insertions(+), 296 deletions(-) create mode 100644 tests/common/macsec/__init__.py rename tests/{ => common}/macsec/macsec_config_helper.py (97%) rename tests/{ => common}/macsec/macsec_helper.py (95%) rename tests/{ => common}/macsec/macsec_platform_helper.py (100%) rename tests/{ => common}/macsec/profile.json (100%) diff --git a/tests/common/devices/ptf.py b/tests/common/devices/ptf.py index 1e652052a33..048fcbdd35c 100644 --- a/tests/common/devices/ptf.py +++ b/tests/common/devices/ptf.py @@ -3,7 +3,7 @@ import tempfile from tests.common.devices.base import AnsibleHostBase -from tests.macsec.macsec_helper import load_macsec_info +from tests.common.macsec.macsec_helper import load_macsec_info logger = logging.getLogger(__name__) diff --git a/tests/common/macsec/__init__.py b/tests/common/macsec/__init__.py new file mode 100644 index 00000000000..234c61c8485 --- /dev/null +++ b/tests/common/macsec/__init__.py @@ -0,0 +1,266 @@ +import collections +import json +import logging +import os +import sys 
+from ipaddress import ip_address, IPv4Address + +import natsort +import pytest + +if sys.version_info.major > 2: + from pathlib import Path + sys.path.insert(0, str(Path(__file__).parent)) + +from .macsec_config_helper import enable_macsec_feature +from .macsec_config_helper import disable_macsec_feature +from .macsec_config_helper import setup_macsec_configuration +from .macsec_config_helper import cleanup_macsec_configuration +# flake8: noqa: F401 +from tests.common.plugins.sanity_check import sanity_check + +logger = logging.getLogger(__name__) + + +class MacsecPlugin(object): + """ + Pytest macsec plugin + """ + + def __init__(self): + with open(os.path.dirname(__file__) + '/profile.json') as f: + self.macsec_profiles = json.load(f) + for k, v in list(self.macsec_profiles.items()): + self.macsec_profiles[k]["name"] = k + # Set default value + if "rekey_period" not in v: + self.macsec_profiles[k]["rekey_period"] = 0 + + def _generate_macsec_profile(self, metafunc): + value = metafunc.config.getoption("macsec_profile") + if value == 'all': + return natsort.natsorted(list(self.macsec_profiles.keys())) + return [x for x in value.split(',') if x in self.macsec_profiles] + + def pytest_generate_tests(self, metafunc): + if 'macsec_profile' in metafunc.fixturenames: + profiles = self._generate_macsec_profile(metafunc) + assert profiles, "Specify valid macsec profile!" 
+ metafunc.parametrize('macsec_profile', + [self.macsec_profiles[x] for x in profiles], + ids=profiles, + scope="module") + + def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): + return NotImplementedError() + + def downstream_neighbor(self,tbinfo, neighbor): + return NotImplementedError() + + def upstream_neighbor(self,tbinfo, neighbor): + return NotImplementedError() + + @pytest.fixture(scope="module") + def start_macsec_service(self, macsec_duthost, macsec_nbrhosts): + def __start_macsec_service(): + enable_macsec_feature(macsec_duthost, macsec_nbrhosts) + return __start_macsec_service + + @pytest.fixture(scope="module") + def stop_macsec_service(self, macsec_duthost, macsec_nbrhosts): + def __stop_macsec_service(): + disable_macsec_feature(macsec_duthost, macsec_nbrhosts) + return __stop_macsec_service + + @pytest.fixture(scope="module") + def macsec_feature(self, start_macsec_service, stop_macsec_service): + start_macsec_service() + yield + stop_macsec_service() + + @pytest.fixture(scope="module") + def startup_macsec(self, request, macsec_duthost, ctrl_links, macsec_profile, tbinfo): + topo_name = tbinfo['topo']['name'] + def __startup_macsec(): + profile = macsec_profile + if request.config.getoption("neighbor_type") == "eos": + if macsec_duthost.facts["asic_type"] == "vs" and profile['send_sci'] == "false": + # On EOS, portchannel mac is not same as the member port mac (being as SCI), + # then src mac is not equal to SCI in its sending packet. The receiver of vSONIC + # will drop it for macsec kernel module does not correctly handle it. 
+ pytest.skip( + "macsec on dut vsonic, neighbor eos, send_sci false") + if 't2' not in topo_name: + cleanup_macsec_configuration(macsec_duthost, ctrl_links, profile['name']) + setup_macsec_configuration(macsec_duthost, ctrl_links, + profile['name'], profile['priority'], profile['cipher_suite'], + profile['primary_cak'], profile['primary_ckn'], profile['policy'], + profile['send_sci'], profile['rekey_period']) + logger.info( + "Setup MACsec configuration with arguments:\n{}".format(locals())) + return __startup_macsec + + @pytest.fixture(scope="module") + def shutdown_macsec(self, macsec_duthost, ctrl_links, macsec_profile): + def __shutdown_macsec(): + profile = macsec_profile + cleanup_macsec_configuration(macsec_duthost, ctrl_links, profile['name']) + return __shutdown_macsec + + @pytest.fixture(scope="module", autouse=True) + def macsec_setup(self, startup_macsec, shutdown_macsec, macsec_feature): + ''' + setup macsec links + ''' + startup_macsec() + yield + shutdown_macsec() + + @pytest.fixture(scope="module") + def macsec_nbrhosts(self, ctrl_links): + return {nbr["name"]: nbr for nbr in list(ctrl_links.values())} + + @pytest.fixture(scope="module") + def ctrl_links(self, macsec_duthost, tbinfo, nbrhosts): + + if not nbrhosts: + topo_name = tbinfo['topo']['name'] + pytest.skip("None of neighbors on topology {}".format(topo_name)) + + ctrl_nbr_names = self.get_ctrl_nbr_names(macsec_duthost, nbrhosts, tbinfo) + logger.info("Controlled links {}".format(ctrl_nbr_names)) + nbrhosts = {name: nbrhosts[name] for name in ctrl_nbr_names} + return self.find_links_from_nbr(macsec_duthost, tbinfo, nbrhosts) + + @pytest.fixture(scope="module") + def unctrl_links(self, macsec_duthost, tbinfo, nbrhosts, ctrl_links): + unctrl_nbr_names = set(nbrhosts.keys()) + for _, nbr in ctrl_links.items(): + if nbr["name"] in unctrl_nbr_names: + unctrl_nbr_names.remove(nbr["name"]) + + logger.info("Uncontrolled links {}".format(unctrl_nbr_names)) + nbrhosts = {name: nbrhosts[name] for name 
in unctrl_nbr_names} + return self.find_links_from_nbr(macsec_duthost, tbinfo, nbrhosts) + + @pytest.fixture(scope="module") + def downstream_links(self, macsec_duthost, tbinfo, nbrhosts): + links = collections.defaultdict(dict) + + def filter(interface, neighbor, mg_facts, tbinfo): + if self.downstream_neighbor(tbinfo, neighbor): + port = mg_facts["minigraph_neighbors"][interface]["port"] + if interface not in mg_facts["minigraph_ptf_indices"]: + logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) + return + links[interface] = { + "name": neighbor["name"], + "ptf_port_id": mg_facts["minigraph_ptf_indices"][interface], + "port": port + } + self.find_links(macsec_duthost, tbinfo, filter) + return links + + @pytest.fixture(scope="module") + def upstream_links(self, macsec_duthost, tbinfo, nbrhosts): + links = collections.defaultdict(dict) + + def filter(interface, neighbor, mg_facts, tbinfo): + if self.upstream_neighbor(tbinfo, neighbor): + for item in mg_facts["minigraph_bgp"]: + if item["name"] == neighbor["name"]: + if isinstance(ip_address(item["addr"]), IPv4Address): + # The address of neighbor device + local_ipv4_addr = item["addr"] + # The address of DUT + peer_ipv4_addr = item["peer_addr"] + break + if interface not in mg_facts["minigraph_ptf_indices"]: + logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) + return + port = mg_facts["minigraph_neighbors"][interface]["port"] + links[interface] = { + "name": neighbor["name"], + "ptf_port_id": mg_facts["minigraph_ptf_indices"][interface], + "local_ipv4_addr": local_ipv4_addr, + "peer_ipv4_addr": peer_ipv4_addr, + "port": port, + "host": nbrhosts[neighbor["name"]]["host"] + } + self.find_links(macsec_duthost, tbinfo, filter) + return links + + def find_links(self, duthost, tbinfo, filter): + + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + for interface, neighbor in mg_facts["minigraph_neighbors"].items(): + filter(interface, neighbor, mg_facts, tbinfo) + + 
def is_interface_portchannel_member(self, pc, interface): + for pc_name, elements in list(pc.items()): + if interface in elements['members']: + return True + return False + + def find_links_from_nbr(self, duthost, tbinfo, nbrhosts): + links = collections.defaultdict(dict) + def filter(interface, neighbor, mg_facts, tbinfo): + if neighbor["name"] not in list(nbrhosts.keys()): + return + port = mg_facts["minigraph_neighbors"][interface]["port"] + + links[interface] = { + "name": neighbor["name"], + "host": nbrhosts[neighbor["name"]]["host"], + "port": port, + "dut_name": duthost.hostname + } + self.find_links(duthost, tbinfo, filter) + return links + +class MacsecPluginT0(MacsecPlugin): + """ + Pytest macsec plugin + """ + + + def __init__(self): + super(MacsecPluginT0, self).__init__() + + def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): + ctrl_nbr_names = natsort.natsorted(nbrhosts.keys())[:2] + return ctrl_nbr_names + + def downstream_neighbor(self,tbinfo, neighbor): + if (tbinfo["topo"]["type"] == "t0" and "Server" in neighbor["name"]): + return True + return False + + def upstream_neighbor(self,tbinfo, neighbor): + if (tbinfo["topo"]["type"] == "t0" and "T1" in neighbor["name"]): + return True + return False + +class MacsecPluginT2(MacsecPlugin): + """ + Pytest macsec plugin + """ + + + def __init__(self): + super(MacsecPluginT2, self).__init__() + + def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): + mg_facts = macsec_duthost.get_extended_minigraph_facts(tbinfo) + ctrl_nbr_names = mg_facts['macsec_neighbors'] + return ctrl_nbr_names + + def downstream_neighbor(self,tbinfo, neighbor): + if ("t2" in tbinfo["topo"]["type"] and "T1" in neighbor["name"]): + return True + return False + + def upstream_neighbor(self,tbinfo, neighbor): + if ("t2" in tbinfo["topo"]["type"] and "T3" in neighbor["name"]): + return True + return False diff --git a/tests/macsec/macsec_config_helper.py b/tests/common/macsec/macsec_config_helper.py similarity 
index 97% rename from tests/macsec/macsec_config_helper.py rename to tests/common/macsec/macsec_config_helper.py index 87e74afbb76..ffec635677a 100644 --- a/tests/macsec/macsec_config_helper.py +++ b/tests/common/macsec/macsec_config_helper.py @@ -1,8 +1,8 @@ import logging import time -from .macsec_helper import get_mka_session, getns_prefix, wait_all_complete, submit_async_task -from .macsec_platform_helper import global_cmd, find_portchannel_from_member, get_portchannel +from tests.common.macsec.macsec_helper import get_mka_session, getns_prefix, wait_all_complete, submit_async_task +from tests.common.macsec.macsec_platform_helper import global_cmd, find_portchannel_from_member, get_portchannel from tests.common.devices.eos import EosHost from tests.common.utilities import wait_until diff --git a/tests/macsec/macsec_helper.py b/tests/common/macsec/macsec_helper.py similarity index 95% rename from tests/macsec/macsec_helper.py rename to tests/common/macsec/macsec_helper.py index da13721f417..b00b2058eb6 100644 --- a/tests/macsec/macsec_helper.py +++ b/tests/common/macsec/macsec_helper.py @@ -15,7 +15,7 @@ import scapy.all as scapy import scapy.contrib.macsec as scapy_macsec -from .macsec_platform_helper import sonic_db_cli +from tests.common.macsec.macsec_platform_helper import sonic_db_cli from tests.common.devices.eos import EosHost __all__ = [ @@ -192,7 +192,8 @@ def get_mka_session(host): ''' Here is an output example of `ip macsec show` admin@vlab-01:~$ ip macsec show - 130: macsec_eth29: protect on validate strict sc off sa off encrypt on send_sci on end_station off scb off replay off + 130: macsec_eth29: protect on validate strict sc off sa off encrypt + on send_sci on end_station off scb off replay off cipher suite: GCM-AES-128, using ICV length 16 TXSC: 52540041303f0001 on SA 0 0: PN 1041, state on, SSCI 16777216, key 0ecddfe0f462491c13400dbf7433465d @@ -200,7 +201,8 @@ def get_mka_session(host): RXSC: 525400b5be690001, state on 0: PN 1041, state on, 
SSCI 16777216, key 0ecddfe0f462491c13400dbf7433465d 3: PN 0, state on, SSCI 16777216, key 0ecddfe0f462491c13400dbf7433465d - 131: macsec_eth30: protect on validate strict sc off sa off encrypt on send_sci on end_station off scb off replay off + 131: macsec_eth30: protect on validate strict sc off sa off encrypt + on send_sci on end_station off scb off replay off cipher suite: GCM-AES-128, using ICV length 16 TXSC: 52540041303f0001 on SA 0 0: PN 1041, state on, key daa8169cde2fe1e238aaa83672e40279 @@ -438,14 +440,16 @@ def macsec_dp_poll(test, device_number=0, port_number=None, timeout=None, exp_pk ret = __origin_dp_poll( test, device_number=device_number, port_number=port_number, timeout=timeout, exp_pkt=None) timeout -= time.time() - start_time - # Since we call __origin_dp_poll with exp_pkt=None, it should only ever fail if no packets are received at all. In this case, continue normally + # Since we call __origin_dp_poll with exp_pkt=None, it should only ever fail if no packets are received at all. + # In this case, continue normally # until we exceed the timeout value provided to macsec_dp_poll. if isinstance(ret, test.dataplane.PollFailure): if timeout <= 0: break else: continue - # The device number of PTF host is 0, if the target port isn't a injected port(belong to ptf host), Don't need to do MACsec further. + # The device number of PTF host is 0, if the target port isn't a injected port(belong to ptf host), + # Don't need to do MACsec further. 
if ret.device != 0 or exp_pkt is None: return ret pkt = scapy.Ether(ret.packet) @@ -454,17 +458,22 @@ def macsec_dp_poll(test, device_number=0, port_number=None, timeout=None, exp_pk if ptf.dataplane.match_exp_pkt(exp_pkt, pkt): return ret else: - macsec_info = load_macsec_info(test.duthost, find_portname_from_ptf_id(test.mg_facts, ret.port), force_reload[ret.port]) + macsec_info = load_macsec_info(test.duthost, find_portname_from_ptf_id(test.mg_facts, ret.port), + force_reload[ret.port]) if macsec_info: encrypt, send_sci, xpn_en, sci, an, sak, ssci, salt = macsec_info force_reload[ret.port] = False pkt, decap_success = decap_macsec_pkt(pkt, sci, an, sak, encrypt, send_sci, 0, xpn_en, ssci, salt) if decap_success and ptf.dataplane.match_exp_pkt(exp_pkt, pkt): return ret - # Normally, if __origin_dp_poll returns a PollFailure, the PollFailure object will contain a list of recently received packets - # to help with debugging. However, since we call __origin_dp_poll multiple times, only the packets from the most recent call is retained. - # If we don't find a matching packet (either with or without MACsec decoding), we need to manually store the packet we received. - # Later if we return a PollFailure, we can provide the received packets to emulate the behavior of __origin_dp_poll. + # Normally, if __origin_dp_poll returns a PollFailure, + # the PollFailure object will contain a list of recently received packets + # to help with debugging. However, since we call __origin_dp_poll multiple times, + # only the packets from the most recent call is retained. + # If we don't find a matching packet (either with or without MACsec decoding), + # we need to manually store the packet we received. + # Later if we return a PollFailure, + # we can provide the received packets to emulate the behavior of __origin_dp_poll. 
recent_packets.append(pkt) packet_count += 1 if timeout <= 0: diff --git a/tests/macsec/macsec_platform_helper.py b/tests/common/macsec/macsec_platform_helper.py similarity index 100% rename from tests/macsec/macsec_platform_helper.py rename to tests/common/macsec/macsec_platform_helper.py diff --git a/tests/macsec/profile.json b/tests/common/macsec/profile.json similarity index 100% rename from tests/macsec/profile.json rename to tests/common/macsec/profile.json diff --git a/tests/conftest.py b/tests/conftest.py index f0531bb5ba0..4885d240aaa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -69,7 +69,7 @@ from tests.common.plugins.ptfadapter.dummy_testutils import DummyTestUtils try: - from tests.macsec import MacsecPluginT2, MacsecPluginT0 + from tests.common.macsec import MacsecPluginT2, MacsecPluginT0 except ImportError as e: logging.error(e) diff --git a/tests/macsec/__init__.py b/tests/macsec/__init__.py index 234c61c8485..e69de29bb2d 100644 --- a/tests/macsec/__init__.py +++ b/tests/macsec/__init__.py @@ -1,266 +0,0 @@ -import collections -import json -import logging -import os -import sys -from ipaddress import ip_address, IPv4Address - -import natsort -import pytest - -if sys.version_info.major > 2: - from pathlib import Path - sys.path.insert(0, str(Path(__file__).parent)) - -from .macsec_config_helper import enable_macsec_feature -from .macsec_config_helper import disable_macsec_feature -from .macsec_config_helper import setup_macsec_configuration -from .macsec_config_helper import cleanup_macsec_configuration -# flake8: noqa: F401 -from tests.common.plugins.sanity_check import sanity_check - -logger = logging.getLogger(__name__) - - -class MacsecPlugin(object): - """ - Pytest macsec plugin - """ - - def __init__(self): - with open(os.path.dirname(__file__) + '/profile.json') as f: - self.macsec_profiles = json.load(f) - for k, v in list(self.macsec_profiles.items()): - self.macsec_profiles[k]["name"] = k - # Set default value - if 
"rekey_period" not in v: - self.macsec_profiles[k]["rekey_period"] = 0 - - def _generate_macsec_profile(self, metafunc): - value = metafunc.config.getoption("macsec_profile") - if value == 'all': - return natsort.natsorted(list(self.macsec_profiles.keys())) - return [x for x in value.split(',') if x in self.macsec_profiles] - - def pytest_generate_tests(self, metafunc): - if 'macsec_profile' in metafunc.fixturenames: - profiles = self._generate_macsec_profile(metafunc) - assert profiles, "Specify valid macsec profile!" - metafunc.parametrize('macsec_profile', - [self.macsec_profiles[x] for x in profiles], - ids=profiles, - scope="module") - - def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): - return NotImplementedError() - - def downstream_neighbor(self,tbinfo, neighbor): - return NotImplementedError() - - def upstream_neighbor(self,tbinfo, neighbor): - return NotImplementedError() - - @pytest.fixture(scope="module") - def start_macsec_service(self, macsec_duthost, macsec_nbrhosts): - def __start_macsec_service(): - enable_macsec_feature(macsec_duthost, macsec_nbrhosts) - return __start_macsec_service - - @pytest.fixture(scope="module") - def stop_macsec_service(self, macsec_duthost, macsec_nbrhosts): - def __stop_macsec_service(): - disable_macsec_feature(macsec_duthost, macsec_nbrhosts) - return __stop_macsec_service - - @pytest.fixture(scope="module") - def macsec_feature(self, start_macsec_service, stop_macsec_service): - start_macsec_service() - yield - stop_macsec_service() - - @pytest.fixture(scope="module") - def startup_macsec(self, request, macsec_duthost, ctrl_links, macsec_profile, tbinfo): - topo_name = tbinfo['topo']['name'] - def __startup_macsec(): - profile = macsec_profile - if request.config.getoption("neighbor_type") == "eos": - if macsec_duthost.facts["asic_type"] == "vs" and profile['send_sci'] == "false": - # On EOS, portchannel mac is not same as the member port mac (being as SCI), - # then src mac is not equal to SCI in its 
sending packet. The receiver of vSONIC - # will drop it for macsec kernel module does not correctly handle it. - pytest.skip( - "macsec on dut vsonic, neighbor eos, send_sci false") - if 't2' not in topo_name: - cleanup_macsec_configuration(macsec_duthost, ctrl_links, profile['name']) - setup_macsec_configuration(macsec_duthost, ctrl_links, - profile['name'], profile['priority'], profile['cipher_suite'], - profile['primary_cak'], profile['primary_ckn'], profile['policy'], - profile['send_sci'], profile['rekey_period']) - logger.info( - "Setup MACsec configuration with arguments:\n{}".format(locals())) - return __startup_macsec - - @pytest.fixture(scope="module") - def shutdown_macsec(self, macsec_duthost, ctrl_links, macsec_profile): - def __shutdown_macsec(): - profile = macsec_profile - cleanup_macsec_configuration(macsec_duthost, ctrl_links, profile['name']) - return __shutdown_macsec - - @pytest.fixture(scope="module", autouse=True) - def macsec_setup(self, startup_macsec, shutdown_macsec, macsec_feature): - ''' - setup macsec links - ''' - startup_macsec() - yield - shutdown_macsec() - - @pytest.fixture(scope="module") - def macsec_nbrhosts(self, ctrl_links): - return {nbr["name"]: nbr for nbr in list(ctrl_links.values())} - - @pytest.fixture(scope="module") - def ctrl_links(self, macsec_duthost, tbinfo, nbrhosts): - - if not nbrhosts: - topo_name = tbinfo['topo']['name'] - pytest.skip("None of neighbors on topology {}".format(topo_name)) - - ctrl_nbr_names = self.get_ctrl_nbr_names(macsec_duthost, nbrhosts, tbinfo) - logger.info("Controlled links {}".format(ctrl_nbr_names)) - nbrhosts = {name: nbrhosts[name] for name in ctrl_nbr_names} - return self.find_links_from_nbr(macsec_duthost, tbinfo, nbrhosts) - - @pytest.fixture(scope="module") - def unctrl_links(self, macsec_duthost, tbinfo, nbrhosts, ctrl_links): - unctrl_nbr_names = set(nbrhosts.keys()) - for _, nbr in ctrl_links.items(): - if nbr["name"] in unctrl_nbr_names: - 
unctrl_nbr_names.remove(nbr["name"]) - - logger.info("Uncontrolled links {}".format(unctrl_nbr_names)) - nbrhosts = {name: nbrhosts[name] for name in unctrl_nbr_names} - return self.find_links_from_nbr(macsec_duthost, tbinfo, nbrhosts) - - @pytest.fixture(scope="module") - def downstream_links(self, macsec_duthost, tbinfo, nbrhosts): - links = collections.defaultdict(dict) - - def filter(interface, neighbor, mg_facts, tbinfo): - if self.downstream_neighbor(tbinfo, neighbor): - port = mg_facts["minigraph_neighbors"][interface]["port"] - if interface not in mg_facts["minigraph_ptf_indices"]: - logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) - return - links[interface] = { - "name": neighbor["name"], - "ptf_port_id": mg_facts["minigraph_ptf_indices"][interface], - "port": port - } - self.find_links(macsec_duthost, tbinfo, filter) - return links - - @pytest.fixture(scope="module") - def upstream_links(self, macsec_duthost, tbinfo, nbrhosts): - links = collections.defaultdict(dict) - - def filter(interface, neighbor, mg_facts, tbinfo): - if self.upstream_neighbor(tbinfo, neighbor): - for item in mg_facts["minigraph_bgp"]: - if item["name"] == neighbor["name"]: - if isinstance(ip_address(item["addr"]), IPv4Address): - # The address of neighbor device - local_ipv4_addr = item["addr"] - # The address of DUT - peer_ipv4_addr = item["peer_addr"] - break - if interface not in mg_facts["minigraph_ptf_indices"]: - logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) - return - port = mg_facts["minigraph_neighbors"][interface]["port"] - links[interface] = { - "name": neighbor["name"], - "ptf_port_id": mg_facts["minigraph_ptf_indices"][interface], - "local_ipv4_addr": local_ipv4_addr, - "peer_ipv4_addr": peer_ipv4_addr, - "port": port, - "host": nbrhosts[neighbor["name"]]["host"] - } - self.find_links(macsec_duthost, tbinfo, filter) - return links - - def find_links(self, duthost, tbinfo, filter): - - mg_facts = 
duthost.get_extended_minigraph_facts(tbinfo) - for interface, neighbor in mg_facts["minigraph_neighbors"].items(): - filter(interface, neighbor, mg_facts, tbinfo) - - def is_interface_portchannel_member(self, pc, interface): - for pc_name, elements in list(pc.items()): - if interface in elements['members']: - return True - return False - - def find_links_from_nbr(self, duthost, tbinfo, nbrhosts): - links = collections.defaultdict(dict) - def filter(interface, neighbor, mg_facts, tbinfo): - if neighbor["name"] not in list(nbrhosts.keys()): - return - port = mg_facts["minigraph_neighbors"][interface]["port"] - - links[interface] = { - "name": neighbor["name"], - "host": nbrhosts[neighbor["name"]]["host"], - "port": port, - "dut_name": duthost.hostname - } - self.find_links(duthost, tbinfo, filter) - return links - -class MacsecPluginT0(MacsecPlugin): - """ - Pytest macsec plugin - """ - - - def __init__(self): - super(MacsecPluginT0, self).__init__() - - def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): - ctrl_nbr_names = natsort.natsorted(nbrhosts.keys())[:2] - return ctrl_nbr_names - - def downstream_neighbor(self,tbinfo, neighbor): - if (tbinfo["topo"]["type"] == "t0" and "Server" in neighbor["name"]): - return True - return False - - def upstream_neighbor(self,tbinfo, neighbor): - if (tbinfo["topo"]["type"] == "t0" and "T1" in neighbor["name"]): - return True - return False - -class MacsecPluginT2(MacsecPlugin): - """ - Pytest macsec plugin - """ - - - def __init__(self): - super(MacsecPluginT2, self).__init__() - - def get_ctrl_nbr_names(self, macsec_duthost, nbrhosts, tbinfo): - mg_facts = macsec_duthost.get_extended_minigraph_facts(tbinfo) - ctrl_nbr_names = mg_facts['macsec_neighbors'] - return ctrl_nbr_names - - def downstream_neighbor(self,tbinfo, neighbor): - if ("t2" in tbinfo["topo"]["type"] and "T1" in neighbor["name"]): - return True - return False - - def upstream_neighbor(self,tbinfo, neighbor): - if ("t2" in tbinfo["topo"]["type"] and 
"T3" in neighbor["name"]): - return True - return False diff --git a/tests/macsec/conftest.py b/tests/macsec/conftest.py index 352887c41d3..d7b1bc6b2a6 100644 --- a/tests/macsec/conftest.py +++ b/tests/macsec/conftest.py @@ -1,6 +1,6 @@ import pytest -from .macsec_helper import check_appl_db +from tests.common.macsec.macsec_helper import check_appl_db from tests.common.utilities import wait_until diff --git a/tests/macsec/test_controlplane.py b/tests/macsec/test_controlplane.py index 61ffb58e02b..ad140df323c 100644 --- a/tests/macsec/test_controlplane.py +++ b/tests/macsec/test_controlplane.py @@ -5,9 +5,9 @@ from tests.common.utilities import wait_until from tests.common.devices.eos import EosHost -from .macsec_helper import check_wpa_supplicant_process, check_appl_db, check_mka_session,\ +from tests.common.macsec.macsec_helper import check_wpa_supplicant_process, check_appl_db, check_mka_session,\ get_mka_session, get_sci, get_appl_db, get_ipnetns_prefix -from .macsec_platform_helper import get_platform, get_macsec_ifname +from tests.common.macsec.macsec_platform_helper import get_platform, get_macsec_ifname logger = logging.getLogger(__name__) diff --git a/tests/macsec/test_dataplane.py b/tests/macsec/test_dataplane.py index b70eac40ae9..a6d5bd6e2ff 100644 --- a/tests/macsec/test_dataplane.py +++ b/tests/macsec/test_dataplane.py @@ -7,9 +7,9 @@ from collections import Counter from tests.common.devices.eos import EosHost -from .macsec_helper import create_pkt, create_exp_pkt, check_macsec_pkt,\ +from tests.common.macsec.macsec_helper import create_pkt, create_exp_pkt, check_macsec_pkt,\ get_ipnetns_prefix, get_macsec_sa_name, get_macsec_counters -from .macsec_platform_helper import get_portchannel, find_portchannel_from_member +from tests.common.macsec.macsec_platform_helper import get_portchannel, find_portchannel_from_member logger = logging.getLogger(__name__) diff --git a/tests/macsec/test_deployment.py b/tests/macsec/test_deployment.py index 
58b3278ff02..ce1dfb2c245 100644 --- a/tests/macsec/test_deployment.py +++ b/tests/macsec/test_deployment.py @@ -3,7 +3,7 @@ from tests.common.utilities import wait_until from tests.common import config_reload -from .macsec_helper import check_appl_db +from tests.common.macsec.macsec_helper import check_appl_db logger = logging.getLogger(__name__) pytestmark = [ diff --git a/tests/macsec/test_docker_restart.py b/tests/macsec/test_docker_restart.py index a4fa0bd6664..eda0c32278c 100644 --- a/tests/macsec/test_docker_restart.py +++ b/tests/macsec/test_docker_restart.py @@ -2,7 +2,7 @@ import logging from tests.common.utilities import wait_until -from .macsec_helper import check_appl_db +from tests.common.macsec.macsec_helper import check_appl_db logger = logging.getLogger(__name__) diff --git a/tests/macsec/test_fault_handling.py b/tests/macsec/test_fault_handling.py index 53c19007e9c..ffd2c23b0b4 100644 --- a/tests/macsec/test_fault_handling.py +++ b/tests/macsec/test_fault_handling.py @@ -4,9 +4,10 @@ from tests.common.utilities import wait_until from tests.common.devices.eos import EosHost -from .macsec_helper import get_appl_db -from .macsec_config_helper import disable_macsec_port, enable_macsec_port, delete_macsec_profile, set_macsec_profile -from .macsec_platform_helper import get_eth_ifname, find_portchannel_from_member, get_portchannel +from tests.common.macsec.macsec_helper import get_appl_db +from tests.common.macsec.macsec_config_helper import disable_macsec_port, \ + enable_macsec_port, delete_macsec_profile, set_macsec_profile +from tests.common.macsec.macsec_platform_helper import get_eth_ifname, find_portchannel_from_member, get_portchannel logger = logging.getLogger(__name__) diff --git a/tests/macsec/test_interop_protocol.py b/tests/macsec/test_interop_protocol.py index 78bfd23657d..5351cea9261 100644 --- a/tests/macsec/test_interop_protocol.py +++ b/tests/macsec/test_interop_protocol.py @@ -3,9 +3,10 @@ import ipaddress from tests.common.utilities 
import wait_until -from .macsec_helper import getns_prefix -from .macsec_config_helper import disable_macsec_port, enable_macsec_port -from .macsec_platform_helper import find_portchannel_from_member, get_portchannel, get_lldp_list, sonic_db_cli +from tests.common.macsec.macsec_helper import getns_prefix +from tests.common.macsec.macsec_config_helper import disable_macsec_port, enable_macsec_port +from tests.common.macsec.macsec_platform_helper import find_portchannel_from_member, \ + get_portchannel, get_lldp_list, sonic_db_cli from tests.common.helpers.snmp_helpers import get_snmp_output logger = logging.getLogger(__name__) diff --git a/tests/macsec/test_interop_wan_isis.py b/tests/macsec/test_interop_wan_isis.py index 6e6e80527bc..ab0530aa708 100644 --- a/tests/macsec/test_interop_wan_isis.py +++ b/tests/macsec/test_interop_wan_isis.py @@ -3,9 +3,9 @@ from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert -from .macsec_platform_helper import get_portchannel -from .macsec_platform_helper import find_portchannel_from_member -from .macsec_config_helper import enable_macsec_port, disable_macsec_port +from tests.common.macsec.macsec_platform_helper import get_portchannel +from tests.common.macsec.macsec_platform_helper import find_portchannel_from_member +from tests.common.macsec.macsec_config_helper import enable_macsec_port, disable_macsec_port logger = logging.getLogger(__name__) From 0f11ff3feaf2cf900bd68f32e4ed6b15a584fefe Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:31:00 +0800 Subject: [PATCH 125/175] Add a new checker to check cross-feature dependency. (#15559) What is the motivation for this PR? We introduced a new approach to PR testing called Impacted area based PR testing. 
In this model, the scope of PR testing is determined by the specific areas of the code that are impacted by the changes, allowing for more focused and efficient testing. This means, we need to establish clear boundaries between different sections of code and minimize dependencies as much as possible. In the tests directory of the sonic-mgmt repository, we have categorized scripts into two main groups: shared scripts and feature-specific scripts. Shared scripts provide common utilities or functionality used across multiple features, while feature-specific scripts are tied to particular features and their corresponding logic. However, the previous codebase contained a significant number of cross-feature dependencies, where scripts from one feature directly referenced or relied on scripts from another. To address this issue and align with our new testing model, we manually reviewed the existing code and removed all cross-feature references. But we need a mechanism to check future modifications and new code to prevent reintroducing these issues. In this PR, we introduce a new checker to identify any cross-feature dependencies. At this stage, since some cross-feature dependencies remain in the code, this checker is configured to flag these dependencies without causing the entire test to fail. Once all current dependencies are fully removed, any reintroduced cross-feature dependencies detected by this checker will result in a test failure. How did you do it? In this PR, we introduce a new checker to identify any cross-feature dependencies. At this stage, since some cross-feature dependencies remain in the code, this checker is configured to flag these dependencies without causing the entire test to fail. Once all current dependencies are fully removed, any reintroduced cross-feature dependencies detected by this checker will result in a test failure. How did you verify/test it? 
--- .azure-pipelines/dependency-check.yml | 8 + .azure-pipelines/dependency_check/README.md | 108 +++++++++ .azure-pipelines/dependency_check/__init__.py | 0 .../dependency_check/dependency_check.py | 218 ++++++++++++++++++ azure-pipelines.yml | 9 + 5 files changed, 343 insertions(+) create mode 100644 .azure-pipelines/dependency-check.yml create mode 100644 .azure-pipelines/dependency_check/README.md create mode 100644 .azure-pipelines/dependency_check/__init__.py create mode 100644 .azure-pipelines/dependency_check/dependency_check.py diff --git a/.azure-pipelines/dependency-check.yml b/.azure-pipelines/dependency-check.yml new file mode 100644 index 00000000000..ea9161927c3 --- /dev/null +++ b/.azure-pipelines/dependency-check.yml @@ -0,0 +1,8 @@ +steps: +- script: | + set -x + + pip3 install natsort + + python3 ./.azure-pipelines/dependency_check/dependency_check.py tests + displayName: "Dependency Check" diff --git a/.azure-pipelines/dependency_check/README.md b/.azure-pipelines/dependency_check/README.md new file mode 100644 index 00000000000..a5f8731a08f --- /dev/null +++ b/.azure-pipelines/dependency_check/README.md @@ -0,0 +1,108 @@ +## Background +We introduced a new approach to PR testing called _Impacted area based PR testing_. \ +In this model, the scope of PR testing is determined by the specific areas of the code that are impacted by the changes, +allowing for more focused and efficient testing. +This means, we need to establish clear boundaries between different sections of code +and minimize dependencies as much as possible. + +We can consider the test scripts in this way: +``` +sonic-mgmgt + | + | - tests + | + | - common ---------- shared + | - arp -----| + | - ecmp | --- features + | - vlan | + | - ...... -----| +``` +Within the tests directory in sonic-mgmt, we categorize scripts into two sections: shared and features. +Scripts in the common folder fall under the shared section and can be utilized across different folders. 
+In contrast, scripts in other folders belong to the features section, representing specific functionalities such as arp, ecmp, and vlan, +and are intended for use within their respective folders. + +However, the previous code had numerous cross-feature dependencies. +To achieve the above goal, we have removed the cross-feature references from the existing code. +But we need a mechanism to check future modifications and new code to prevent reintroducing these issues. + + +## Design +The _ast_ module helps python applications to process trees of the python abstract syntax grammar. +This module produces a tree of objects, where each object is an instance of a class that inherits from _ast.AST_. +There are two classes related to the imports: + +#### ast.Import + - An import statement such as `import x as a,y` + - _names_ is a list of alias nodes. +``` + Import(names=[ + alias(name='x', + asname='a') + ]), + Import(names=[ + alias(name='y', + asname=None) + ]), +``` +#### ast.ImportFrom + - Represents `from x import y,z`. + - _module_ is a raw string of the ‘from’ name, without any leading dots, or None for statements such as `from . import foo.` + - _level_ is an integer holding the level of the relative import (0 means absolute import) +``` +ImportFrom( + module='x', + names=[ + alias(name='y', asname=None), + alias(name='z', asname=None)], + level=0) +``` + +To achieve our goal, we need to follow these steps. 
+ + Gather all scripts to be analyzed + + Identify all imported modules in each script along with their import paths + + Compare each imported path with its corresponding script path + +### Gather all scripts to be analyzed +To collect all scripts for analysis, +we can use `os.walk` to gather every script within the specified path + +### Identify all imported modules in each script along with their import paths +To identify all imported modules, +we can use the _ast_ module, as mentioned above, to analyze each collected script and obtain its abstract syntax tree. +Then, using the _ast.ImportFrom_ and _ast.Import_ classes, we can extract the imported modules from each script. + + +Here are the steps and configuration methods for Python to search for module paths: ++ The current script's directory or the directory from which the Python interpreter is started. ++ Standard library path: Contains the standard library modules from the Python installation directory. ++ Third-party library path: For example, the site-packages directory, where third-party libraries installed via pip and other tools are stored. ++ Environment variable path: Custom directories can be added to sys.path via the PYTHONPATH environment variable. + +As paths of project is not included in the sys path, we need to add them into sys path first. + ++ `importlib.util.find_spec` is a function in Python that is used to find the specification of a module. + The specification contains details about the module, such as its location (file path), loader, and other attributes. + It can find the path of standard library, third-party libraries and custom modules which are imported with no hierarchy. + + For statement like `import math`, `from tests.common.plugins.allure_wrapper import allure_step_wrapper`, `from gnmi_utils import apply_gnmi_file`, + we can use `importlib.util.find_spec` to get their imported path. 
++ For hierarchy imported, we can calculate the abs path using the current file path and level to navigate up to the corresponding directory. + +### Compare each imported path with its corresponding script path +We will focus only on imported paths that start with `sonic-mgmt/tests`. +Paths imported from other folders within `sonic-mgmt` are treated as common locations. + +For paths beginning with `sonic-mgmt/tests`, there are three special cases: ++ sonic-mgmt/tests/common ++ sonic-mgmt/tests/ptf_runner.py ++ sonic-mgmt/tests/conftest.py +which are also considered as common paths. + +For all other paths, we will compare each imported path to the path of the corresponding script based on the following principles: ++ The first-level folders under `sonic-mgmt/tests` (e.g., arp, bgp) are considered feature folders. ++ If both the imported module and the script are in the same feature folder, there is no cross-feature dependency. ++ If they are in different feature folders, it indicates a cross-feature dependency, causing the check to fail. + + +We will add this check as a step in `Pre_test` in PR test. 
import ast
import sys
import os
import importlib.util
from contextlib import contextmanager


def collect_all_scripts():
    """
    Recursively collect every file ending with ".py" under the directory
    given as the first command-line argument.

    Returns:
        list: Naturally sorted list of ".py" file paths.
    """
    # natsort is a third-party dependency of the pipeline step; import it
    # lazily so the pure helper functions in this module stay importable
    # (and unit-testable) on hosts that do not have it installed.
    from natsort import natsorted

    location = sys.argv[1]
    files = []
    for root, _dirs, filenames in os.walk(location):
        for name in filenames:
            if name.endswith(".py"):
                files.append(os.path.join(root, name))
    return natsorted(files)


@contextmanager
def set_sys_path(file_path):
    """
    Temporarily add all directories related to the file into sys.path.

    Walks from the file's directory up to the filesystem root, appending each
    ancestor directory (plus "<...>/tests/common" whenever the ancestor is the
    "tests" folder) so absolute imports in the analyzed script can resolve.

    Args:
        file_path (str): Path of the test script being analyzed.
    Returns:
        None
    """
    original_sys_path = sys.path.copy()
    try:
        current_dir = os.path.abspath(os.path.dirname(file_path))
        # dirname() reaches a fixed point at the filesystem root; stop there.
        while current_dir != os.path.dirname(current_dir):
            if current_dir.endswith("/tests"):
                sys.path.append(os.path.join(current_dir, "common"))
            sys.path.append(current_dir)
            current_dir = os.path.dirname(current_dir)
        yield
    finally:
        # Always restore, even if analysis of the file raised.
        sys.path = original_sys_path


def get_module_path(imported_module, level=0, file_path=""):
    """
    Get the absolute path of the imported module.

    Args:
        imported_module (str): Module name as written in the import statement.
        level (int): Relative-import level reported by ast (0 = absolute).
        file_path (str): Path of the script containing the import.
    Returns:
        str/None: The absolute path of the imported module, or None when it
        cannot (or need not) be resolved.
    """
    try:
        if level == 0:
            # Level 0 is an absolute import, so the import machinery can
            # resolve it directly via importlib.util.find_spec.
            spec = importlib.util.find_spec(imported_module)
            if spec and spec.origin:
                return spec.origin
            # BUGFIX: previously this case fell through to the relative-import
            # arithmetic below, fabricating a bogus path rooted at the *file*
            # path itself (range(0) strips no component). Specs without an
            # origin -- e.g. PEP 420 namespace packages -- are unresolvable.
            return None
        if level == 1:
            # Relative to the current package: the module shares the script's
            # dirname, so it can never cross a feature boundary. Skip it.
            return None
        # For level > 1, navigate `level` directories up from the file and
        # append the dotted module path to form an absolute path.
        base_dir = os.path.abspath(file_path)
        for _ in range(level):
            base_dir = os.path.dirname(base_dir)
        return os.path.join(base_dir, *imported_module.split("."))
    except ModuleNotFoundError:
        return None


def get_imported_modules(files):
    """
    Get all imported modules in each file.

    Each record holds the import style ("import" vs "from_import"), the
    module name, its resolved absolute path (or None) and alias information.

    Args:
        files (list): Paths of ".py" files to analyze.
    Returns:
        dict: Mapping of file path to the list of import records, e.g.
            {
                'tests/arp/test_x.py': [
                    {'type': 'from_import',
                     'module': 'tests.common.utilities',
                     'module_path': '/data/sonic-mgmt/tests/common/utilities.py',
                     'alias': 'wait_until',
                     'asname': None},
                ...],
            ...}
    """
    imported_modules_in_files = {}
    for file_path in files:
        # Resolving absolute imports needs the file's package roots on
        # sys.path; the context manager restores sys.path afterwards.
        with set_sys_path(file_path):
            # Parse the file into an abstract syntax tree and pull every
            # import out of ast.Import / ast.ImportFrom nodes.
            with open(file_path, "r", encoding="utf-8") as source:
                tree = ast.parse(source.read(), filename=file_path)
            records = []
            for node in ast.walk(tree):
                # `import x[, y as z]` statements
                if isinstance(node, ast.Import):
                    for entry in node.names:
                        records.append({
                            "type": "import",
                            "module": entry.name,
                            "module_path": get_module_path(entry.name),
                            "asname": entry.asname
                        })
                # `from x import y[, z]` statements
                if isinstance(node, ast.ImportFrom):
                    for entry in node.names:
                        records.append({
                            "type": "from_import",
                            "module": node.module,
                            "module_path": get_module_path(node.module, node.level, file_path),
                            "alias": entry.name,
                            "asname": entry.asname
                        })
            imported_modules_in_files[file_path] = records
    return imported_modules_in_files


def get_feature_path(path):
    """
    Retrieve the absolute path of the top-level feature directory containing
    ``path`` -- i.e. the first-level folder under "sonic-mgmt/tests", such as
    "tests/acl" or "tests/bgp".

    Args:
        path (str/None): Path of a file or of an imported module.
    Returns:
        str/None: The absolute feature path, or None when the path is not
        under a "tests" directory.
    """
    if path is None:
        return None

    file_path = os.path.abspath(path)
    target_path = "tests"
    # NOTE: find() matches the first occurrence of "tests" anywhere in the
    # path; good enough for paths rooted at the repository's tests folder.
    index = file_path.find(target_path)
    if index == -1:
        return None

    project_path = file_path[:index + len(target_path)]
    feature = file_path[len(project_path) + 1:].split("/")[0]
    return os.path.join(project_path, feature)


def check_cross_dependency(imports_in_script):
    """
    Check if there are cross-feature dependencies in each file.

    Imports are allowed from the script's own feature folder and from the
    shared locations "tests/common", "tests/ptf_runner.py" and
    "tests/conftest.py"; anything else is flagged.

    Args:
        imports_in_script (dict): Output of get_imported_modules().
    Returns:
        bool: True when at least one cross-feature dependency was found,
        False otherwise.
    """
    cross_dependency = False
    for file_path, imported_modules in imports_in_script.items():
        file_feature_path = get_feature_path(file_path)
        if file_feature_path is None:
            # Defensive: a scanned file outside "tests" has no feature
            # folder, so there is nothing to compare against (the original
            # would crash in os.path.dirname(None) here).
            continue
        project_path = os.path.dirname(file_feature_path)
        # Imports from these paths are allowed.
        allowed_paths = [
            os.path.join(project_path, "common"),
            os.path.join(project_path, "ptf_runner.py"),
            os.path.join(project_path, "conftest.py"),
            file_feature_path,
        ]
        for imported_module in imported_modules:
            imported_feature_path = get_feature_path(imported_module["module_path"])
            if imported_feature_path is not None and imported_feature_path not in allowed_paths:
                print("There is a cross-feature dependence. File: {}, import module: {}"
                      .format(file_path, imported_module["module"]))
                cross_dependency = True
    return cross_dependency


if __name__ == '__main__':
    files = collect_all_scripts()
    imported_modules_in_files = get_imported_modules(files)
    cross_dependency = check_cross_dependency(imported_modules_in_files)
    if cross_dependency:
        # Intentionally no non-zero exit yet: known cross-feature
        # dependencies remain in the tree, so this step only warns for now.
        print("\033[31mThere are cross-feature dependencies, which is not allowed in our repo\033[0m")
        print("\033[31mTo resolve this issue, please move the shared function to common place, "
              "such as 'tests/common'\033[0m")
not supported." + conditions: + - "platform in ['x86_64-8122_64eh_o-r0', 'x86_64-8122_64ehf_o-r0']" + generic_config_updater/test_dhcp_relay.py: skip: reason: "Need to skip for platform x86_64-8111_32eh_o-r0 or backend topology / generic_config_updater is not a supported feature for T2" From 142d8ec68b5278fb7b6407ee1a7a185d3aa30aca Mon Sep 17 00:00:00 2001 From: Chun'ang Li <39114813+lerry-lee@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:03:17 +0800 Subject: [PATCH 127/175] Revert "Update j2cli to jinjanator. (#15600)" (#15636) This reverts commit 174c6bf76535d1022f7a652daa251d605a5b3d21. --- ansible/setup-management-network.sh | 8 ++++---- docs/testbed/README.testbed.Setup.md | 2 +- docs/testbed/README.testbed.VsSetup.md | 2 +- setup-container.sh | 12 +++--------- 4 files changed, 9 insertions(+), 15 deletions(-) diff --git a/ansible/setup-management-network.sh b/ansible/setup-management-network.sh index fd2eae5892b..3347d216b6d 100755 --- a/ansible/setup-management-network.sh +++ b/ansible/setup-management-network.sh @@ -33,10 +33,10 @@ echo "Refreshing apt package lists..." apt-get update echo -echo "STEP 1: Checking for jinjanator package..." -if ! command -v jinjanate; then - echo "jinjanator not found, installing jinjanator" - cmd="install --user jinjanator==24.4.0" +echo "STEP 1: Checking for j2cli package..." +if ! command -v j2; then + echo "j2cli not found, installing j2cli" + cmd="install --user j2cli==0.3.10" if ! command -v pip &> /dev/null; then pip3 $cmd else diff --git a/docs/testbed/README.testbed.Setup.md b/docs/testbed/README.testbed.Setup.md index c6dcf6431fb..f26f162befa 100644 --- a/docs/testbed/README.testbed.Setup.md +++ b/docs/testbed/README.testbed.Setup.md @@ -20,7 +20,7 @@ This document describes the steps to setup the testbed and deploy a topology. 
``` - Install Python prerequisites ``` - sudo pip3 install jinjanator + sudo pip3 install j2cli ``` - Install Docker (all credits to https://docs.docker.com/engine/install/ubuntu/ ) ``` diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index daa38c6fbca..f6eea3fab0e 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -22,7 +22,7 @@ First, we need to prepare the host where we will be configuring the virtual test ``` sudo apt install python python-pip openssh-server # v0.3.10 Jinja2 is required, lower version may cause uncompatible issue - sudo pip install jinjanate==24.4.0 + sudo pip install j2cli==0.3.10 ``` 3. Run the host setup script to install required packages and initialize the management bridge network diff --git a/setup-container.sh b/setup-container.sh index 5318aa806e9..90bae4ef4f8 100755 --- a/setup-container.sh +++ b/setup-container.sh @@ -275,7 +275,7 @@ ROOT_PASS=${ROOT_PASS} EOF log_info "generate a Dockerfile: ${TMP_DIR}/Dockerfile" - jinjanate -o "${TMP_DIR}/Dockerfile" "${TMP_DIR}/Dockerfile.j2" "${TMP_DIR}/data.env" || \ + j2 -o "${TMP_DIR}/Dockerfile" "${TMP_DIR}/Dockerfile.j2" "${TMP_DIR}/data.env" || \ log_error "failed to generate a Dockerfile: ${TMP_DIR}/Dockerfile" log_info "building docker image from ${TMP_DIR}: ${LOCAL_IMAGE} ..." @@ -445,14 +445,8 @@ if docker ps -a --format "{{.Names}}" | grep -q "^${CONTAINER_NAME}$"; then fi fi -if ! which jinjanate &> /dev/null; then - echo "jinjanator not found, installing jinjanator" - cmd="install --user jinjanator==24.4.0" - if ! command -v pip &> /dev/null; then - pip3 $cmd - else - pip $cmd - fi +if ! 
which j2 &> /dev/null; then + exit_failure "missing Jinja2 templates support: make sure j2cli package is installed" fi pull_sonic_mgmt_docker_image From 71793e7f1282889f0d2b3a38b8084f9ed1e4d4cc Mon Sep 17 00:00:00 2001 From: vkjammala-arista <152394203+vkjammala-arista@users.noreply.github.com> Date: Wed, 20 Nov 2024 15:58:13 +0530 Subject: [PATCH 128/175] [sonic-mgmt] Correct conditional_mark for testcase "test_standby_tor_downstream_loopback_route_readded" (#15534) What is the motivation for this PR? dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_loopback_route_readded was being skipped earlier with reason This testcase is designed for single tor testbed with mock dualtor config. This has started running recently after infra changes done for conditional_mark through #14395. How did you do it? Updated conditional_mark to skip this test for dualtor topologies (to be in consistent with earlier behaviour as the test was getting skipped earlier). How did you verify/test it? With the above change verified that test is getting skipped on dualtor topologies. --- .../plugins/conditional_mark/tests_mark_conditions.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 51de4527a66..cd5255da248 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -477,9 +477,9 @@ dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_bg dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_loopback_route_readded: skip: - reason: "This testcase is designed for single tor testbed with mock dualtor config and dualtor." + reason: "This testcase is designed for single tor testbed with mock dualtor config." 
conditions: - - "(topo_type not in ['t0'])" + - "(topo_type not in ['t0']) or ('dualtor' in topo_name)" dualtor/test_orchagent_standby_tor_downstream.py::test_standby_tor_downstream_t1_link_recovered: skip: From 7136df6b9502dac594c2cc7ad9cb049c480fa1cb Mon Sep 17 00:00:00 2001 From: Xichen96 Date: Wed, 20 Nov 2024 18:30:34 +0800 Subject: [PATCH 129/175] [arp] add conntrack table test for incomplete neighbor (#14747) What is the motivation for this PR? Need test to test conntrack table size when there is neighbor in incomplete state. How did you do it? Create neighbor in incomplete state, ping to create icmpv6 packets, and check conntrack table size. How did you verify/test it? Run test --- tests/arp/test_stress_arp.py | 120 ++++++++++++++++++++++++++++++----- 1 file changed, 103 insertions(+), 17 deletions(-) diff --git a/tests/arp/test_stress_arp.py b/tests/arp/test_stress_arp.py index c6dcd250261..dcd1afb4e07 100644 --- a/tests/arp/test_stress_arp.py +++ b/tests/arp/test_stress_arp.py @@ -1,6 +1,7 @@ import logging import time import pytest +import random from .arp_utils import MacToInt, IntToMac, get_crm_resources, fdb_cleanup, \ clear_dut_arp_cache, get_fdb_dynamic_mac_count import ptf.testutils as testutils @@ -11,9 +12,12 @@ from tests.common.utilities import wait_until, increment_ipv6_addr from tests.common.errors import RunAnsibleModuleFail + ARP_BASE_IP = "172.16.0.1/16" ARP_SRC_MAC = "00:00:01:02:03:04" ENTRIES_NUMBERS = 12000 +TEST_CONNTRACK_TIMEOUT = 300 +TEST_INCOMPLETE_NEIGHBOR_CNT = 10 logger = logging.getLogger(__name__) @@ -95,15 +99,15 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, if normalized_level is None: normalized_level = "debug" asic_type = duthost.facts['asic_type'] - ipv4_avaliable = get_crm_resources(duthost, "ipv4_neighbor", "available") - fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") - pytest_assert(ipv4_avaliable > 0 and fdb_avaliable > 0, "Entries have been filled") + 
ipv4_available = get_crm_resources(duthost, "ipv4_neighbor", "available") + fdb_available = get_crm_resources(duthost, "fdb_entry", "available") + pytest_assert(ipv4_available > 0 and fdb_available > 0, "Entries have been filled") - arp_avaliable = min(min(ipv4_avaliable, fdb_avaliable), ENTRIES_NUMBERS) + arp_available = min(min(ipv4_available, fdb_available), ENTRIES_NUMBERS) pytest_require(garp_enabled, 'Gratuitous ARP not enabled for this device') ptf_intf_ipv4_hosts = genrate_ipv4_ip() - ptf_intf_ipv4_hosts = ptf_intf_ipv4_hosts[1:arp_avaliable + 1] + ptf_intf_ipv4_hosts = ptf_intf_ipv4_hosts[1:arp_available + 1] _, _, intf1_index, _, = intfs_for_test loop_times = LOOP_TIMES_LEVEL_MAP[normalized_level] @@ -116,9 +120,9 @@ def test_ipv4_arp(duthost, garp_enabled, ip_and_intf_info, intfs_for_test, # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" - .format(arp_avaliable, get_fdb_dynamic_mac_count(duthost))) + .format(arp_available, get_fdb_dynamic_mac_count(duthost))) pytest_assert(wait_until(20, 1, 0, - lambda: abs(arp_avaliable - get_fdb_dynamic_mac_count(duthost)) < 250), + lambda: abs(arp_available - get_fdb_dynamic_mac_count(duthost)) < 250), "ARP Table Add failed") finally: try: @@ -147,6 +151,7 @@ def generate_global_addr(mac): return ipv6 +# generate neighbor solicitation packet for test def ipv6_packets_for_test(ip_and_intf_info, fake_src_mac, fake_src_addr): _, _, src_addr_v6, _, _ = ip_and_intf_info fake_src_mac = fake_src_mac @@ -163,14 +168,14 @@ def ipv6_packets_for_test(ip_and_intf_info, fake_src_mac, fake_src_addr): return ns_pkt -def add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable): - for entry in range(0, nd_avaliable): +def add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_available): + for entry in range(0, nd_available): nd_entry_mac = 
IntToMac(MacToInt(ARP_SRC_MAC) + entry) fake_src_addr = generate_global_addr(nd_entry_mac) ns_pkt = ipv6_packets_for_test(ip_and_intf_info, nd_entry_mac, fake_src_addr) testutils.send_packet(ptfadapter, ptf_intf_index, ns_pkt) - logger.info("Sending {} ipv6 neighbor entries".format(nd_avaliable)) + logger.info("Sending {} ipv6 neighbor entries".format(nd_available)) def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, @@ -185,23 +190,23 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, normalized_level = "debug" asic_type = duthost.facts['asic_type'] loop_times = LOOP_TIMES_LEVEL_MAP[normalized_level] - ipv6_avaliable = get_crm_resources(duthost, "ipv6_neighbor", "available") - fdb_avaliable = get_crm_resources(duthost, "fdb_entry", "available") - pytest_assert(ipv6_avaliable > 0 and fdb_avaliable > 0, "Entries have been filled") + ipv6_available = get_crm_resources(duthost, "ipv6_neighbor", "available") + fdb_available = get_crm_resources(duthost, "fdb_entry", "available") + pytest_assert(ipv6_available > 0 and fdb_available > 0, "Entries have been filled") - nd_avaliable = min(min(ipv6_avaliable, fdb_avaliable), ENTRIES_NUMBERS) + nd_available = min(min(ipv6_available, fdb_available), ENTRIES_NUMBERS) while loop_times > 0: loop_times -= 1 try: - add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable) + add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_available) if asic_type != 'vs': # There is a certain probability of hash collision, we set the percentage as 1% here # The entries we add will not exceed 10000, so the number we tolerate is 100 logger.debug("Expected route number: {}, real route number {}" - .format(nd_avaliable, get_fdb_dynamic_mac_count(duthost))) + .format(nd_available, get_fdb_dynamic_mac_count(duthost))) pytest_assert(wait_until(20, 1, 0, - lambda: abs(nd_avaliable - get_fdb_dynamic_mac_count(duthost)) < 250), + lambda: abs(nd_available - get_fdb_dynamic_mac_count(duthost)) < 
250), "Neighbor Table Add failed") finally: try: @@ -214,3 +219,84 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, raise e # Wait for 10 seconds before starting next loop time.sleep(10) + + +def send_ipv6_echo_request(ptfadapter, dut_mac, ip_and_intf_info, ptf_intf_index, nd_available, tgt_cnt): + for i in range(tgt_cnt): + entry = random.randrange(0, nd_available) + nd_entry_mac = IntToMac(MacToInt(ARP_SRC_MAC) + entry) + fake_src_addr = generate_global_addr(nd_entry_mac) + _, _, src_addr_v6, _, _ = ip_and_intf_info + tgt_addr = increment_ipv6_addr(src_addr_v6) + er_pkt = testutils.simple_icmpv6_packet(eth_dst=dut_mac, + eth_src=nd_entry_mac, + ipv6_src=fake_src_addr, + ipv6_dst=tgt_addr, + icmp_type=128, + ) + identifier = random.randint(10000, 50000) + er_pkt.load = identifier.to_bytes(2, "big") + b"D" * 40 + testutils.send_packet(ptfadapter, ptf_intf_index, er_pkt) + + +def test_ipv6_nd_incomplete(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, + ptfadapter, get_function_completeness_level, proxy_arp_enabled): + _, _, ptf_intf_ipv6_addr, _, ptf_intf_index = ip_and_intf_info + ptf_intf_ipv6_addr = increment_ipv6_addr(ptf_intf_ipv6_addr) + pytest_require(proxy_arp_enabled, 'Proxy ARP not enabled for all VLANs') + pytest_require(ptf_intf_ipv6_addr is not None, 'No IPv6 VLAN address configured on device') + + ipv6_available = get_crm_resources(duthost, "ipv6_neighbor", "available") + fdb_available = get_crm_resources(duthost, "fdb_entry", "available") + pytest_assert(ipv6_available > 0 and fdb_available > 0, "Entries have been filled") + + nd_available = min(min(ipv6_available, fdb_available), ENTRIES_NUMBERS) + tgt_incomplete_neighbor_cnt = min(nd_available, TEST_INCOMPLETE_NEIGHBOR_CNT) + + max_conntrack = int(duthost.command("cat /proc/sys/net/netfilter/nf_conntrack_max")["stdout"]) + logger.info("nf_conntrack_max: {}".format(max_conntrack)) + # we test a small portion of max_conntrack to see the increase + 
tgt_conntrack_cnt = int(max_conntrack * 0.1) + + conntrack_cnt_pre = int(duthost.command("cat /proc/sys/net/netfilter/nf_conntrack_count")["stdout"]) + logger.info("nf_conntrack_count pre test: {}".format(conntrack_cnt_pre)) + + pytest_assert("[UNREPLIED]" not in duthost.command("sudo conntrack -f ipv6 -L dying")["stdout"], + "unreplied icmpv6 requests ended up in the dying list before test is run") + + orig_conntrack_icmpv6_timeout = int(duthost.command("cat /proc/sys/net/netfilter/" + "nf_conntrack_icmpv6_timeout")["stdout"]) + logger.info("original nf_conntrack_icmpv6_timeout: {}".format(orig_conntrack_icmpv6_timeout)) + + try: + clear_dut_arp_cache(duthost) + + duthost.command("conntrack -F") + + duthost.shell("echo {} > /proc/sys/net/netfilter/nf_conntrack_icmpv6_timeout" + .format(TEST_CONNTRACK_TIMEOUT)) + logger.info("setting nf_conntrack_icmpv6_timeout to {}".format(TEST_CONNTRACK_TIMEOUT)) + + send_ipv6_echo_request(ptfadapter, duthost.facts["router_mac"], ip_and_intf_info, + ptf_intf_index, tgt_incomplete_neighbor_cnt, tgt_conntrack_cnt) + + conntrack_cnt_post = int(duthost.command("cat /proc/sys/net/netfilter/nf_conntrack_count")["stdout"]) + logger.info("nf_conntrack_count post test: {}".format(conntrack_cnt_post)) + + pytest_assert((conntrack_cnt_post - conntrack_cnt_pre) < tgt_conntrack_cnt * 0.1, + "{} echo requests cause large increase in conntrack entries".format(tgt_conntrack_cnt)) + + pytest_assert("[UNREPLIED]" not in duthost.command("conntrack -f ipv6 -L dying")["stdout"], + "unreplied icmpv6 requests ended up in the dying list") + + logger.info("neighbors in INCOMPLETE state: {}" + .format(duthost.command("ip -6 neigh")["stdout"].count("INCOMPLETE"))) + + finally: + duthost.shell("echo {} > /proc/sys/net/netfilter/nf_conntrack_icmpv6_timeout" + .format(orig_conntrack_icmpv6_timeout)) + logger.info("setting nf_conntrack_icmpv6_timeout back to {}".format(orig_conntrack_icmpv6_timeout)) + + duthost.command("conntrack -F") + + 
clear_dut_arp_cache(duthost) From f803ac22ab22be8084ed64d4ad76bd8618d08c45 Mon Sep 17 00:00:00 2001 From: Vivek Verma <137406113+vivekverma-arista@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:14:31 +0530 Subject: [PATCH 130/175] Fix routes/test_route_perf.py (#15620) Description of PR Summary: Fixes #323 Approach What is the motivation for this PR? Regression due to #15452 How did you do it? Added missing quotes to the command. How did you verify/test it? Ran route/test_route_perf.py on Arista 7260CX3 platform with dualtor topology. co-authorized by: jianquanye@microsoft.com --- tests/route/test_route_perf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py index 3488792e9d8..d54f46d95ca 100644 --- a/tests/route/test_route_perf.py +++ b/tests/route/test_route_perf.py @@ -65,7 +65,7 @@ def check_config(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_rand_ if (asic == "broadcom"): broadcom_cmd = "bcmcmd -n " + str(asic_id) if duthost.is_multi_asic else "bcmcmd" - alpm_cmd = "{} {}".format(broadcom_cmd, "conf show l3_alpm_enable") + alpm_cmd = "{} {}".format(broadcom_cmd, '"conf show l3_alpm_enable"') alpm_enable = duthost.command(alpm_cmd)["stdout_lines"][2].strip() logger.info("Checking config: {}".format(alpm_enable)) pytest_assert(alpm_enable == "l3_alpm_enable=2", "l3_alpm_enable is not set for route scaling") From f994b052db6bae9b484797aa403fde8b230775c2 Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Wed, 20 Nov 2024 02:48:50 -0800 Subject: [PATCH 131/175] Fixing service-restart testcases. (#15560) Description of PR Summary: The pfcwd_basic service-restart cases keep failing due to: sonic-net/sonic-buildimage#20637 The ask is not to restart swss multiple times without doing a config reload in between. 
So in this PR: we are doing config-reload for every iteration of the test The swss restart is done only once in one DUT. The asic is randomly picked, and the swss of that ASIC is restarted instead of doing the restart for all asics. Also added checks to make sure the services, interfaces and bgp are up before proceding with the ixia traffic. Approach What is the motivation for this PR? The issue: sonic-net/sonic-buildimage#20637 How did you do it? Pls see the description. How did you verify/test it? Ran it on my TB. =========================================================================================================================== PASSES =========================================================================================================================== ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-True-swss] _____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-False-swss] ____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-True-swss] _____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-False-swss] ____________________________________________________________________________________ ----------------------------------------------------------------------------- generated xml file: /run_logs/ixia/restart-service/2024-11-14-00-05-11/tr_2024-11-14-00-05-11.xml 
------------------------------------------------------------------------------ INFO:root:Can not get Allure report URL. Please check logs ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 01:31:34 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. Please check logs ================================================================================================================== short test summary info =================================================================================================================== PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-False-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-False-swss] ========================================================================================================= 4 passed, 7 warnings in 5180.68s (1:26:20) ========================================================================================================= sonic@ixia-sonic-mgmt-whitebox:/data/tests$ -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html =========================================================================================================================== PASSES 
=========================================================================================================================== ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-True-swss] _____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-False-swss] ____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-True-swss] _____________________________________________________________________________________ ____________________________________________________________________________________ test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-False-swss] ____________________________________________________________________________________ ---------------------------------------------------------------------------- generated xml file: /run_logs/ixia/restart-service-2/2024-11-14-02-47-47/tr_2024-11-14-02-47-47.xml ----------------------------------------------------------------------------- INFO:root:Can not get Allure report URL. Please check logs ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 04:14:03 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. 
Please check logs ================================================================================================================== short test summary info =================================================================================================================== PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info0-False-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_multi_lossless_prio_restart_service[multidut_port_info1-False-swss] ========================================================================================================= 4 passed, 7 warnings in 5173.22s (1:26:13) ========================================================================================================= sonic@ixia-sonic-mgmt-whitebox:/data/tests$ =========================================================================================================================== PASSES =========================================================================================================================== ____________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info0-True-swss] ____________________________________________________________________________________ ___________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info0-False-swss] ____________________________________________________________________________________ 
____________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info1-True-swss] ____________________________________________________________________________________ ___________________________________________________________________________________ test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info1-False-swss] ____________________________________________________________________________________ ---------------------------------------------------------------------------- generated xml file: /run_logs/ixia/restart-service-2/2024-11-14-06-39-15/tr_2024-11-14-06-39-15.xml ----------------------------------------------------------------------------- INFO:root:Can not get Allure report URL. Please check logs ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 08:10:42 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. 
Please check logs ================================================================================================================== short test summary info =================================================================================================================== PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info0-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info0-False-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info1-True-swss] PASSED snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py::test_pfcwd_basic_single_lossless_prio_service_restart[multidut_port_info1-False-swss] ========================================================================================================= 4 passed, 7 warnings in 5484.86s (1:31:24) ========================================================================================================= sonic@ixia-sonic-mgmt-whitebox:/data/tests$ co-authorized by: jianquanye@microsoft.com --- .../test_multidut_pfcwd_basic_with_snappi.py | 63 +++++++++++++++---- 1 file changed, 52 insertions(+), 11 deletions(-) diff --git a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py index daf00e18751..9c09f674b45 100644 --- a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py +++ b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py @@ -1,6 +1,7 @@ import pytest import random import logging +import time import re from collections import defaultdict from tests.common.helpers.assertions import pytest_require, pytest_assert # noqa: F401 @@ -13,6 +14,8 @@ from 
tests.common.snappi_tests.qos_fixtures import prio_dscp_map, lossless_prio_list # noqa F401 from tests.common.reboot import reboot # noqa: F401 from tests.common.utilities import wait_until # noqa: F401 +from tests.common.config_reload import config_reload +from tests.common.platform.interface_utils import check_interface_status_of_up_ports from tests.snappi_tests.multidut.pfcwd.files.pfcwd_multidut_basic_helper import run_pfcwd_basic_test from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.snappi_tests.files.helper import skip_pfcwd_test, reboot_duts, \ @@ -29,6 +32,26 @@ def number_of_tx_rx_ports(): yield (1, 1) +@pytest.fixture(autouse=False) +def save_restore_config(setup_ports_and_dut): + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut + timestamp = time.time() + dest = f'~/{timestamp}' + + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + duthost.shell(f"sudo mkdir {dest}; sudo cp /etc/sonic/config*.json {dest}") + duthost.shell("sudo config save -y") + + yield + + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + duthost.shell(f"sudo cp {dest}/config_db*json /etc/sonic/") + duthost.shell("sudo config save -y") + + for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + config_reload(duthost) + + @pytest.mark.parametrize("trigger_pfcwd", [True, False]) def test_pfcwd_basic_single_lossless_prio(snappi_api, # noqa: F811 conn_graph_facts, # noqa: F811 @@ -221,7 +244,8 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, prio_dscp_map, # noqa: F811 restart_service, trigger_pfcwd, - setup_ports_and_dut): # noqa: F811 + setup_ports_and_dut, # noqa: F811 + save_restore_config): """ Verify PFC watchdog basic test works on a single lossless priority after various service restarts @@ -251,6 +275,7 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, logger.info('Port 
dictionary:{}'.format(ports_dict)) for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + up_bgp_neighbors = duthost.get_bgp_neighbors_per_asic("established") # Record current state of critical services. duthost.critical_services_fully_started() @@ -264,6 +289,11 @@ def test_pfcwd_basic_single_lossless_prio_service_restart(snappi_api, logger.info("Wait until the system is stable") pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, check_interface_status_of_up_ports, duthost), + "Not all interfaces are up.") + pytest_assert(wait_until( + WAIT_TIME, INTERVAL, 0, duthost.check_bgp_session_state_all_asics, up_bgp_neighbors, "established")) + else: for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) @@ -300,7 +330,8 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, prio_dscp_map, # noqa F811 restart_service, setup_ports_and_dut, # noqa: F811 - trigger_pfcwd): + trigger_pfcwd, + save_restore_config): """ Verify PFC watchdog basic test works on multiple lossless priorities after various service restarts @@ -330,16 +361,26 @@ def test_pfcwd_basic_multi_lossless_prio_restart_service(snappi_api, logger.info('Port dictionary:{}'.format(ports_dict)) for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): + up_bgp_neighbors = duthost.get_bgp_neighbors_per_asic("established") + # Record current state of critical services. 
+ duthost.critical_services_fully_started() + asic_list = ports_dict[duthost.hostname] - for asic in asic_list: - asic_id = re.match(r"(asic)(\d+)", asic).group(2) - proc = 'swss@' + asic_id - logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) - duthost.command("sudo systemctl reset-failed {}".format(proc)) - duthost.command("sudo systemctl restart {}".format(proc)) - logger.info("Wait until the system is stable") - pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), - "Not all critical services are fully started") + asic = random.sample(asic_list, 1)[0] + asic_id = re.match(r"(asic)(\d+)", asic).group(2) + proc = 'swss@' + asic_id + + logger.info("Issuing a restart of service {} on the dut {}".format(proc, duthost.hostname)) + duthost.command("sudo systemctl reset-failed {}".format(proc)) + duthost.command("sudo systemctl restart {}".format(proc)) + logger.info("Wait until the system is stable") + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, duthost.critical_services_fully_started), + "Not all critical services are fully started") + pytest_assert(wait_until(WAIT_TIME, INTERVAL, 0, check_interface_status_of_up_ports, duthost), + "Not all interfaces are up.") + pytest_assert(wait_until( + WAIT_TIME, INTERVAL, 0, duthost.check_bgp_session_state_all_asics, up_bgp_neighbors, "established")) + else: for duthost in list(set([snappi_ports[0]['duthost'], snappi_ports[1]['duthost']])): logger.info("Issuing a restart of service {} on the dut {}".format(restart_service, duthost.hostname)) From ea31b61aa4f686df8db4c8a770b2627f18b9fb88 Mon Sep 17 00:00:00 2001 From: Abdel Baig <137210298+abdbaig@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:36:52 -0500 Subject: [PATCH 132/175] Handle p bit properly in bfd_responder (#15167) * handle p bit properly in bfd_responder * add missing flags and fix comment * add extra blank line --- ansible/roles/test/files/helpers/bfd_responder.py | 12 
++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ansible/roles/test/files/helpers/bfd_responder.py b/ansible/roles/test/files/helpers/bfd_responder.py index fce30d0b0dc..774393dce42 100644 --- a/ansible/roles/test/files/helpers/bfd_responder.py +++ b/ansible/roles/test/files/helpers/bfd_responder.py @@ -14,6 +14,8 @@ IPv4 = '4' IPv6 = '6' +BFD_FLAG_P_BIT = 5 +BFD_FLAG_F_BIT = 4 def get_if(iff, cmd): @@ -86,13 +88,17 @@ def __init__(self, sessions): def action(self, interface): data = interface.recv() - mac_src, mac_dst, ip_src, ip_dst, bfd_remote_disc, bfd_state = self.extract_bfd_info( + mac_src, mac_dst, ip_src, ip_dst, bfd_remote_disc, bfd_state, bfd_flags = self.extract_bfd_info( data) if ip_dst not in self.sessions: return session = self.sessions[ip_dst] if bfd_state == 3: + # Respond with F bit if P bit is set + if (bfd_flags & (1 << BFD_FLAG_P_BIT)): + session["pkt"].payload.payload.payload.load.flags = (1 << BFD_FLAG_F_BIT) interface.send(session["pkt"]) + session["pkt"].payload.payload.payload.load.flags = 0 return if bfd_state == 2: @@ -101,6 +107,7 @@ def action(self, interface): bfd_pkt_init = self.craft_bfd_packet( session, data, mac_src, mac_dst, ip_src, ip_dst, bfd_remote_disc, 2) bfd_pkt_init.payload.payload.chksum = None + bfd_pkt_init.payload.payload.payload.load.flags = 0 interface.send(bfd_pkt_init) bfd_pkt_init.payload.payload.payload.load.sta = 3 bfd_pkt_init.payload.payload.chksum = None @@ -120,10 +127,11 @@ def extract_bfd_info(self, data): bfdpkt = BFD(ether.payload.payload.payload.load) bfd_remote_disc = bfdpkt.my_discriminator bfd_state = bfdpkt.sta + bfd_flags = bfdpkt.flags if ip_priority != self.bfd_default_ip_priority: raise RuntimeError("Received BFD packet with incorrect priority value: {}".format(ip_priority)) logging.debug('BFD packet info: sip {}, dip {}, priority {}'.format(ip_src, ip_dst, ip_priority)) - return mac_src, mac_dst, ip_src, ip_dst, bfd_remote_disc, bfd_state + return mac_src, mac_dst, 
ip_src, ip_dst, bfd_remote_disc, bfd_state, bfd_flags def craft_bfd_packet(self, session, data, mac_src, mac_dst, ip_src, ip_dst, bfd_remote_disc, bfd_state): ethpart = scapy2.Ether(data) From 5976622f329480f48e12c0b44055fa3135b441fb Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:44:19 +1100 Subject: [PATCH 133/175] fix: return all BGP neighbors for config reload (#15634) --- tests/common/config_reload.py | 2 +- tests/common/devices/multi_asic.py | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index 0b0fe7c2768..b6e2542bece 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -215,7 +215,7 @@ def _config_reload_cmd_wrapper(cmd, executable): time.sleep(wait) if wait_for_bgp: - bgp_neighbors = sonic_host.get_bgp_neighbors_per_asic() + bgp_neighbors = sonic_host.get_bgp_neighbors_per_asic(state="all") pytest_assert( wait_until(wait + 120, 10, 0, sonic_host.check_bgp_session_state_all_asics, bgp_neighbors), "Not all bgp sessions are established after config reload", diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py index 6f541c201af..d879e468481 100644 --- a/tests/common/devices/multi_asic.py +++ b/tests/common/devices/multi_asic.py @@ -549,7 +549,8 @@ def get_bgp_neighbors_per_asic(self, state="established"): Get a diction of BGP neighbor states Args: - state: BGP session state, return neighbor IP of sessions that match this state + state: BGP session state, return neighbor IP of sessions that match this state. If state is "all", + return all neighbors regardless of state. 
Returns: dictionary {namespace: { (neighbor_ip : info_dict)* }} """ @@ -557,9 +558,10 @@ def get_bgp_neighbors_per_asic(self, state="established"): for asic in self.asics: bgp_neigh[asic.namespace] = {} bgp_info = asic.bgp_facts()["ansible_facts"]["bgp_neighbors"] - for k, v in list(bgp_info.items()): - if v["state"] != state: - bgp_info.pop(k) + if state != "all": + for k, v in list(bgp_info.items()): + if v["state"] != state: + bgp_info.pop(k) bgp_neigh[asic.namespace].update(bgp_info) return bgp_neigh @@ -598,7 +600,7 @@ def check_bgp_session_state_all_asics(self, bgp_neighbors, state="established"): """ for asic in self.asics: if asic.namespace in bgp_neighbors: - neigh_ips = [k.lower() for k, v in list(bgp_neighbors[asic.namespace].items()) if v["state"] == state] + neigh_ips = [k.lower() for k, v in list(bgp_neighbors[asic.namespace].items())] if not asic.check_bgp_session_state(neigh_ips, state): return False return True From 5b9020237214db2a4d78637fb37110850a03910b Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:48:55 +0800 Subject: [PATCH 134/175] Test IPV6 after all other test (#15583) Test test_ro_user_ipv6 after all other tests. Why I did it When IPV6 test case failed on some IPV6 not stable device, some other test also will failed. How I did it Test test_ro_user_ipv6 after all other tests. How to verify it Pass all test case. Description for the changelog Test test_ro_user_ipv6 after all other tests. 
--- tests/tacacs/test_ro_user.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py index 847ee573100..1def4711877 100644 --- a/tests/tacacs/test_ro_user.py +++ b/tests/tacacs/test_ro_user.py @@ -81,18 +81,6 @@ def test_ro_user(localhost, duthosts, enum_rand_one_per_hwsku_hostname, tacacs_c check_output(res, 'test', 'remote_user') -def test_ro_user_ipv6(localhost, ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, tacacs_creds, check_tacacs_v6): - duthost = duthosts[enum_rand_one_per_hwsku_hostname] - dutip = duthost.mgmt_ip - - res = ssh_remote_run_retry(localhost, dutip, ptfhost, - tacacs_creds['tacacs_ro_user'], - tacacs_creds['tacacs_ro_user_passwd'], - "cat /etc/passwd") - - check_output(res, 'testadmin', 'remote_user_su') - - def test_ro_user_allowed_command(localhost, duthosts, enum_rand_one_per_hwsku_hostname, tacacs_creds, check_tacacs): duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutip = duthost.mgmt_ip @@ -214,3 +202,15 @@ def test_ro_user_banned_command(localhost, duthosts, enum_rand_one_per_hwsku_hos banned = ssh_remote_ban_run(localhost, dutip, tacacs_creds['tacacs_ro_user'], tacacs_creds['tacacs_ro_user_passwd'], command) pytest_assert(banned, "command '{}' authorized".format(command)) + + +def test_ro_user_ipv6(localhost, ptfhost, duthosts, enum_rand_one_per_hwsku_hostname, tacacs_creds, check_tacacs_v6): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + dutip = duthost.mgmt_ip + + res = ssh_remote_run_retry(localhost, dutip, ptfhost, + tacacs_creds['tacacs_ro_user'], + tacacs_creds['tacacs_ro_user_passwd'], + "cat /etc/passwd") + + check_output(res, 'testadmin', 'remote_user_su') From 1f2e6d6547cedf31e0b3a1ea157e0bbc727a13d2 Mon Sep 17 00:00:00 2001 From: Wenda Chu <32250288+w1nda@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:19:13 +0800 Subject: [PATCH 135/175] add topo_t0-isolated-u254d2, topo_t0-isolated-u510d2, 
topo_t1-isolated-u2d254, topo_t1-isolated-u2d510 topo (#15355) Add four topologies: t0 role with 254 uplinks and 2 downlinks t0 role with 510 uplinks and 2 downlinks t1 role with 2 uplinks and 254 downlinks t1 role with 2 uplinks and 510 downlinks --- ansible/vars/topo_t0-isolated-u254d2.yml | 5370 ++++++++++ ansible/vars/topo_t0-isolated-u510d2.yml | 10746 +++++++++++++++++++ ansible/vars/topo_t1-isolated-u2d254.yaml | 5650 ++++++++++ ansible/vars/topo_t1-isolated-u2d510.yaml | 11282 ++++++++++++++++++++ 4 files changed, 33048 insertions(+) create mode 100644 ansible/vars/topo_t0-isolated-u254d2.yml create mode 100644 ansible/vars/topo_t0-isolated-u510d2.yml create mode 100644 ansible/vars/topo_t1-isolated-u2d254.yaml create mode 100644 ansible/vars/topo_t1-isolated-u2d510.yaml diff --git a/ansible/vars/topo_t0-isolated-u254d2.yml b/ansible/vars/topo_t0-isolated-u254d2.yml new file mode 100644 index 00000000000..14f6e7c767e --- /dev/null +++ b/ansible/vars/topo_t0-isolated-u254d2.yml @@ -0,0 +1,5370 @@ +topology: + host_interfaces: + - 0 + - 1 + VMs: + ARISTA01T1: + vlans: + - 2 + vm_offset: 0 + ARISTA02T1: + vlans: + - 3 + vm_offset: 1 + ARISTA03T1: + vlans: + - 4 + vm_offset: 2 + ARISTA04T1: + vlans: + - 5 + vm_offset: 3 + ARISTA05T1: + vlans: + - 6 + vm_offset: 4 + ARISTA06T1: + vlans: + - 7 + vm_offset: 5 + ARISTA07T1: + vlans: + - 8 + vm_offset: 6 + ARISTA08T1: + vlans: + - 9 + vm_offset: 7 + ARISTA09T1: + vlans: + - 10 + vm_offset: 8 + ARISTA10T1: + vlans: + - 11 + vm_offset: 9 + ARISTA11T1: + vlans: + - 12 + vm_offset: 10 + ARISTA12T1: + vlans: + - 13 + vm_offset: 11 + ARISTA13T1: + vlans: + - 14 + vm_offset: 12 + ARISTA14T1: + vlans: + - 15 + vm_offset: 13 + ARISTA15T1: + vlans: + - 16 + vm_offset: 14 + ARISTA16T1: + vlans: + - 17 + vm_offset: 15 + ARISTA17T1: + vlans: + - 18 + vm_offset: 16 + ARISTA18T1: + vlans: + - 19 + vm_offset: 17 + ARISTA19T1: + vlans: + - 20 + vm_offset: 18 + ARISTA20T1: + vlans: + - 21 + vm_offset: 19 + ARISTA21T1: + vlans: + - 22 
+ vm_offset: 20 + ARISTA22T1: + vlans: + - 23 + vm_offset: 21 + ARISTA23T1: + vlans: + - 24 + vm_offset: 22 + ARISTA24T1: + vlans: + - 25 + vm_offset: 23 + ARISTA25T1: + vlans: + - 26 + vm_offset: 24 + ARISTA26T1: + vlans: + - 27 + vm_offset: 25 + ARISTA27T1: + vlans: + - 28 + vm_offset: 26 + ARISTA28T1: + vlans: + - 29 + vm_offset: 27 + ARISTA29T1: + vlans: + - 30 + vm_offset: 28 + ARISTA30T1: + vlans: + - 31 + vm_offset: 29 + ARISTA31T1: + vlans: + - 32 + vm_offset: 30 + ARISTA32T1: + vlans: + - 33 + vm_offset: 31 + ARISTA33T1: + vlans: + - 34 + vm_offset: 32 + ARISTA34T1: + vlans: + - 35 + vm_offset: 33 + ARISTA35T1: + vlans: + - 36 + vm_offset: 34 + ARISTA36T1: + vlans: + - 37 + vm_offset: 35 + ARISTA37T1: + vlans: + - 38 + vm_offset: 36 + ARISTA38T1: + vlans: + - 39 + vm_offset: 37 + ARISTA39T1: + vlans: + - 40 + vm_offset: 38 + ARISTA40T1: + vlans: + - 41 + vm_offset: 39 + ARISTA41T1: + vlans: + - 42 + vm_offset: 40 + ARISTA42T1: + vlans: + - 43 + vm_offset: 41 + ARISTA43T1: + vlans: + - 44 + vm_offset: 42 + ARISTA44T1: + vlans: + - 45 + vm_offset: 43 + ARISTA45T1: + vlans: + - 46 + vm_offset: 44 + ARISTA46T1: + vlans: + - 47 + vm_offset: 45 + ARISTA47T1: + vlans: + - 48 + vm_offset: 46 + ARISTA48T1: + vlans: + - 49 + vm_offset: 47 + ARISTA49T1: + vlans: + - 50 + vm_offset: 48 + ARISTA50T1: + vlans: + - 51 + vm_offset: 49 + ARISTA51T1: + vlans: + - 52 + vm_offset: 50 + ARISTA52T1: + vlans: + - 53 + vm_offset: 51 + ARISTA53T1: + vlans: + - 54 + vm_offset: 52 + ARISTA54T1: + vlans: + - 55 + vm_offset: 53 + ARISTA55T1: + vlans: + - 56 + vm_offset: 54 + ARISTA56T1: + vlans: + - 57 + vm_offset: 55 + ARISTA57T1: + vlans: + - 58 + vm_offset: 56 + ARISTA58T1: + vlans: + - 59 + vm_offset: 57 + ARISTA59T1: + vlans: + - 60 + vm_offset: 58 + ARISTA60T1: + vlans: + - 61 + vm_offset: 59 + ARISTA61T1: + vlans: + - 62 + vm_offset: 60 + ARISTA62T1: + vlans: + - 63 + vm_offset: 61 + ARISTA63T1: + vlans: + - 64 + vm_offset: 62 + ARISTA64T1: + vlans: + - 65 + vm_offset: 63 + 
ARISTA65T1: + vlans: + - 66 + vm_offset: 64 + ARISTA66T1: + vlans: + - 67 + vm_offset: 65 + ARISTA67T1: + vlans: + - 68 + vm_offset: 66 + ARISTA68T1: + vlans: + - 69 + vm_offset: 67 + ARISTA69T1: + vlans: + - 70 + vm_offset: 68 + ARISTA70T1: + vlans: + - 71 + vm_offset: 69 + ARISTA71T1: + vlans: + - 72 + vm_offset: 70 + ARISTA72T1: + vlans: + - 73 + vm_offset: 71 + ARISTA73T1: + vlans: + - 74 + vm_offset: 72 + ARISTA74T1: + vlans: + - 75 + vm_offset: 73 + ARISTA75T1: + vlans: + - 76 + vm_offset: 74 + ARISTA76T1: + vlans: + - 77 + vm_offset: 75 + ARISTA77T1: + vlans: + - 78 + vm_offset: 76 + ARISTA78T1: + vlans: + - 79 + vm_offset: 77 + ARISTA79T1: + vlans: + - 80 + vm_offset: 78 + ARISTA80T1: + vlans: + - 81 + vm_offset: 79 + ARISTA81T1: + vlans: + - 82 + vm_offset: 80 + ARISTA82T1: + vlans: + - 83 + vm_offset: 81 + ARISTA83T1: + vlans: + - 84 + vm_offset: 82 + ARISTA84T1: + vlans: + - 85 + vm_offset: 83 + ARISTA85T1: + vlans: + - 86 + vm_offset: 84 + ARISTA86T1: + vlans: + - 87 + vm_offset: 85 + ARISTA87T1: + vlans: + - 88 + vm_offset: 86 + ARISTA88T1: + vlans: + - 89 + vm_offset: 87 + ARISTA89T1: + vlans: + - 90 + vm_offset: 88 + ARISTA90T1: + vlans: + - 91 + vm_offset: 89 + ARISTA91T1: + vlans: + - 92 + vm_offset: 90 + ARISTA92T1: + vlans: + - 93 + vm_offset: 91 + ARISTA93T1: + vlans: + - 94 + vm_offset: 92 + ARISTA94T1: + vlans: + - 95 + vm_offset: 93 + ARISTA95T1: + vlans: + - 96 + vm_offset: 94 + ARISTA96T1: + vlans: + - 97 + vm_offset: 95 + ARISTA97T1: + vlans: + - 98 + vm_offset: 96 + ARISTA98T1: + vlans: + - 99 + vm_offset: 97 + ARISTA99T1: + vlans: + - 100 + vm_offset: 98 + ARISTA100T1: + vlans: + - 101 + vm_offset: 99 + ARISTA101T1: + vlans: + - 102 + vm_offset: 100 + ARISTA102T1: + vlans: + - 103 + vm_offset: 101 + ARISTA103T1: + vlans: + - 104 + vm_offset: 102 + ARISTA104T1: + vlans: + - 105 + vm_offset: 103 + ARISTA105T1: + vlans: + - 106 + vm_offset: 104 + ARISTA106T1: + vlans: + - 107 + vm_offset: 105 + ARISTA107T1: + vlans: + - 108 + vm_offset: 106 
+ ARISTA108T1: + vlans: + - 109 + vm_offset: 107 + ARISTA109T1: + vlans: + - 110 + vm_offset: 108 + ARISTA110T1: + vlans: + - 111 + vm_offset: 109 + ARISTA111T1: + vlans: + - 112 + vm_offset: 110 + ARISTA112T1: + vlans: + - 113 + vm_offset: 111 + ARISTA113T1: + vlans: + - 114 + vm_offset: 112 + ARISTA114T1: + vlans: + - 115 + vm_offset: 113 + ARISTA115T1: + vlans: + - 116 + vm_offset: 114 + ARISTA116T1: + vlans: + - 117 + vm_offset: 115 + ARISTA117T1: + vlans: + - 118 + vm_offset: 116 + ARISTA118T1: + vlans: + - 119 + vm_offset: 117 + ARISTA119T1: + vlans: + - 120 + vm_offset: 118 + ARISTA120T1: + vlans: + - 121 + vm_offset: 119 + ARISTA121T1: + vlans: + - 122 + vm_offset: 120 + ARISTA122T1: + vlans: + - 123 + vm_offset: 121 + ARISTA123T1: + vlans: + - 124 + vm_offset: 122 + ARISTA124T1: + vlans: + - 125 + vm_offset: 123 + ARISTA125T1: + vlans: + - 126 + vm_offset: 124 + ARISTA126T1: + vlans: + - 127 + vm_offset: 125 + ARISTA127T1: + vlans: + - 128 + vm_offset: 126 + ARISTA128T1: + vlans: + - 129 + vm_offset: 127 + ARISTA129T1: + vlans: + - 130 + vm_offset: 128 + ARISTA130T1: + vlans: + - 131 + vm_offset: 129 + ARISTA131T1: + vlans: + - 132 + vm_offset: 130 + ARISTA132T1: + vlans: + - 133 + vm_offset: 131 + ARISTA133T1: + vlans: + - 134 + vm_offset: 132 + ARISTA134T1: + vlans: + - 135 + vm_offset: 133 + ARISTA135T1: + vlans: + - 136 + vm_offset: 134 + ARISTA136T1: + vlans: + - 137 + vm_offset: 135 + ARISTA137T1: + vlans: + - 138 + vm_offset: 136 + ARISTA138T1: + vlans: + - 139 + vm_offset: 137 + ARISTA139T1: + vlans: + - 140 + vm_offset: 138 + ARISTA140T1: + vlans: + - 141 + vm_offset: 139 + ARISTA141T1: + vlans: + - 142 + vm_offset: 140 + ARISTA142T1: + vlans: + - 143 + vm_offset: 141 + ARISTA143T1: + vlans: + - 144 + vm_offset: 142 + ARISTA144T1: + vlans: + - 145 + vm_offset: 143 + ARISTA145T1: + vlans: + - 146 + vm_offset: 144 + ARISTA146T1: + vlans: + - 147 + vm_offset: 145 + ARISTA147T1: + vlans: + - 148 + vm_offset: 146 + ARISTA148T1: + vlans: + - 149 + 
vm_offset: 147 + ARISTA149T1: + vlans: + - 150 + vm_offset: 148 + ARISTA150T1: + vlans: + - 151 + vm_offset: 149 + ARISTA151T1: + vlans: + - 152 + vm_offset: 150 + ARISTA152T1: + vlans: + - 153 + vm_offset: 151 + ARISTA153T1: + vlans: + - 154 + vm_offset: 152 + ARISTA154T1: + vlans: + - 155 + vm_offset: 153 + ARISTA155T1: + vlans: + - 156 + vm_offset: 154 + ARISTA156T1: + vlans: + - 157 + vm_offset: 155 + ARISTA157T1: + vlans: + - 158 + vm_offset: 156 + ARISTA158T1: + vlans: + - 159 + vm_offset: 157 + ARISTA159T1: + vlans: + - 160 + vm_offset: 158 + ARISTA160T1: + vlans: + - 161 + vm_offset: 159 + ARISTA161T1: + vlans: + - 162 + vm_offset: 160 + ARISTA162T1: + vlans: + - 163 + vm_offset: 161 + ARISTA163T1: + vlans: + - 164 + vm_offset: 162 + ARISTA164T1: + vlans: + - 165 + vm_offset: 163 + ARISTA165T1: + vlans: + - 166 + vm_offset: 164 + ARISTA166T1: + vlans: + - 167 + vm_offset: 165 + ARISTA167T1: + vlans: + - 168 + vm_offset: 166 + ARISTA168T1: + vlans: + - 169 + vm_offset: 167 + ARISTA169T1: + vlans: + - 170 + vm_offset: 168 + ARISTA170T1: + vlans: + - 171 + vm_offset: 169 + ARISTA171T1: + vlans: + - 172 + vm_offset: 170 + ARISTA172T1: + vlans: + - 173 + vm_offset: 171 + ARISTA173T1: + vlans: + - 174 + vm_offset: 172 + ARISTA174T1: + vlans: + - 175 + vm_offset: 173 + ARISTA175T1: + vlans: + - 176 + vm_offset: 174 + ARISTA176T1: + vlans: + - 177 + vm_offset: 175 + ARISTA177T1: + vlans: + - 178 + vm_offset: 176 + ARISTA178T1: + vlans: + - 179 + vm_offset: 177 + ARISTA179T1: + vlans: + - 180 + vm_offset: 178 + ARISTA180T1: + vlans: + - 181 + vm_offset: 179 + ARISTA181T1: + vlans: + - 182 + vm_offset: 180 + ARISTA182T1: + vlans: + - 183 + vm_offset: 181 + ARISTA183T1: + vlans: + - 184 + vm_offset: 182 + ARISTA184T1: + vlans: + - 185 + vm_offset: 183 + ARISTA185T1: + vlans: + - 186 + vm_offset: 184 + ARISTA186T1: + vlans: + - 187 + vm_offset: 185 + ARISTA187T1: + vlans: + - 188 + vm_offset: 186 + ARISTA188T1: + vlans: + - 189 + vm_offset: 187 + ARISTA189T1: + vlans: 
+ - 190 + vm_offset: 188 + ARISTA190T1: + vlans: + - 191 + vm_offset: 189 + ARISTA191T1: + vlans: + - 192 + vm_offset: 190 + ARISTA192T1: + vlans: + - 193 + vm_offset: 191 + ARISTA193T1: + vlans: + - 194 + vm_offset: 192 + ARISTA194T1: + vlans: + - 195 + vm_offset: 193 + ARISTA195T1: + vlans: + - 196 + vm_offset: 194 + ARISTA196T1: + vlans: + - 197 + vm_offset: 195 + ARISTA197T1: + vlans: + - 198 + vm_offset: 196 + ARISTA198T1: + vlans: + - 199 + vm_offset: 197 + ARISTA199T1: + vlans: + - 200 + vm_offset: 198 + ARISTA200T1: + vlans: + - 201 + vm_offset: 199 + ARISTA201T1: + vlans: + - 202 + vm_offset: 200 + ARISTA202T1: + vlans: + - 203 + vm_offset: 201 + ARISTA203T1: + vlans: + - 204 + vm_offset: 202 + ARISTA204T1: + vlans: + - 205 + vm_offset: 203 + ARISTA205T1: + vlans: + - 206 + vm_offset: 204 + ARISTA206T1: + vlans: + - 207 + vm_offset: 205 + ARISTA207T1: + vlans: + - 208 + vm_offset: 206 + ARISTA208T1: + vlans: + - 209 + vm_offset: 207 + ARISTA209T1: + vlans: + - 210 + vm_offset: 208 + ARISTA210T1: + vlans: + - 211 + vm_offset: 209 + ARISTA211T1: + vlans: + - 212 + vm_offset: 210 + ARISTA212T1: + vlans: + - 213 + vm_offset: 211 + ARISTA213T1: + vlans: + - 214 + vm_offset: 212 + ARISTA214T1: + vlans: + - 215 + vm_offset: 213 + ARISTA215T1: + vlans: + - 216 + vm_offset: 214 + ARISTA216T1: + vlans: + - 217 + vm_offset: 215 + ARISTA217T1: + vlans: + - 218 + vm_offset: 216 + ARISTA218T1: + vlans: + - 219 + vm_offset: 217 + ARISTA219T1: + vlans: + - 220 + vm_offset: 218 + ARISTA220T1: + vlans: + - 221 + vm_offset: 219 + ARISTA221T1: + vlans: + - 222 + vm_offset: 220 + ARISTA222T1: + vlans: + - 223 + vm_offset: 221 + ARISTA223T1: + vlans: + - 224 + vm_offset: 222 + ARISTA224T1: + vlans: + - 225 + vm_offset: 223 + ARISTA225T1: + vlans: + - 226 + vm_offset: 224 + ARISTA226T1: + vlans: + - 227 + vm_offset: 225 + ARISTA227T1: + vlans: + - 228 + vm_offset: 226 + ARISTA228T1: + vlans: + - 229 + vm_offset: 227 + ARISTA229T1: + vlans: + - 230 + vm_offset: 228 + ARISTA230T1: 
+ vlans: + - 231 + vm_offset: 229 + ARISTA231T1: + vlans: + - 232 + vm_offset: 230 + ARISTA232T1: + vlans: + - 233 + vm_offset: 231 + ARISTA233T1: + vlans: + - 234 + vm_offset: 232 + ARISTA234T1: + vlans: + - 235 + vm_offset: 233 + ARISTA235T1: + vlans: + - 236 + vm_offset: 234 + ARISTA236T1: + vlans: + - 237 + vm_offset: 235 + ARISTA237T1: + vlans: + - 238 + vm_offset: 236 + ARISTA238T1: + vlans: + - 239 + vm_offset: 237 + ARISTA239T1: + vlans: + - 240 + vm_offset: 238 + ARISTA240T1: + vlans: + - 241 + vm_offset: 239 + ARISTA241T1: + vlans: + - 242 + vm_offset: 240 + ARISTA242T1: + vlans: + - 243 + vm_offset: 241 + ARISTA243T1: + vlans: + - 244 + vm_offset: 242 + ARISTA244T1: + vlans: + - 245 + vm_offset: 243 + ARISTA245T1: + vlans: + - 246 + vm_offset: 244 + ARISTA246T1: + vlans: + - 247 + vm_offset: 245 + ARISTA247T1: + vlans: + - 248 + vm_offset: 246 + ARISTA248T1: + vlans: + - 249 + vm_offset: 247 + ARISTA249T1: + vlans: + - 250 + vm_offset: 248 + ARISTA250T1: + vlans: + - 251 + vm_offset: 249 + ARISTA251T1: + vlans: + - 252 + vm_offset: 250 + ARISTA252T1: + vlans: + - 253 + vm_offset: 251 + ARISTA253T1: + vlans: + - 254 + vm_offset: 252 + ARISTA254T1: + vlans: + - 255 + vm_offset: 253 + DUT: + vlan_configs: + default_vlan_config: one_vlan_per_intf + one_vlan_per_intf: + Vlan1000: + id: 1000 + intfs: [0] + prefix_v6: fc00:c:c:0001::/64 + tag: 1000 + Vlan1001: + id: 1001 + intfs: [1] + prefix_v6: fc00:c:c:0002::/64 + tag: 1001 + +configuration_properties: + common: + dut_asn: 4200000000 + dut_type: ToRRouter + swrole: leaf + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 4200200000 + leaf_asn_start: 4200100000 + tor_asn_start: 4200000000 + failure_rate: 0 + nhipv6: FC0A::FF + +configuration: + ARISTA01T1: + properties: + - common + bgp: + router-id: 0.12.0.3 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3::1/128 + Ethernet1: + ipv6: 
fc00:a::a/126 + bp_interface: + ipv6: fc00:b::3/64 + + ARISTA02T1: + properties: + - common + bgp: + router-id: 0.12.0.4 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d + interfaces: + Loopback0: + ipv6: fc00:c:c:4::1/128 + Ethernet1: + ipv6: fc00:a::e/126 + bp_interface: + ipv6: fc00:b::4/64 + + ARISTA03T1: + properties: + - common + bgp: + router-id: 0.12.0.5 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::11 + interfaces: + Loopback0: + ipv6: fc00:c:c:5::1/128 + Ethernet1: + ipv6: fc00:a::12/126 + bp_interface: + ipv6: fc00:b::5/64 + + ARISTA04T1: + properties: + - common + bgp: + router-id: 0.12.0.6 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::15 + interfaces: + Loopback0: + ipv6: fc00:c:c:6::1/128 + Ethernet1: + ipv6: fc00:a::16/126 + bp_interface: + ipv6: fc00:b::6/64 + + ARISTA05T1: + properties: + - common + bgp: + router-id: 0.12.0.7 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::19 + interfaces: + Loopback0: + ipv6: fc00:c:c:7::1/128 + Ethernet1: + ipv6: fc00:a::1a/126 + bp_interface: + ipv6: fc00:b::7/64 + + ARISTA06T1: + properties: + - common + bgp: + router-id: 0.12.0.8 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d + interfaces: + Loopback0: + ipv6: fc00:c:c:8::1/128 + Ethernet1: + ipv6: fc00:a::1e/126 + bp_interface: + ipv6: fc00:b::8/64 + + ARISTA07T1: + properties: + - common + bgp: + router-id: 0.12.0.9 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::21 + interfaces: + Loopback0: + ipv6: fc00:c:c:9::1/128 + Ethernet1: + ipv6: fc00:a::22/126 + bp_interface: + ipv6: fc00:b::9/64 + + ARISTA08T1: + properties: + - common + bgp: + router-id: 0.12.0.10 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::25 + interfaces: + Loopback0: + ipv6: fc00:c:c:a::1/128 + Ethernet1: + ipv6: fc00:a::26/126 + bp_interface: + ipv6: fc00:b::a/64 + + ARISTA09T1: + properties: + - common + bgp: + router-id: 0.12.0.11 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::29 + interfaces: + Loopback0: + ipv6: fc00:c:c:b::1/128 + 
Ethernet1: + ipv6: fc00:a::2a/126 + bp_interface: + ipv6: fc00:b::b/64 + + ARISTA10T1: + properties: + - common + bgp: + router-id: 0.12.0.12 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d + interfaces: + Loopback0: + ipv6: fc00:c:c:c::1/128 + Ethernet1: + ipv6: fc00:a::2e/126 + bp_interface: + ipv6: fc00:b::c/64 + + ARISTA11T1: + properties: + - common + bgp: + router-id: 0.12.0.13 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::31 + interfaces: + Loopback0: + ipv6: fc00:c:c:d::1/128 + Ethernet1: + ipv6: fc00:a::32/126 + bp_interface: + ipv6: fc00:b::d/64 + + ARISTA12T1: + properties: + - common + bgp: + router-id: 0.12.0.14 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::35 + interfaces: + Loopback0: + ipv6: fc00:c:c:e::1/128 + Ethernet1: + ipv6: fc00:a::36/126 + bp_interface: + ipv6: fc00:b::e/64 + + ARISTA13T1: + properties: + - common + bgp: + router-id: 0.12.0.15 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::39 + interfaces: + Loopback0: + ipv6: fc00:c:c:f::1/128 + Ethernet1: + ipv6: fc00:a::3a/126 + bp_interface: + ipv6: fc00:b::f/64 + + ARISTA14T1: + properties: + - common + bgp: + router-id: 0.12.0.16 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d + interfaces: + Loopback0: + ipv6: fc00:c:c:10::1/128 + Ethernet1: + ipv6: fc00:a::3e/126 + bp_interface: + ipv6: fc00:b::10/64 + + ARISTA15T1: + properties: + - common + bgp: + router-id: 0.12.0.17 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::41 + interfaces: + Loopback0: + ipv6: fc00:c:c:11::1/128 + Ethernet1: + ipv6: fc00:a::42/126 + bp_interface: + ipv6: fc00:b::11/64 + + ARISTA16T1: + properties: + - common + bgp: + router-id: 0.12.0.18 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::45 + interfaces: + Loopback0: + ipv6: fc00:c:c:12::1/128 + Ethernet1: + ipv6: fc00:a::46/126 + bp_interface: + ipv6: fc00:b::12/64 + + ARISTA17T1: + properties: + - common + bgp: + router-id: 0.12.0.19 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::49 + interfaces: + 
Loopback0: + ipv6: fc00:c:c:13::1/128 + Ethernet1: + ipv6: fc00:a::4a/126 + bp_interface: + ipv6: fc00:b::13/64 + + ARISTA18T1: + properties: + - common + bgp: + router-id: 0.12.0.20 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4d + interfaces: + Loopback0: + ipv6: fc00:c:c:14::1/128 + Ethernet1: + ipv6: fc00:a::4e/126 + bp_interface: + ipv6: fc00:b::14/64 + + ARISTA19T1: + properties: + - common + bgp: + router-id: 0.12.0.21 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::51 + interfaces: + Loopback0: + ipv6: fc00:c:c:15::1/128 + Ethernet1: + ipv6: fc00:a::52/126 + bp_interface: + ipv6: fc00:b::15/64 + + ARISTA20T1: + properties: + - common + bgp: + router-id: 0.12.0.22 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::55 + interfaces: + Loopback0: + ipv6: fc00:c:c:16::1/128 + Ethernet1: + ipv6: fc00:a::56/126 + bp_interface: + ipv6: fc00:b::16/64 + + ARISTA21T1: + properties: + - common + bgp: + router-id: 0.12.0.23 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::59 + interfaces: + Loopback0: + ipv6: fc00:c:c:17::1/128 + Ethernet1: + ipv6: fc00:a::5a/126 + bp_interface: + ipv6: fc00:b::17/64 + + ARISTA22T1: + properties: + - common + bgp: + router-id: 0.12.0.24 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5d + interfaces: + Loopback0: + ipv6: fc00:c:c:18::1/128 + Ethernet1: + ipv6: fc00:a::5e/126 + bp_interface: + ipv6: fc00:b::18/64 + + ARISTA23T1: + properties: + - common + bgp: + router-id: 0.12.0.25 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::61 + interfaces: + Loopback0: + ipv6: fc00:c:c:19::1/128 + Ethernet1: + ipv6: fc00:a::62/126 + bp_interface: + ipv6: fc00:b::19/64 + + ARISTA24T1: + properties: + - common + bgp: + router-id: 0.12.0.26 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::65 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a::1/128 + Ethernet1: + ipv6: fc00:a::66/126 + bp_interface: + ipv6: fc00:b::1a/64 + + ARISTA25T1: + properties: + - common + bgp: + router-id: 0.12.0.27 + asn: 4200100000 + peers: + 
4200000000: + - fc00:a::69 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b::1/128 + Ethernet1: + ipv6: fc00:a::6a/126 + bp_interface: + ipv6: fc00:b::1b/64 + + ARISTA26T1: + properties: + - common + bgp: + router-id: 0.12.0.28 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c::1/128 + Ethernet1: + ipv6: fc00:a::6e/126 + bp_interface: + ipv6: fc00:b::1c/64 + + ARISTA27T1: + properties: + - common + bgp: + router-id: 0.12.0.29 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::71 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d::1/128 + Ethernet1: + ipv6: fc00:a::72/126 + bp_interface: + ipv6: fc00:b::1d/64 + + ARISTA28T1: + properties: + - common + bgp: + router-id: 0.12.0.30 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::75 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e::1/128 + Ethernet1: + ipv6: fc00:a::76/126 + bp_interface: + ipv6: fc00:b::1e/64 + + ARISTA29T1: + properties: + - common + bgp: + router-id: 0.12.0.31 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::79 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f::1/128 + Ethernet1: + ipv6: fc00:a::7a/126 + bp_interface: + ipv6: fc00:b::1f/64 + + ARISTA30T1: + properties: + - common + bgp: + router-id: 0.12.0.32 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7d + interfaces: + Loopback0: + ipv6: fc00:c:c:20::1/128 + Ethernet1: + ipv6: fc00:a::7e/126 + bp_interface: + ipv6: fc00:b::20/64 + + ARISTA31T1: + properties: + - common + bgp: + router-id: 0.12.0.33 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::81 + interfaces: + Loopback0: + ipv6: fc00:c:c:21::1/128 + Ethernet1: + ipv6: fc00:a::82/126 + bp_interface: + ipv6: fc00:b::21/64 + + ARISTA32T1: + properties: + - common + bgp: + router-id: 0.12.0.34 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::85 + interfaces: + Loopback0: + ipv6: fc00:c:c:22::1/128 + Ethernet1: + ipv6: fc00:a::86/126 + bp_interface: + ipv6: fc00:b::22/64 + + ARISTA33T1: + properties: + - common + bgp: + 
router-id: 0.12.0.35 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::89 + interfaces: + Loopback0: + ipv6: fc00:c:c:23::1/128 + Ethernet1: + ipv6: fc00:a::8a/126 + bp_interface: + ipv6: fc00:b::23/64 + + ARISTA34T1: + properties: + - common + bgp: + router-id: 0.12.0.36 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::8d + interfaces: + Loopback0: + ipv6: fc00:c:c:24::1/128 + Ethernet1: + ipv6: fc00:a::8e/126 + bp_interface: + ipv6: fc00:b::24/64 + + ARISTA35T1: + properties: + - common + bgp: + router-id: 0.12.0.37 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::91 + interfaces: + Loopback0: + ipv6: fc00:c:c:25::1/128 + Ethernet1: + ipv6: fc00:a::92/126 + bp_interface: + ipv6: fc00:b::25/64 + + ARISTA36T1: + properties: + - common + bgp: + router-id: 0.12.0.38 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::95 + interfaces: + Loopback0: + ipv6: fc00:c:c:26::1/128 + Ethernet1: + ipv6: fc00:a::96/126 + bp_interface: + ipv6: fc00:b::26/64 + + ARISTA37T1: + properties: + - common + bgp: + router-id: 0.12.0.39 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::99 + interfaces: + Loopback0: + ipv6: fc00:c:c:27::1/128 + Ethernet1: + ipv6: fc00:a::9a/126 + bp_interface: + ipv6: fc00:b::27/64 + + ARISTA38T1: + properties: + - common + bgp: + router-id: 0.12.0.40 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::9d + interfaces: + Loopback0: + ipv6: fc00:c:c:28::1/128 + Ethernet1: + ipv6: fc00:a::9e/126 + bp_interface: + ipv6: fc00:b::28/64 + + ARISTA39T1: + properties: + - common + bgp: + router-id: 0.12.0.41 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:29::1/128 + Ethernet1: + ipv6: fc00:a::a2/126 + bp_interface: + ipv6: fc00:b::29/64 + + ARISTA40T1: + properties: + - common + bgp: + router-id: 0.12.0.42 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2a::1/128 + Ethernet1: + ipv6: fc00:a::a6/126 + bp_interface: + ipv6: fc00:b::2a/64 + + 
ARISTA41T1: + properties: + - common + bgp: + router-id: 0.12.0.43 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2b::1/128 + Ethernet1: + ipv6: fc00:a::aa/126 + bp_interface: + ipv6: fc00:b::2b/64 + + ARISTA42T1: + properties: + - common + bgp: + router-id: 0.12.0.44 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::ad + interfaces: + Loopback0: + ipv6: fc00:c:c:2c::1/128 + Ethernet1: + ipv6: fc00:a::ae/126 + bp_interface: + ipv6: fc00:b::2c/64 + + ARISTA43T1: + properties: + - common + bgp: + router-id: 0.12.0.45 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:2d::1/128 + Ethernet1: + ipv6: fc00:a::b2/126 + bp_interface: + ipv6: fc00:b::2d/64 + + ARISTA44T1: + properties: + - common + bgp: + router-id: 0.12.0.46 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2e::1/128 + Ethernet1: + ipv6: fc00:a::b6/126 + bp_interface: + ipv6: fc00:b::2e/64 + + ARISTA45T1: + properties: + - common + bgp: + router-id: 0.12.0.47 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2f::1/128 + Ethernet1: + ipv6: fc00:a::ba/126 + bp_interface: + ipv6: fc00:b::2f/64 + + ARISTA46T1: + properties: + - common + bgp: + router-id: 0.12.0.48 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::bd + interfaces: + Loopback0: + ipv6: fc00:c:c:30::1/128 + Ethernet1: + ipv6: fc00:a::be/126 + bp_interface: + ipv6: fc00:b::30/64 + + ARISTA47T1: + properties: + - common + bgp: + router-id: 0.12.0.49 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:31::1/128 + Ethernet1: + ipv6: fc00:a::c2/126 + bp_interface: + ipv6: fc00:b::31/64 + + ARISTA48T1: + properties: + - common + bgp: + router-id: 0.12.0.50 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:32::1/128 + Ethernet1: + ipv6: fc00:a::c6/126 + 
bp_interface: + ipv6: fc00:b::32/64 + + ARISTA49T1: + properties: + - common + bgp: + router-id: 0.12.0.51 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:33::1/128 + Ethernet1: + ipv6: fc00:a::ca/126 + bp_interface: + ipv6: fc00:b::33/64 + + ARISTA50T1: + properties: + - common + bgp: + router-id: 0.12.0.52 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::cd + interfaces: + Loopback0: + ipv6: fc00:c:c:34::1/128 + Ethernet1: + ipv6: fc00:a::ce/126 + bp_interface: + ipv6: fc00:b::34/64 + + ARISTA51T1: + properties: + - common + bgp: + router-id: 0.12.0.53 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:35::1/128 + Ethernet1: + ipv6: fc00:a::d2/126 + bp_interface: + ipv6: fc00:b::35/64 + + ARISTA52T1: + properties: + - common + bgp: + router-id: 0.12.0.54 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:36::1/128 + Ethernet1: + ipv6: fc00:a::d6/126 + bp_interface: + ipv6: fc00:b::36/64 + + ARISTA53T1: + properties: + - common + bgp: + router-id: 0.12.0.55 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:37::1/128 + Ethernet1: + ipv6: fc00:a::da/126 + bp_interface: + ipv6: fc00:b::37/64 + + ARISTA54T1: + properties: + - common + bgp: + router-id: 0.12.0.56 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::dd + interfaces: + Loopback0: + ipv6: fc00:c:c:38::1/128 + Ethernet1: + ipv6: fc00:a::de/126 + bp_interface: + ipv6: fc00:b::38/64 + + ARISTA55T1: + properties: + - common + bgp: + router-id: 0.12.0.57 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:39::1/128 + Ethernet1: + ipv6: fc00:a::e2/126 + bp_interface: + ipv6: fc00:b::39/64 + + ARISTA56T1: + properties: + - common + bgp: + router-id: 0.12.0.58 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e5 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:3a::1/128 + Ethernet1: + ipv6: fc00:a::e6/126 + bp_interface: + ipv6: fc00:b::3a/64 + + ARISTA57T1: + properties: + - common + bgp: + router-id: 0.12.0.59 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3b::1/128 + Ethernet1: + ipv6: fc00:a::ea/126 + bp_interface: + ipv6: fc00:b::3b/64 + + ARISTA58T1: + properties: + - common + bgp: + router-id: 0.12.0.60 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::ed + interfaces: + Loopback0: + ipv6: fc00:c:c:3c::1/128 + Ethernet1: + ipv6: fc00:a::ee/126 + bp_interface: + ipv6: fc00:b::3c/64 + + ARISTA59T1: + properties: + - common + bgp: + router-id: 0.12.0.61 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:3d::1/128 + Ethernet1: + ipv6: fc00:a::f2/126 + bp_interface: + ipv6: fc00:b::3d/64 + + ARISTA60T1: + properties: + - common + bgp: + router-id: 0.12.0.62 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3e::1/128 + Ethernet1: + ipv6: fc00:a::f6/126 + bp_interface: + ipv6: fc00:b::3e/64 + + ARISTA61T1: + properties: + - common + bgp: + router-id: 0.12.0.63 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3f::1/128 + Ethernet1: + ipv6: fc00:a::fa/126 + bp_interface: + ipv6: fc00:b::3f/64 + + ARISTA62T1: + properties: + - common + bgp: + router-id: 0.12.0.64 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::fd + interfaces: + Loopback0: + ipv6: fc00:c:c:40::1/128 + Ethernet1: + ipv6: fc00:a::fe/126 + bp_interface: + ipv6: fc00:b::40/64 + + ARISTA63T1: + properties: + - common + bgp: + router-id: 0.12.0.65 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::101 + interfaces: + Loopback0: + ipv6: fc00:c:c:41::1/128 + Ethernet1: + ipv6: fc00:a::102/126 + bp_interface: + ipv6: fc00:b::41/64 + + ARISTA64T1: + properties: + - common + bgp: + router-id: 0.12.0.66 + asn: 4200100000 + peers: + 4200000000: + - 
fc00:a::105 + interfaces: + Loopback0: + ipv6: fc00:c:c:42::1/128 + Ethernet1: + ipv6: fc00:a::106/126 + bp_interface: + ipv6: fc00:b::42/64 + + ARISTA65T1: + properties: + - common + bgp: + router-id: 0.12.0.67 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::109 + interfaces: + Loopback0: + ipv6: fc00:c:c:43::1/128 + Ethernet1: + ipv6: fc00:a::10a/126 + bp_interface: + ipv6: fc00:b::43/64 + + ARISTA66T1: + properties: + - common + bgp: + router-id: 0.12.0.68 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::10d + interfaces: + Loopback0: + ipv6: fc00:c:c:44::1/128 + Ethernet1: + ipv6: fc00:a::10e/126 + bp_interface: + ipv6: fc00:b::44/64 + + ARISTA67T1: + properties: + - common + bgp: + router-id: 0.12.0.69 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::111 + interfaces: + Loopback0: + ipv6: fc00:c:c:45::1/128 + Ethernet1: + ipv6: fc00:a::112/126 + bp_interface: + ipv6: fc00:b::45/64 + + ARISTA68T1: + properties: + - common + bgp: + router-id: 0.12.0.70 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::115 + interfaces: + Loopback0: + ipv6: fc00:c:c:46::1/128 + Ethernet1: + ipv6: fc00:a::116/126 + bp_interface: + ipv6: fc00:b::46/64 + + ARISTA69T1: + properties: + - common + bgp: + router-id: 0.12.0.71 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::119 + interfaces: + Loopback0: + ipv6: fc00:c:c:47::1/128 + Ethernet1: + ipv6: fc00:a::11a/126 + bp_interface: + ipv6: fc00:b::47/64 + + ARISTA70T1: + properties: + - common + bgp: + router-id: 0.12.0.72 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::11d + interfaces: + Loopback0: + ipv6: fc00:c:c:48::1/128 + Ethernet1: + ipv6: fc00:a::11e/126 + bp_interface: + ipv6: fc00:b::48/64 + + ARISTA71T1: + properties: + - common + bgp: + router-id: 0.12.0.73 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::121 + interfaces: + Loopback0: + ipv6: fc00:c:c:49::1/128 + Ethernet1: + ipv6: fc00:a::122/126 + bp_interface: + ipv6: fc00:b::49/64 + + ARISTA72T1: + properties: + - common + bgp: + 
router-id: 0.12.0.74 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::125 + interfaces: + Loopback0: + ipv6: fc00:c:c:4a::1/128 + Ethernet1: + ipv6: fc00:a::126/126 + bp_interface: + ipv6: fc00:b::4a/64 + + ARISTA73T1: + properties: + - common + bgp: + router-id: 0.12.0.75 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::129 + interfaces: + Loopback0: + ipv6: fc00:c:c:4b::1/128 + Ethernet1: + ipv6: fc00:a::12a/126 + bp_interface: + ipv6: fc00:b::4b/64 + + ARISTA74T1: + properties: + - common + bgp: + router-id: 0.12.0.76 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::12d + interfaces: + Loopback0: + ipv6: fc00:c:c:4c::1/128 + Ethernet1: + ipv6: fc00:a::12e/126 + bp_interface: + ipv6: fc00:b::4c/64 + + ARISTA75T1: + properties: + - common + bgp: + router-id: 0.12.0.77 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::131 + interfaces: + Loopback0: + ipv6: fc00:c:c:4d::1/128 + Ethernet1: + ipv6: fc00:a::132/126 + bp_interface: + ipv6: fc00:b::4d/64 + + ARISTA76T1: + properties: + - common + bgp: + router-id: 0.12.0.78 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::135 + interfaces: + Loopback0: + ipv6: fc00:c:c:4e::1/128 + Ethernet1: + ipv6: fc00:a::136/126 + bp_interface: + ipv6: fc00:b::4e/64 + + ARISTA77T1: + properties: + - common + bgp: + router-id: 0.12.0.79 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::139 + interfaces: + Loopback0: + ipv6: fc00:c:c:4f::1/128 + Ethernet1: + ipv6: fc00:a::13a/126 + bp_interface: + ipv6: fc00:b::4f/64 + + ARISTA78T1: + properties: + - common + bgp: + router-id: 0.12.0.80 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::13d + interfaces: + Loopback0: + ipv6: fc00:c:c:50::1/128 + Ethernet1: + ipv6: fc00:a::13e/126 + bp_interface: + ipv6: fc00:b::50/64 + + ARISTA79T1: + properties: + - common + bgp: + router-id: 0.12.0.81 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::141 + interfaces: + Loopback0: + ipv6: fc00:c:c:51::1/128 + Ethernet1: + ipv6: fc00:a::142/126 + bp_interface: + ipv6: 
fc00:b::51/64 + + ARISTA80T1: + properties: + - common + bgp: + router-id: 0.12.0.82 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::145 + interfaces: + Loopback0: + ipv6: fc00:c:c:52::1/128 + Ethernet1: + ipv6: fc00:a::146/126 + bp_interface: + ipv6: fc00:b::52/64 + + ARISTA81T1: + properties: + - common + bgp: + router-id: 0.12.0.83 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::149 + interfaces: + Loopback0: + ipv6: fc00:c:c:53::1/128 + Ethernet1: + ipv6: fc00:a::14a/126 + bp_interface: + ipv6: fc00:b::53/64 + + ARISTA82T1: + properties: + - common + bgp: + router-id: 0.12.0.84 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::14d + interfaces: + Loopback0: + ipv6: fc00:c:c:54::1/128 + Ethernet1: + ipv6: fc00:a::14e/126 + bp_interface: + ipv6: fc00:b::54/64 + + ARISTA83T1: + properties: + - common + bgp: + router-id: 0.12.0.85 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::151 + interfaces: + Loopback0: + ipv6: fc00:c:c:55::1/128 + Ethernet1: + ipv6: fc00:a::152/126 + bp_interface: + ipv6: fc00:b::55/64 + + ARISTA84T1: + properties: + - common + bgp: + router-id: 0.12.0.86 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::155 + interfaces: + Loopback0: + ipv6: fc00:c:c:56::1/128 + Ethernet1: + ipv6: fc00:a::156/126 + bp_interface: + ipv6: fc00:b::56/64 + + ARISTA85T1: + properties: + - common + bgp: + router-id: 0.12.0.87 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::159 + interfaces: + Loopback0: + ipv6: fc00:c:c:57::1/128 + Ethernet1: + ipv6: fc00:a::15a/126 + bp_interface: + ipv6: fc00:b::57/64 + + ARISTA86T1: + properties: + - common + bgp: + router-id: 0.12.0.88 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::15d + interfaces: + Loopback0: + ipv6: fc00:c:c:58::1/128 + Ethernet1: + ipv6: fc00:a::15e/126 + bp_interface: + ipv6: fc00:b::58/64 + + ARISTA87T1: + properties: + - common + bgp: + router-id: 0.12.0.89 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::161 + interfaces: + Loopback0: + ipv6: fc00:c:c:59::1/128 + 
Ethernet1: + ipv6: fc00:a::162/126 + bp_interface: + ipv6: fc00:b::59/64 + + ARISTA88T1: + properties: + - common + bgp: + router-id: 0.12.0.90 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::165 + interfaces: + Loopback0: + ipv6: fc00:c:c:5a::1/128 + Ethernet1: + ipv6: fc00:a::166/126 + bp_interface: + ipv6: fc00:b::5a/64 + + ARISTA89T1: + properties: + - common + bgp: + router-id: 0.12.0.91 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::169 + interfaces: + Loopback0: + ipv6: fc00:c:c:5b::1/128 + Ethernet1: + ipv6: fc00:a::16a/126 + bp_interface: + ipv6: fc00:b::5b/64 + + ARISTA90T1: + properties: + - common + bgp: + router-id: 0.12.0.92 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::16d + interfaces: + Loopback0: + ipv6: fc00:c:c:5c::1/128 + Ethernet1: + ipv6: fc00:a::16e/126 + bp_interface: + ipv6: fc00:b::5c/64 + + ARISTA91T1: + properties: + - common + bgp: + router-id: 0.12.0.93 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::171 + interfaces: + Loopback0: + ipv6: fc00:c:c:5d::1/128 + Ethernet1: + ipv6: fc00:a::172/126 + bp_interface: + ipv6: fc00:b::5d/64 + + ARISTA92T1: + properties: + - common + bgp: + router-id: 0.12.0.94 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::175 + interfaces: + Loopback0: + ipv6: fc00:c:c:5e::1/128 + Ethernet1: + ipv6: fc00:a::176/126 + bp_interface: + ipv6: fc00:b::5e/64 + + ARISTA93T1: + properties: + - common + bgp: + router-id: 0.12.0.95 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::179 + interfaces: + Loopback0: + ipv6: fc00:c:c:5f::1/128 + Ethernet1: + ipv6: fc00:a::17a/126 + bp_interface: + ipv6: fc00:b::5f/64 + + ARISTA94T1: + properties: + - common + bgp: + router-id: 0.12.0.96 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::17d + interfaces: + Loopback0: + ipv6: fc00:c:c:60::1/128 + Ethernet1: + ipv6: fc00:a::17e/126 + bp_interface: + ipv6: fc00:b::60/64 + + ARISTA95T1: + properties: + - common + bgp: + router-id: 0.12.0.97 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::181 
+ interfaces: + Loopback0: + ipv6: fc00:c:c:61::1/128 + Ethernet1: + ipv6: fc00:a::182/126 + bp_interface: + ipv6: fc00:b::61/64 + + ARISTA96T1: + properties: + - common + bgp: + router-id: 0.12.0.98 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::185 + interfaces: + Loopback0: + ipv6: fc00:c:c:62::1/128 + Ethernet1: + ipv6: fc00:a::186/126 + bp_interface: + ipv6: fc00:b::62/64 + + ARISTA97T1: + properties: + - common + bgp: + router-id: 0.12.0.99 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::189 + interfaces: + Loopback0: + ipv6: fc00:c:c:63::1/128 + Ethernet1: + ipv6: fc00:a::18a/126 + bp_interface: + ipv6: fc00:b::63/64 + + ARISTA98T1: + properties: + - common + bgp: + router-id: 0.12.0.100 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::18d + interfaces: + Loopback0: + ipv6: fc00:c:c:64::1/128 + Ethernet1: + ipv6: fc00:a::18e/126 + bp_interface: + ipv6: fc00:b::64/64 + + ARISTA99T1: + properties: + - common + bgp: + router-id: 0.12.0.101 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::191 + interfaces: + Loopback0: + ipv6: fc00:c:c:65::1/128 + Ethernet1: + ipv6: fc00:a::192/126 + bp_interface: + ipv6: fc00:b::65/64 + + ARISTA100T1: + properties: + - common + bgp: + router-id: 0.12.0.102 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::195 + interfaces: + Loopback0: + ipv6: fc00:c:c:66::1/128 + Ethernet1: + ipv6: fc00:a::196/126 + bp_interface: + ipv6: fc00:b::66/64 + + ARISTA101T1: + properties: + - common + bgp: + router-id: 0.12.0.103 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::199 + interfaces: + Loopback0: + ipv6: fc00:c:c:67::1/128 + Ethernet1: + ipv6: fc00:a::19a/126 + bp_interface: + ipv6: fc00:b::67/64 + + ARISTA102T1: + properties: + - common + bgp: + router-id: 0.12.0.104 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::19d + interfaces: + Loopback0: + ipv6: fc00:c:c:68::1/128 + Ethernet1: + ipv6: fc00:a::19e/126 + bp_interface: + ipv6: fc00:b::68/64 + + ARISTA103T1: + properties: + - common + bgp: + router-id: 
0.12.0.105 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:69::1/128 + Ethernet1: + ipv6: fc00:a::1a2/126 + bp_interface: + ipv6: fc00:b::69/64 + + ARISTA104T1: + properties: + - common + bgp: + router-id: 0.12.0.106 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6a::1/128 + Ethernet1: + ipv6: fc00:a::1a6/126 + bp_interface: + ipv6: fc00:b::6a/64 + + ARISTA105T1: + properties: + - common + bgp: + router-id: 0.12.0.107 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6b::1/128 + Ethernet1: + ipv6: fc00:a::1aa/126 + bp_interface: + ipv6: fc00:b::6b/64 + + ARISTA106T1: + properties: + - common + bgp: + router-id: 0.12.0.108 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1ad + interfaces: + Loopback0: + ipv6: fc00:c:c:6c::1/128 + Ethernet1: + ipv6: fc00:a::1ae/126 + bp_interface: + ipv6: fc00:b::6c/64 + + ARISTA107T1: + properties: + - common + bgp: + router-id: 0.12.0.109 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:6d::1/128 + Ethernet1: + ipv6: fc00:a::1b2/126 + bp_interface: + ipv6: fc00:b::6d/64 + + ARISTA108T1: + properties: + - common + bgp: + router-id: 0.12.0.110 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6e::1/128 + Ethernet1: + ipv6: fc00:a::1b6/126 + bp_interface: + ipv6: fc00:b::6e/64 + + ARISTA109T1: + properties: + - common + bgp: + router-id: 0.12.0.111 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6f::1/128 + Ethernet1: + ipv6: fc00:a::1ba/126 + bp_interface: + ipv6: fc00:b::6f/64 + + ARISTA110T1: + properties: + - common + bgp: + router-id: 0.12.0.112 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1bd + interfaces: + Loopback0: + ipv6: fc00:c:c:70::1/128 + Ethernet1: + ipv6: fc00:a::1be/126 + bp_interface: + ipv6: 
fc00:b::70/64 + + ARISTA111T1: + properties: + - common + bgp: + router-id: 0.12.0.113 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:71::1/128 + Ethernet1: + ipv6: fc00:a::1c2/126 + bp_interface: + ipv6: fc00:b::71/64 + + ARISTA112T1: + properties: + - common + bgp: + router-id: 0.12.0.114 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:72::1/128 + Ethernet1: + ipv6: fc00:a::1c6/126 + bp_interface: + ipv6: fc00:b::72/64 + + ARISTA113T1: + properties: + - common + bgp: + router-id: 0.12.0.115 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:73::1/128 + Ethernet1: + ipv6: fc00:a::1ca/126 + bp_interface: + ipv6: fc00:b::73/64 + + ARISTA114T1: + properties: + - common + bgp: + router-id: 0.12.0.116 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1cd + interfaces: + Loopback0: + ipv6: fc00:c:c:74::1/128 + Ethernet1: + ipv6: fc00:a::1ce/126 + bp_interface: + ipv6: fc00:b::74/64 + + ARISTA115T1: + properties: + - common + bgp: + router-id: 0.12.0.117 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:75::1/128 + Ethernet1: + ipv6: fc00:a::1d2/126 + bp_interface: + ipv6: fc00:b::75/64 + + ARISTA116T1: + properties: + - common + bgp: + router-id: 0.12.0.118 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:76::1/128 + Ethernet1: + ipv6: fc00:a::1d6/126 + bp_interface: + ipv6: fc00:b::76/64 + + ARISTA117T1: + properties: + - common + bgp: + router-id: 0.12.0.119 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:77::1/128 + Ethernet1: + ipv6: fc00:a::1da/126 + bp_interface: + ipv6: fc00:b::77/64 + + ARISTA118T1: + properties: + - common + bgp: + router-id: 0.12.0.120 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1dd + interfaces: + Loopback0: + ipv6: 
fc00:c:c:78::1/128 + Ethernet1: + ipv6: fc00:a::1de/126 + bp_interface: + ipv6: fc00:b::78/64 + + ARISTA119T1: + properties: + - common + bgp: + router-id: 0.12.0.121 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:79::1/128 + Ethernet1: + ipv6: fc00:a::1e2/126 + bp_interface: + ipv6: fc00:b::79/64 + + ARISTA120T1: + properties: + - common + bgp: + router-id: 0.12.0.122 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7a::1/128 + Ethernet1: + ipv6: fc00:a::1e6/126 + bp_interface: + ipv6: fc00:b::7a/64 + + ARISTA121T1: + properties: + - common + bgp: + router-id: 0.12.0.123 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7b::1/128 + Ethernet1: + ipv6: fc00:a::1ea/126 + bp_interface: + ipv6: fc00:b::7b/64 + + ARISTA122T1: + properties: + - common + bgp: + router-id: 0.12.0.124 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1ed + interfaces: + Loopback0: + ipv6: fc00:c:c:7c::1/128 + Ethernet1: + ipv6: fc00:a::1ee/126 + bp_interface: + ipv6: fc00:b::7c/64 + + ARISTA123T1: + properties: + - common + bgp: + router-id: 0.12.0.125 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:7d::1/128 + Ethernet1: + ipv6: fc00:a::1f2/126 + bp_interface: + ipv6: fc00:b::7d/64 + + ARISTA124T1: + properties: + - common + bgp: + router-id: 0.12.0.126 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7e::1/128 + Ethernet1: + ipv6: fc00:a::1f6/126 + bp_interface: + ipv6: fc00:b::7e/64 + + ARISTA125T1: + properties: + - common + bgp: + router-id: 0.12.0.127 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7f::1/128 + Ethernet1: + ipv6: fc00:a::1fa/126 + bp_interface: + ipv6: fc00:b::7f/64 + + ARISTA126T1: + properties: + - common + bgp: + router-id: 0.12.0.128 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::1fd + interfaces: + Loopback0: + ipv6: fc00:c:c:80::1/128 + Ethernet1: + ipv6: fc00:a::1fe/126 + bp_interface: + ipv6: fc00:b::80/64 + + ARISTA127T1: + properties: + - common + bgp: + router-id: 0.12.0.129 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::201 + interfaces: + Loopback0: + ipv6: fc00:c:c:81::1/128 + Ethernet1: + ipv6: fc00:a::202/126 + bp_interface: + ipv6: fc00:b::81/64 + + ARISTA128T1: + properties: + - common + bgp: + router-id: 0.12.0.130 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::205 + interfaces: + Loopback0: + ipv6: fc00:c:c:82::1/128 + Ethernet1: + ipv6: fc00:a::206/126 + bp_interface: + ipv6: fc00:b::82/64 + + ARISTA129T1: + properties: + - common + bgp: + router-id: 0.12.0.131 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::209 + interfaces: + Loopback0: + ipv6: fc00:c:c:83::1/128 + Ethernet1: + ipv6: fc00:a::20a/126 + bp_interface: + ipv6: fc00:b::83/64 + + ARISTA130T1: + properties: + - common + bgp: + router-id: 0.12.0.132 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::20d + interfaces: + Loopback0: + ipv6: fc00:c:c:84::1/128 + Ethernet1: + ipv6: fc00:a::20e/126 + bp_interface: + ipv6: fc00:b::84/64 + + ARISTA131T1: + properties: + - common + bgp: + router-id: 0.12.0.133 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::211 + interfaces: + Loopback0: + ipv6: fc00:c:c:85::1/128 + Ethernet1: + ipv6: fc00:a::212/126 + bp_interface: + ipv6: fc00:b::85/64 + + ARISTA132T1: + properties: + - common + bgp: + router-id: 0.12.0.134 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::215 + interfaces: + Loopback0: + ipv6: fc00:c:c:86::1/128 + Ethernet1: + ipv6: fc00:a::216/126 + bp_interface: + ipv6: fc00:b::86/64 + + ARISTA133T1: + properties: + - common + bgp: + router-id: 0.12.0.135 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::219 + interfaces: + Loopback0: + ipv6: fc00:c:c:87::1/128 + Ethernet1: + ipv6: fc00:a::21a/126 + bp_interface: + ipv6: fc00:b::87/64 + + ARISTA134T1: + 
properties: + - common + bgp: + router-id: 0.12.0.136 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::21d + interfaces: + Loopback0: + ipv6: fc00:c:c:88::1/128 + Ethernet1: + ipv6: fc00:a::21e/126 + bp_interface: + ipv6: fc00:b::88/64 + + ARISTA135T1: + properties: + - common + bgp: + router-id: 0.12.0.137 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::221 + interfaces: + Loopback0: + ipv6: fc00:c:c:89::1/128 + Ethernet1: + ipv6: fc00:a::222/126 + bp_interface: + ipv6: fc00:b::89/64 + + ARISTA136T1: + properties: + - common + bgp: + router-id: 0.12.0.138 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::225 + interfaces: + Loopback0: + ipv6: fc00:c:c:8a::1/128 + Ethernet1: + ipv6: fc00:a::226/126 + bp_interface: + ipv6: fc00:b::8a/64 + + ARISTA137T1: + properties: + - common + bgp: + router-id: 0.12.0.139 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::229 + interfaces: + Loopback0: + ipv6: fc00:c:c:8b::1/128 + Ethernet1: + ipv6: fc00:a::22a/126 + bp_interface: + ipv6: fc00:b::8b/64 + + ARISTA138T1: + properties: + - common + bgp: + router-id: 0.12.0.140 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::22d + interfaces: + Loopback0: + ipv6: fc00:c:c:8c::1/128 + Ethernet1: + ipv6: fc00:a::22e/126 + bp_interface: + ipv6: fc00:b::8c/64 + + ARISTA139T1: + properties: + - common + bgp: + router-id: 0.12.0.141 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::231 + interfaces: + Loopback0: + ipv6: fc00:c:c:8d::1/128 + Ethernet1: + ipv6: fc00:a::232/126 + bp_interface: + ipv6: fc00:b::8d/64 + + ARISTA140T1: + properties: + - common + bgp: + router-id: 0.12.0.142 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::235 + interfaces: + Loopback0: + ipv6: fc00:c:c:8e::1/128 + Ethernet1: + ipv6: fc00:a::236/126 + bp_interface: + ipv6: fc00:b::8e/64 + + ARISTA141T1: + properties: + - common + bgp: + router-id: 0.12.0.143 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::239 + interfaces: + Loopback0: + ipv6: fc00:c:c:8f::1/128 + Ethernet1: + ipv6: 
fc00:a::23a/126 + bp_interface: + ipv6: fc00:b::8f/64 + + ARISTA142T1: + properties: + - common + bgp: + router-id: 0.12.0.144 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::23d + interfaces: + Loopback0: + ipv6: fc00:c:c:90::1/128 + Ethernet1: + ipv6: fc00:a::23e/126 + bp_interface: + ipv6: fc00:b::90/64 + + ARISTA143T1: + properties: + - common + bgp: + router-id: 0.12.0.145 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::241 + interfaces: + Loopback0: + ipv6: fc00:c:c:91::1/128 + Ethernet1: + ipv6: fc00:a::242/126 + bp_interface: + ipv6: fc00:b::91/64 + + ARISTA144T1: + properties: + - common + bgp: + router-id: 0.12.0.146 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::245 + interfaces: + Loopback0: + ipv6: fc00:c:c:92::1/128 + Ethernet1: + ipv6: fc00:a::246/126 + bp_interface: + ipv6: fc00:b::92/64 + + ARISTA145T1: + properties: + - common + bgp: + router-id: 0.12.0.147 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::249 + interfaces: + Loopback0: + ipv6: fc00:c:c:93::1/128 + Ethernet1: + ipv6: fc00:a::24a/126 + bp_interface: + ipv6: fc00:b::93/64 + + ARISTA146T1: + properties: + - common + bgp: + router-id: 0.12.0.148 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::24d + interfaces: + Loopback0: + ipv6: fc00:c:c:94::1/128 + Ethernet1: + ipv6: fc00:a::24e/126 + bp_interface: + ipv6: fc00:b::94/64 + + ARISTA147T1: + properties: + - common + bgp: + router-id: 0.12.0.149 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::251 + interfaces: + Loopback0: + ipv6: fc00:c:c:95::1/128 + Ethernet1: + ipv6: fc00:a::252/126 + bp_interface: + ipv6: fc00:b::95/64 + + ARISTA148T1: + properties: + - common + bgp: + router-id: 0.12.0.150 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::255 + interfaces: + Loopback0: + ipv6: fc00:c:c:96::1/128 + Ethernet1: + ipv6: fc00:a::256/126 + bp_interface: + ipv6: fc00:b::96/64 + + ARISTA149T1: + properties: + - common + bgp: + router-id: 0.12.0.151 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::259 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:97::1/128 + Ethernet1: + ipv6: fc00:a::25a/126 + bp_interface: + ipv6: fc00:b::97/64 + + ARISTA150T1: + properties: + - common + bgp: + router-id: 0.12.0.152 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::25d + interfaces: + Loopback0: + ipv6: fc00:c:c:98::1/128 + Ethernet1: + ipv6: fc00:a::25e/126 + bp_interface: + ipv6: fc00:b::98/64 + + ARISTA151T1: + properties: + - common + bgp: + router-id: 0.12.0.153 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::261 + interfaces: + Loopback0: + ipv6: fc00:c:c:99::1/128 + Ethernet1: + ipv6: fc00:a::262/126 + bp_interface: + ipv6: fc00:b::99/64 + + ARISTA152T1: + properties: + - common + bgp: + router-id: 0.12.0.154 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::265 + interfaces: + Loopback0: + ipv6: fc00:c:c:9a::1/128 + Ethernet1: + ipv6: fc00:a::266/126 + bp_interface: + ipv6: fc00:b::9a/64 + + ARISTA153T1: + properties: + - common + bgp: + router-id: 0.12.0.155 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::269 + interfaces: + Loopback0: + ipv6: fc00:c:c:9b::1/128 + Ethernet1: + ipv6: fc00:a::26a/126 + bp_interface: + ipv6: fc00:b::9b/64 + + ARISTA154T1: + properties: + - common + bgp: + router-id: 0.12.0.156 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::26d + interfaces: + Loopback0: + ipv6: fc00:c:c:9c::1/128 + Ethernet1: + ipv6: fc00:a::26e/126 + bp_interface: + ipv6: fc00:b::9c/64 + + ARISTA155T1: + properties: + - common + bgp: + router-id: 0.12.0.157 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::271 + interfaces: + Loopback0: + ipv6: fc00:c:c:9d::1/128 + Ethernet1: + ipv6: fc00:a::272/126 + bp_interface: + ipv6: fc00:b::9d/64 + + ARISTA156T1: + properties: + - common + bgp: + router-id: 0.12.0.158 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::275 + interfaces: + Loopback0: + ipv6: fc00:c:c:9e::1/128 + Ethernet1: + ipv6: fc00:a::276/126 + bp_interface: + ipv6: fc00:b::9e/64 + + ARISTA157T1: + properties: + - common + bgp: + 
router-id: 0.12.0.159 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::279 + interfaces: + Loopback0: + ipv6: fc00:c:c:9f::1/128 + Ethernet1: + ipv6: fc00:a::27a/126 + bp_interface: + ipv6: fc00:b::9f/64 + + ARISTA158T1: + properties: + - common + bgp: + router-id: 0.12.0.160 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::27d + interfaces: + Loopback0: + ipv6: fc00:c:c:a0::1/128 + Ethernet1: + ipv6: fc00:a::27e/126 + bp_interface: + ipv6: fc00:b::a0/64 + + ARISTA159T1: + properties: + - common + bgp: + router-id: 0.12.0.161 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::281 + interfaces: + Loopback0: + ipv6: fc00:c:c:a1::1/128 + Ethernet1: + ipv6: fc00:a::282/126 + bp_interface: + ipv6: fc00:b::a1/64 + + ARISTA160T1: + properties: + - common + bgp: + router-id: 0.12.0.162 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::285 + interfaces: + Loopback0: + ipv6: fc00:c:c:a2::1/128 + Ethernet1: + ipv6: fc00:a::286/126 + bp_interface: + ipv6: fc00:b::a2/64 + + ARISTA161T1: + properties: + - common + bgp: + router-id: 0.12.0.163 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::289 + interfaces: + Loopback0: + ipv6: fc00:c:c:a3::1/128 + Ethernet1: + ipv6: fc00:a::28a/126 + bp_interface: + ipv6: fc00:b::a3/64 + + ARISTA162T1: + properties: + - common + bgp: + router-id: 0.12.0.164 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::28d + interfaces: + Loopback0: + ipv6: fc00:c:c:a4::1/128 + Ethernet1: + ipv6: fc00:a::28e/126 + bp_interface: + ipv6: fc00:b::a4/64 + + ARISTA163T1: + properties: + - common + bgp: + router-id: 0.12.0.165 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::291 + interfaces: + Loopback0: + ipv6: fc00:c:c:a5::1/128 + Ethernet1: + ipv6: fc00:a::292/126 + bp_interface: + ipv6: fc00:b::a5/64 + + ARISTA164T1: + properties: + - common + bgp: + router-id: 0.12.0.166 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::295 + interfaces: + Loopback0: + ipv6: fc00:c:c:a6::1/128 + Ethernet1: + ipv6: fc00:a::296/126 + bp_interface: 
+ ipv6: fc00:b::a6/64 + + ARISTA165T1: + properties: + - common + bgp: + router-id: 0.12.0.167 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::299 + interfaces: + Loopback0: + ipv6: fc00:c:c:a7::1/128 + Ethernet1: + ipv6: fc00:a::29a/126 + bp_interface: + ipv6: fc00:b::a7/64 + + ARISTA166T1: + properties: + - common + bgp: + router-id: 0.12.0.168 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::29d + interfaces: + Loopback0: + ipv6: fc00:c:c:a8::1/128 + Ethernet1: + ipv6: fc00:a::29e/126 + bp_interface: + ipv6: fc00:b::a8/64 + + ARISTA167T1: + properties: + - common + bgp: + router-id: 0.12.0.169 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:a9::1/128 + Ethernet1: + ipv6: fc00:a::2a2/126 + bp_interface: + ipv6: fc00:b::a9/64 + + ARISTA168T1: + properties: + - common + bgp: + router-id: 0.12.0.170 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:aa::1/128 + Ethernet1: + ipv6: fc00:a::2a6/126 + bp_interface: + ipv6: fc00:b::aa/64 + + ARISTA169T1: + properties: + - common + bgp: + router-id: 0.12.0.171 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ab::1/128 + Ethernet1: + ipv6: fc00:a::2aa/126 + bp_interface: + ipv6: fc00:b::ab/64 + + ARISTA170T1: + properties: + - common + bgp: + router-id: 0.12.0.172 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ac::1/128 + Ethernet1: + ipv6: fc00:a::2ae/126 + bp_interface: + ipv6: fc00:b::ac/64 + + ARISTA171T1: + properties: + - common + bgp: + router-id: 0.12.0.173 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ad::1/128 + Ethernet1: + ipv6: fc00:a::2b2/126 + bp_interface: + ipv6: fc00:b::ad/64 + + ARISTA172T1: + properties: + - common + bgp: + router-id: 0.12.0.174 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b5 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:ae::1/128 + Ethernet1: + ipv6: fc00:a::2b6/126 + bp_interface: + ipv6: fc00:b::ae/64 + + ARISTA173T1: + properties: + - common + bgp: + router-id: 0.12.0.175 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:af::1/128 + Ethernet1: + ipv6: fc00:a::2ba/126 + bp_interface: + ipv6: fc00:b::af/64 + + ARISTA174T1: + properties: + - common + bgp: + router-id: 0.12.0.176 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2bd + interfaces: + Loopback0: + ipv6: fc00:c:c:b0::1/128 + Ethernet1: + ipv6: fc00:a::2be/126 + bp_interface: + ipv6: fc00:b::b0/64 + + ARISTA175T1: + properties: + - common + bgp: + router-id: 0.12.0.177 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b1::1/128 + Ethernet1: + ipv6: fc00:a::2c2/126 + bp_interface: + ipv6: fc00:b::b1/64 + + ARISTA176T1: + properties: + - common + bgp: + router-id: 0.12.0.178 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b2::1/128 + Ethernet1: + ipv6: fc00:a::2c6/126 + bp_interface: + ipv6: fc00:b::b2/64 + + ARISTA177T1: + properties: + - common + bgp: + router-id: 0.12.0.179 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b3::1/128 + Ethernet1: + ipv6: fc00:a::2ca/126 + bp_interface: + ipv6: fc00:b::b3/64 + + ARISTA178T1: + properties: + - common + bgp: + router-id: 0.12.0.180 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2cd + interfaces: + Loopback0: + ipv6: fc00:c:c:b4::1/128 + Ethernet1: + ipv6: fc00:a::2ce/126 + bp_interface: + ipv6: fc00:b::b4/64 + + ARISTA179T1: + properties: + - common + bgp: + router-id: 0.12.0.181 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b5::1/128 + Ethernet1: + ipv6: fc00:a::2d2/126 + bp_interface: + ipv6: fc00:b::b5/64 + + ARISTA180T1: + properties: + - common + bgp: + router-id: 0.12.0.182 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::2d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b6::1/128 + Ethernet1: + ipv6: fc00:a::2d6/126 + bp_interface: + ipv6: fc00:b::b6/64 + + ARISTA181T1: + properties: + - common + bgp: + router-id: 0.12.0.183 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b7::1/128 + Ethernet1: + ipv6: fc00:a::2da/126 + bp_interface: + ipv6: fc00:b::b7/64 + + ARISTA182T1: + properties: + - common + bgp: + router-id: 0.12.0.184 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2dd + interfaces: + Loopback0: + ipv6: fc00:c:c:b8::1/128 + Ethernet1: + ipv6: fc00:a::2de/126 + bp_interface: + ipv6: fc00:b::b8/64 + + ARISTA183T1: + properties: + - common + bgp: + router-id: 0.12.0.185 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b9::1/128 + Ethernet1: + ipv6: fc00:a::2e2/126 + bp_interface: + ipv6: fc00:b::b9/64 + + ARISTA184T1: + properties: + - common + bgp: + router-id: 0.12.0.186 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ba::1/128 + Ethernet1: + ipv6: fc00:a::2e6/126 + bp_interface: + ipv6: fc00:b::ba/64 + + ARISTA185T1: + properties: + - common + bgp: + router-id: 0.12.0.187 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bb::1/128 + Ethernet1: + ipv6: fc00:a::2ea/126 + bp_interface: + ipv6: fc00:b::bb/64 + + ARISTA186T1: + properties: + - common + bgp: + router-id: 0.12.0.188 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2ed + interfaces: + Loopback0: + ipv6: fc00:c:c:bc::1/128 + Ethernet1: + ipv6: fc00:a::2ee/126 + bp_interface: + ipv6: fc00:b::bc/64 + + ARISTA187T1: + properties: + - common + bgp: + router-id: 0.12.0.189 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:bd::1/128 + Ethernet1: + ipv6: fc00:a::2f2/126 + bp_interface: + ipv6: fc00:b::bd/64 + + ARISTA188T1: + 
properties: + - common + bgp: + router-id: 0.12.0.190 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:be::1/128 + Ethernet1: + ipv6: fc00:a::2f6/126 + bp_interface: + ipv6: fc00:b::be/64 + + ARISTA189T1: + properties: + - common + bgp: + router-id: 0.12.0.191 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bf::1/128 + Ethernet1: + ipv6: fc00:a::2fa/126 + bp_interface: + ipv6: fc00:b::bf/64 + + ARISTA190T1: + properties: + - common + bgp: + router-id: 0.12.0.192 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2fd + interfaces: + Loopback0: + ipv6: fc00:c:c:c0::1/128 + Ethernet1: + ipv6: fc00:a::2fe/126 + bp_interface: + ipv6: fc00:b::c0/64 + + ARISTA191T1: + properties: + - common + bgp: + router-id: 0.12.0.193 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::301 + interfaces: + Loopback0: + ipv6: fc00:c:c:c1::1/128 + Ethernet1: + ipv6: fc00:a::302/126 + bp_interface: + ipv6: fc00:b::c1/64 + + ARISTA192T1: + properties: + - common + bgp: + router-id: 0.12.0.194 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::305 + interfaces: + Loopback0: + ipv6: fc00:c:c:c2::1/128 + Ethernet1: + ipv6: fc00:a::306/126 + bp_interface: + ipv6: fc00:b::c2/64 + + ARISTA193T1: + properties: + - common + bgp: + router-id: 0.12.0.195 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::309 + interfaces: + Loopback0: + ipv6: fc00:c:c:c3::1/128 + Ethernet1: + ipv6: fc00:a::30a/126 + bp_interface: + ipv6: fc00:b::c3/64 + + ARISTA194T1: + properties: + - common + bgp: + router-id: 0.12.0.196 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::30d + interfaces: + Loopback0: + ipv6: fc00:c:c:c4::1/128 + Ethernet1: + ipv6: fc00:a::30e/126 + bp_interface: + ipv6: fc00:b::c4/64 + + ARISTA195T1: + properties: + - common + bgp: + router-id: 0.12.0.197 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::311 + interfaces: + Loopback0: + ipv6: fc00:c:c:c5::1/128 + Ethernet1: + ipv6: 
fc00:a::312/126 + bp_interface: + ipv6: fc00:b::c5/64 + + ARISTA196T1: + properties: + - common + bgp: + router-id: 0.12.0.198 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::315 + interfaces: + Loopback0: + ipv6: fc00:c:c:c6::1/128 + Ethernet1: + ipv6: fc00:a::316/126 + bp_interface: + ipv6: fc00:b::c6/64 + + ARISTA197T1: + properties: + - common + bgp: + router-id: 0.12.0.199 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::319 + interfaces: + Loopback0: + ipv6: fc00:c:c:c7::1/128 + Ethernet1: + ipv6: fc00:a::31a/126 + bp_interface: + ipv6: fc00:b::c7/64 + + ARISTA198T1: + properties: + - common + bgp: + router-id: 0.12.0.200 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::31d + interfaces: + Loopback0: + ipv6: fc00:c:c:c8::1/128 + Ethernet1: + ipv6: fc00:a::31e/126 + bp_interface: + ipv6: fc00:b::c8/64 + + ARISTA199T1: + properties: + - common + bgp: + router-id: 0.12.0.201 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::321 + interfaces: + Loopback0: + ipv6: fc00:c:c:c9::1/128 + Ethernet1: + ipv6: fc00:a::322/126 + bp_interface: + ipv6: fc00:b::c9/64 + + ARISTA200T1: + properties: + - common + bgp: + router-id: 0.12.0.202 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::325 + interfaces: + Loopback0: + ipv6: fc00:c:c:ca::1/128 + Ethernet1: + ipv6: fc00:a::326/126 + bp_interface: + ipv6: fc00:b::ca/64 + + ARISTA201T1: + properties: + - common + bgp: + router-id: 0.12.0.203 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::329 + interfaces: + Loopback0: + ipv6: fc00:c:c:cb::1/128 + Ethernet1: + ipv6: fc00:a::32a/126 + bp_interface: + ipv6: fc00:b::cb/64 + + ARISTA202T1: + properties: + - common + bgp: + router-id: 0.12.0.204 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::32d + interfaces: + Loopback0: + ipv6: fc00:c:c:cc::1/128 + Ethernet1: + ipv6: fc00:a::32e/126 + bp_interface: + ipv6: fc00:b::cc/64 + + ARISTA203T1: + properties: + - common + bgp: + router-id: 0.12.0.205 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::331 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:cd::1/128 + Ethernet1: + ipv6: fc00:a::332/126 + bp_interface: + ipv6: fc00:b::cd/64 + + ARISTA204T1: + properties: + - common + bgp: + router-id: 0.12.0.206 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::335 + interfaces: + Loopback0: + ipv6: fc00:c:c:ce::1/128 + Ethernet1: + ipv6: fc00:a::336/126 + bp_interface: + ipv6: fc00:b::ce/64 + + ARISTA205T1: + properties: + - common + bgp: + router-id: 0.12.0.207 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::339 + interfaces: + Loopback0: + ipv6: fc00:c:c:cf::1/128 + Ethernet1: + ipv6: fc00:a::33a/126 + bp_interface: + ipv6: fc00:b::cf/64 + + ARISTA206T1: + properties: + - common + bgp: + router-id: 0.12.0.208 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::33d + interfaces: + Loopback0: + ipv6: fc00:c:c:d0::1/128 + Ethernet1: + ipv6: fc00:a::33e/126 + bp_interface: + ipv6: fc00:b::d0/64 + + ARISTA207T1: + properties: + - common + bgp: + router-id: 0.12.0.209 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::341 + interfaces: + Loopback0: + ipv6: fc00:c:c:d1::1/128 + Ethernet1: + ipv6: fc00:a::342/126 + bp_interface: + ipv6: fc00:b::d1/64 + + ARISTA208T1: + properties: + - common + bgp: + router-id: 0.12.0.210 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::345 + interfaces: + Loopback0: + ipv6: fc00:c:c:d2::1/128 + Ethernet1: + ipv6: fc00:a::346/126 + bp_interface: + ipv6: fc00:b::d2/64 + + ARISTA209T1: + properties: + - common + bgp: + router-id: 0.12.0.211 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::349 + interfaces: + Loopback0: + ipv6: fc00:c:c:d3::1/128 + Ethernet1: + ipv6: fc00:a::34a/126 + bp_interface: + ipv6: fc00:b::d3/64 + + ARISTA210T1: + properties: + - common + bgp: + router-id: 0.12.0.212 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::34d + interfaces: + Loopback0: + ipv6: fc00:c:c:d4::1/128 + Ethernet1: + ipv6: fc00:a::34e/126 + bp_interface: + ipv6: fc00:b::d4/64 + + ARISTA211T1: + properties: + - common + bgp: + 
router-id: 0.12.0.213 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::351 + interfaces: + Loopback0: + ipv6: fc00:c:c:d5::1/128 + Ethernet1: + ipv6: fc00:a::352/126 + bp_interface: + ipv6: fc00:b::d5/64 + + ARISTA212T1: + properties: + - common + bgp: + router-id: 0.12.0.214 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::355 + interfaces: + Loopback0: + ipv6: fc00:c:c:d6::1/128 + Ethernet1: + ipv6: fc00:a::356/126 + bp_interface: + ipv6: fc00:b::d6/64 + + ARISTA213T1: + properties: + - common + bgp: + router-id: 0.12.0.215 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::359 + interfaces: + Loopback0: + ipv6: fc00:c:c:d7::1/128 + Ethernet1: + ipv6: fc00:a::35a/126 + bp_interface: + ipv6: fc00:b::d7/64 + + ARISTA214T1: + properties: + - common + bgp: + router-id: 0.12.0.216 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::35d + interfaces: + Loopback0: + ipv6: fc00:c:c:d8::1/128 + Ethernet1: + ipv6: fc00:a::35e/126 + bp_interface: + ipv6: fc00:b::d8/64 + + ARISTA215T1: + properties: + - common + bgp: + router-id: 0.12.0.217 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::361 + interfaces: + Loopback0: + ipv6: fc00:c:c:d9::1/128 + Ethernet1: + ipv6: fc00:a::362/126 + bp_interface: + ipv6: fc00:b::d9/64 + + ARISTA216T1: + properties: + - common + bgp: + router-id: 0.12.0.218 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::365 + interfaces: + Loopback0: + ipv6: fc00:c:c:da::1/128 + Ethernet1: + ipv6: fc00:a::366/126 + bp_interface: + ipv6: fc00:b::da/64 + + ARISTA217T1: + properties: + - common + bgp: + router-id: 0.12.0.219 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::369 + interfaces: + Loopback0: + ipv6: fc00:c:c:db::1/128 + Ethernet1: + ipv6: fc00:a::36a/126 + bp_interface: + ipv6: fc00:b::db/64 + + ARISTA218T1: + properties: + - common + bgp: + router-id: 0.12.0.220 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::36d + interfaces: + Loopback0: + ipv6: fc00:c:c:dc::1/128 + Ethernet1: + ipv6: fc00:a::36e/126 + bp_interface: 
+ ipv6: fc00:b::dc/64 + + ARISTA219T1: + properties: + - common + bgp: + router-id: 0.12.0.221 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::371 + interfaces: + Loopback0: + ipv6: fc00:c:c:dd::1/128 + Ethernet1: + ipv6: fc00:a::372/126 + bp_interface: + ipv6: fc00:b::dd/64 + + ARISTA220T1: + properties: + - common + bgp: + router-id: 0.12.0.222 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::375 + interfaces: + Loopback0: + ipv6: fc00:c:c:de::1/128 + Ethernet1: + ipv6: fc00:a::376/126 + bp_interface: + ipv6: fc00:b::de/64 + + ARISTA221T1: + properties: + - common + bgp: + router-id: 0.12.0.223 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::379 + interfaces: + Loopback0: + ipv6: fc00:c:c:df::1/128 + Ethernet1: + ipv6: fc00:a::37a/126 + bp_interface: + ipv6: fc00:b::df/64 + + ARISTA222T1: + properties: + - common + bgp: + router-id: 0.12.0.224 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::37d + interfaces: + Loopback0: + ipv6: fc00:c:c:e0::1/128 + Ethernet1: + ipv6: fc00:a::37e/126 + bp_interface: + ipv6: fc00:b::e0/64 + + ARISTA223T1: + properties: + - common + bgp: + router-id: 0.12.0.225 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::381 + interfaces: + Loopback0: + ipv6: fc00:c:c:e1::1/128 + Ethernet1: + ipv6: fc00:a::382/126 + bp_interface: + ipv6: fc00:b::e1/64 + + ARISTA224T1: + properties: + - common + bgp: + router-id: 0.12.0.226 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::385 + interfaces: + Loopback0: + ipv6: fc00:c:c:e2::1/128 + Ethernet1: + ipv6: fc00:a::386/126 + bp_interface: + ipv6: fc00:b::e2/64 + + ARISTA225T1: + properties: + - common + bgp: + router-id: 0.12.0.227 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::389 + interfaces: + Loopback0: + ipv6: fc00:c:c:e3::1/128 + Ethernet1: + ipv6: fc00:a::38a/126 + bp_interface: + ipv6: fc00:b::e3/64 + + ARISTA226T1: + properties: + - common + bgp: + router-id: 0.12.0.228 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::38d + interfaces: + Loopback0: + ipv6: 
fc00:c:c:e4::1/128 + Ethernet1: + ipv6: fc00:a::38e/126 + bp_interface: + ipv6: fc00:b::e4/64 + + ARISTA227T1: + properties: + - common + bgp: + router-id: 0.12.0.229 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::391 + interfaces: + Loopback0: + ipv6: fc00:c:c:e5::1/128 + Ethernet1: + ipv6: fc00:a::392/126 + bp_interface: + ipv6: fc00:b::e5/64 + + ARISTA228T1: + properties: + - common + bgp: + router-id: 0.12.0.230 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::395 + interfaces: + Loopback0: + ipv6: fc00:c:c:e6::1/128 + Ethernet1: + ipv6: fc00:a::396/126 + bp_interface: + ipv6: fc00:b::e6/64 + + ARISTA229T1: + properties: + - common + bgp: + router-id: 0.12.0.231 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::399 + interfaces: + Loopback0: + ipv6: fc00:c:c:e7::1/128 + Ethernet1: + ipv6: fc00:a::39a/126 + bp_interface: + ipv6: fc00:b::e7/64 + + ARISTA230T1: + properties: + - common + bgp: + router-id: 0.12.0.232 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::39d + interfaces: + Loopback0: + ipv6: fc00:c:c:e8::1/128 + Ethernet1: + ipv6: fc00:a::39e/126 + bp_interface: + ipv6: fc00:b::e8/64 + + ARISTA231T1: + properties: + - common + bgp: + router-id: 0.12.0.233 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:e9::1/128 + Ethernet1: + ipv6: fc00:a::3a2/126 + bp_interface: + ipv6: fc00:b::e9/64 + + ARISTA232T1: + properties: + - common + bgp: + router-id: 0.12.0.234 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ea::1/128 + Ethernet1: + ipv6: fc00:a::3a6/126 + bp_interface: + ipv6: fc00:b::ea/64 + + ARISTA233T1: + properties: + - common + bgp: + router-id: 0.12.0.235 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:eb::1/128 + Ethernet1: + ipv6: fc00:a::3aa/126 + bp_interface: + ipv6: fc00:b::eb/64 + + ARISTA234T1: + properties: + - common + bgp: + router-id: 0.12.0.236 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::3ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ec::1/128 + Ethernet1: + ipv6: fc00:a::3ae/126 + bp_interface: + ipv6: fc00:b::ec/64 + + ARISTA235T1: + properties: + - common + bgp: + router-id: 0.12.0.237 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ed::1/128 + Ethernet1: + ipv6: fc00:a::3b2/126 + bp_interface: + ipv6: fc00:b::ed/64 + + ARISTA236T1: + properties: + - common + bgp: + router-id: 0.12.0.238 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ee::1/128 + Ethernet1: + ipv6: fc00:a::3b6/126 + bp_interface: + ipv6: fc00:b::ee/64 + + ARISTA237T1: + properties: + - common + bgp: + router-id: 0.12.0.239 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ef::1/128 + Ethernet1: + ipv6: fc00:a::3ba/126 + bp_interface: + ipv6: fc00:b::ef/64 + + ARISTA238T1: + properties: + - common + bgp: + router-id: 0.12.0.240 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3bd + interfaces: + Loopback0: + ipv6: fc00:c:c:f0::1/128 + Ethernet1: + ipv6: fc00:a::3be/126 + bp_interface: + ipv6: fc00:b::f0/64 + + ARISTA239T1: + properties: + - common + bgp: + router-id: 0.12.0.241 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f1::1/128 + Ethernet1: + ipv6: fc00:a::3c2/126 + bp_interface: + ipv6: fc00:b::f1/64 + + ARISTA240T1: + properties: + - common + bgp: + router-id: 0.12.0.242 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f2::1/128 + Ethernet1: + ipv6: fc00:a::3c6/126 + bp_interface: + ipv6: fc00:b::f2/64 + + ARISTA241T1: + properties: + - common + bgp: + router-id: 0.12.0.243 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f3::1/128 + Ethernet1: + ipv6: fc00:a::3ca/126 + bp_interface: + ipv6: fc00:b::f3/64 + + ARISTA242T1: + 
properties: + - common + bgp: + router-id: 0.12.0.244 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3cd + interfaces: + Loopback0: + ipv6: fc00:c:c:f4::1/128 + Ethernet1: + ipv6: fc00:a::3ce/126 + bp_interface: + ipv6: fc00:b::f4/64 + + ARISTA243T1: + properties: + - common + bgp: + router-id: 0.12.0.245 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f5::1/128 + Ethernet1: + ipv6: fc00:a::3d2/126 + bp_interface: + ipv6: fc00:b::f5/64 + + ARISTA244T1: + properties: + - common + bgp: + router-id: 0.12.0.246 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f6::1/128 + Ethernet1: + ipv6: fc00:a::3d6/126 + bp_interface: + ipv6: fc00:b::f6/64 + + ARISTA245T1: + properties: + - common + bgp: + router-id: 0.12.0.247 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f7::1/128 + Ethernet1: + ipv6: fc00:a::3da/126 + bp_interface: + ipv6: fc00:b::f7/64 + + ARISTA246T1: + properties: + - common + bgp: + router-id: 0.12.0.248 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3dd + interfaces: + Loopback0: + ipv6: fc00:c:c:f8::1/128 + Ethernet1: + ipv6: fc00:a::3de/126 + bp_interface: + ipv6: fc00:b::f8/64 + + ARISTA247T1: + properties: + - common + bgp: + router-id: 0.12.0.249 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f9::1/128 + Ethernet1: + ipv6: fc00:a::3e2/126 + bp_interface: + ipv6: fc00:b::f9/64 + + ARISTA248T1: + properties: + - common + bgp: + router-id: 0.12.0.250 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fa::1/128 + Ethernet1: + ipv6: fc00:a::3e6/126 + bp_interface: + ipv6: fc00:b::fa/64 + + ARISTA249T1: + properties: + - common + bgp: + router-id: 0.12.0.251 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:fb::1/128 + Ethernet1: + ipv6: 
fc00:a::3ea/126 + bp_interface: + ipv6: fc00:b::fb/64 + + ARISTA250T1: + properties: + - common + bgp: + router-id: 0.12.0.252 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3ed + interfaces: + Loopback0: + ipv6: fc00:c:c:fc::1/128 + Ethernet1: + ipv6: fc00:a::3ee/126 + bp_interface: + ipv6: fc00:b::fc/64 + + ARISTA251T1: + properties: + - common + bgp: + router-id: 0.12.0.253 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:fd::1/128 + Ethernet1: + ipv6: fc00:a::3f2/126 + bp_interface: + ipv6: fc00:b::fd/64 + + ARISTA252T1: + properties: + - common + bgp: + router-id: 0.12.0.254 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fe::1/128 + Ethernet1: + ipv6: fc00:a::3f6/126 + bp_interface: + ipv6: fc00:b::fe/64 + + ARISTA253T1: + properties: + - common + bgp: + router-id: 0.12.0.255 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ff::1/128 + Ethernet1: + ipv6: fc00:a::3fa/126 + bp_interface: + ipv6: fc00:b::ff/64 + + ARISTA254T1: + properties: + - common + bgp: + router-id: 0.12.1.0 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3fd + interfaces: + Loopback0: + ipv6: fc00:c:c:100::1/128 + Ethernet1: + ipv6: fc00:a::3fe/126 + bp_interface: + ipv6: fc00:b::100/64 diff --git a/ansible/vars/topo_t0-isolated-u510d2.yml b/ansible/vars/topo_t0-isolated-u510d2.yml new file mode 100644 index 00000000000..ebd4e49d896 --- /dev/null +++ b/ansible/vars/topo_t0-isolated-u510d2.yml @@ -0,0 +1,10746 @@ +topology: + host_interfaces: + - 0 + - 1 + VMs: + ARISTA01T1: + vlans: + - 2 + vm_offset: 0 + ARISTA02T1: + vlans: + - 3 + vm_offset: 1 + ARISTA03T1: + vlans: + - 4 + vm_offset: 2 + ARISTA04T1: + vlans: + - 5 + vm_offset: 3 + ARISTA05T1: + vlans: + - 6 + vm_offset: 4 + ARISTA06T1: + vlans: + - 7 + vm_offset: 5 + ARISTA07T1: + vlans: + - 8 + vm_offset: 6 + ARISTA08T1: + vlans: + - 9 + vm_offset: 7 + ARISTA09T1: + 
vlans: + - 10 + vm_offset: 8 + ARISTA10T1: + vlans: + - 11 + vm_offset: 9 + ARISTA11T1: + vlans: + - 12 + vm_offset: 10 + ARISTA12T1: + vlans: + - 13 + vm_offset: 11 + ARISTA13T1: + vlans: + - 14 + vm_offset: 12 + ARISTA14T1: + vlans: + - 15 + vm_offset: 13 + ARISTA15T1: + vlans: + - 16 + vm_offset: 14 + ARISTA16T1: + vlans: + - 17 + vm_offset: 15 + ARISTA17T1: + vlans: + - 18 + vm_offset: 16 + ARISTA18T1: + vlans: + - 19 + vm_offset: 17 + ARISTA19T1: + vlans: + - 20 + vm_offset: 18 + ARISTA20T1: + vlans: + - 21 + vm_offset: 19 + ARISTA21T1: + vlans: + - 22 + vm_offset: 20 + ARISTA22T1: + vlans: + - 23 + vm_offset: 21 + ARISTA23T1: + vlans: + - 24 + vm_offset: 22 + ARISTA24T1: + vlans: + - 25 + vm_offset: 23 + ARISTA25T1: + vlans: + - 26 + vm_offset: 24 + ARISTA26T1: + vlans: + - 27 + vm_offset: 25 + ARISTA27T1: + vlans: + - 28 + vm_offset: 26 + ARISTA28T1: + vlans: + - 29 + vm_offset: 27 + ARISTA29T1: + vlans: + - 30 + vm_offset: 28 + ARISTA30T1: + vlans: + - 31 + vm_offset: 29 + ARISTA31T1: + vlans: + - 32 + vm_offset: 30 + ARISTA32T1: + vlans: + - 33 + vm_offset: 31 + ARISTA33T1: + vlans: + - 34 + vm_offset: 32 + ARISTA34T1: + vlans: + - 35 + vm_offset: 33 + ARISTA35T1: + vlans: + - 36 + vm_offset: 34 + ARISTA36T1: + vlans: + - 37 + vm_offset: 35 + ARISTA37T1: + vlans: + - 38 + vm_offset: 36 + ARISTA38T1: + vlans: + - 39 + vm_offset: 37 + ARISTA39T1: + vlans: + - 40 + vm_offset: 38 + ARISTA40T1: + vlans: + - 41 + vm_offset: 39 + ARISTA41T1: + vlans: + - 42 + vm_offset: 40 + ARISTA42T1: + vlans: + - 43 + vm_offset: 41 + ARISTA43T1: + vlans: + - 44 + vm_offset: 42 + ARISTA44T1: + vlans: + - 45 + vm_offset: 43 + ARISTA45T1: + vlans: + - 46 + vm_offset: 44 + ARISTA46T1: + vlans: + - 47 + vm_offset: 45 + ARISTA47T1: + vlans: + - 48 + vm_offset: 46 + ARISTA48T1: + vlans: + - 49 + vm_offset: 47 + ARISTA49T1: + vlans: + - 50 + vm_offset: 48 + ARISTA50T1: + vlans: + - 51 + vm_offset: 49 + ARISTA51T1: + vlans: + - 52 + vm_offset: 50 + ARISTA52T1: + vlans: + - 53 + 
vm_offset: 51 + ARISTA53T1: + vlans: + - 54 + vm_offset: 52 + ARISTA54T1: + vlans: + - 55 + vm_offset: 53 + ARISTA55T1: + vlans: + - 56 + vm_offset: 54 + ARISTA56T1: + vlans: + - 57 + vm_offset: 55 + ARISTA57T1: + vlans: + - 58 + vm_offset: 56 + ARISTA58T1: + vlans: + - 59 + vm_offset: 57 + ARISTA59T1: + vlans: + - 60 + vm_offset: 58 + ARISTA60T1: + vlans: + - 61 + vm_offset: 59 + ARISTA61T1: + vlans: + - 62 + vm_offset: 60 + ARISTA62T1: + vlans: + - 63 + vm_offset: 61 + ARISTA63T1: + vlans: + - 64 + vm_offset: 62 + ARISTA64T1: + vlans: + - 65 + vm_offset: 63 + ARISTA65T1: + vlans: + - 66 + vm_offset: 64 + ARISTA66T1: + vlans: + - 67 + vm_offset: 65 + ARISTA67T1: + vlans: + - 68 + vm_offset: 66 + ARISTA68T1: + vlans: + - 69 + vm_offset: 67 + ARISTA69T1: + vlans: + - 70 + vm_offset: 68 + ARISTA70T1: + vlans: + - 71 + vm_offset: 69 + ARISTA71T1: + vlans: + - 72 + vm_offset: 70 + ARISTA72T1: + vlans: + - 73 + vm_offset: 71 + ARISTA73T1: + vlans: + - 74 + vm_offset: 72 + ARISTA74T1: + vlans: + - 75 + vm_offset: 73 + ARISTA75T1: + vlans: + - 76 + vm_offset: 74 + ARISTA76T1: + vlans: + - 77 + vm_offset: 75 + ARISTA77T1: + vlans: + - 78 + vm_offset: 76 + ARISTA78T1: + vlans: + - 79 + vm_offset: 77 + ARISTA79T1: + vlans: + - 80 + vm_offset: 78 + ARISTA80T1: + vlans: + - 81 + vm_offset: 79 + ARISTA81T1: + vlans: + - 82 + vm_offset: 80 + ARISTA82T1: + vlans: + - 83 + vm_offset: 81 + ARISTA83T1: + vlans: + - 84 + vm_offset: 82 + ARISTA84T1: + vlans: + - 85 + vm_offset: 83 + ARISTA85T1: + vlans: + - 86 + vm_offset: 84 + ARISTA86T1: + vlans: + - 87 + vm_offset: 85 + ARISTA87T1: + vlans: + - 88 + vm_offset: 86 + ARISTA88T1: + vlans: + - 89 + vm_offset: 87 + ARISTA89T1: + vlans: + - 90 + vm_offset: 88 + ARISTA90T1: + vlans: + - 91 + vm_offset: 89 + ARISTA91T1: + vlans: + - 92 + vm_offset: 90 + ARISTA92T1: + vlans: + - 93 + vm_offset: 91 + ARISTA93T1: + vlans: + - 94 + vm_offset: 92 + ARISTA94T1: + vlans: + - 95 + vm_offset: 93 + ARISTA95T1: + vlans: + - 96 + vm_offset: 94 + 
ARISTA96T1: + vlans: + - 97 + vm_offset: 95 + ARISTA97T1: + vlans: + - 98 + vm_offset: 96 + ARISTA98T1: + vlans: + - 99 + vm_offset: 97 + ARISTA99T1: + vlans: + - 100 + vm_offset: 98 + ARISTA100T1: + vlans: + - 101 + vm_offset: 99 + ARISTA101T1: + vlans: + - 102 + vm_offset: 100 + ARISTA102T1: + vlans: + - 103 + vm_offset: 101 + ARISTA103T1: + vlans: + - 104 + vm_offset: 102 + ARISTA104T1: + vlans: + - 105 + vm_offset: 103 + ARISTA105T1: + vlans: + - 106 + vm_offset: 104 + ARISTA106T1: + vlans: + - 107 + vm_offset: 105 + ARISTA107T1: + vlans: + - 108 + vm_offset: 106 + ARISTA108T1: + vlans: + - 109 + vm_offset: 107 + ARISTA109T1: + vlans: + - 110 + vm_offset: 108 + ARISTA110T1: + vlans: + - 111 + vm_offset: 109 + ARISTA111T1: + vlans: + - 112 + vm_offset: 110 + ARISTA112T1: + vlans: + - 113 + vm_offset: 111 + ARISTA113T1: + vlans: + - 114 + vm_offset: 112 + ARISTA114T1: + vlans: + - 115 + vm_offset: 113 + ARISTA115T1: + vlans: + - 116 + vm_offset: 114 + ARISTA116T1: + vlans: + - 117 + vm_offset: 115 + ARISTA117T1: + vlans: + - 118 + vm_offset: 116 + ARISTA118T1: + vlans: + - 119 + vm_offset: 117 + ARISTA119T1: + vlans: + - 120 + vm_offset: 118 + ARISTA120T1: + vlans: + - 121 + vm_offset: 119 + ARISTA121T1: + vlans: + - 122 + vm_offset: 120 + ARISTA122T1: + vlans: + - 123 + vm_offset: 121 + ARISTA123T1: + vlans: + - 124 + vm_offset: 122 + ARISTA124T1: + vlans: + - 125 + vm_offset: 123 + ARISTA125T1: + vlans: + - 126 + vm_offset: 124 + ARISTA126T1: + vlans: + - 127 + vm_offset: 125 + ARISTA127T1: + vlans: + - 128 + vm_offset: 126 + ARISTA128T1: + vlans: + - 129 + vm_offset: 127 + ARISTA129T1: + vlans: + - 130 + vm_offset: 128 + ARISTA130T1: + vlans: + - 131 + vm_offset: 129 + ARISTA131T1: + vlans: + - 132 + vm_offset: 130 + ARISTA132T1: + vlans: + - 133 + vm_offset: 131 + ARISTA133T1: + vlans: + - 134 + vm_offset: 132 + ARISTA134T1: + vlans: + - 135 + vm_offset: 133 + ARISTA135T1: + vlans: + - 136 + vm_offset: 134 + ARISTA136T1: + vlans: + - 137 + vm_offset: 135 + 
ARISTA137T1: + vlans: + - 138 + vm_offset: 136 + ARISTA138T1: + vlans: + - 139 + vm_offset: 137 + ARISTA139T1: + vlans: + - 140 + vm_offset: 138 + ARISTA140T1: + vlans: + - 141 + vm_offset: 139 + ARISTA141T1: + vlans: + - 142 + vm_offset: 140 + ARISTA142T1: + vlans: + - 143 + vm_offset: 141 + ARISTA143T1: + vlans: + - 144 + vm_offset: 142 + ARISTA144T1: + vlans: + - 145 + vm_offset: 143 + ARISTA145T1: + vlans: + - 146 + vm_offset: 144 + ARISTA146T1: + vlans: + - 147 + vm_offset: 145 + ARISTA147T1: + vlans: + - 148 + vm_offset: 146 + ARISTA148T1: + vlans: + - 149 + vm_offset: 147 + ARISTA149T1: + vlans: + - 150 + vm_offset: 148 + ARISTA150T1: + vlans: + - 151 + vm_offset: 149 + ARISTA151T1: + vlans: + - 152 + vm_offset: 150 + ARISTA152T1: + vlans: + - 153 + vm_offset: 151 + ARISTA153T1: + vlans: + - 154 + vm_offset: 152 + ARISTA154T1: + vlans: + - 155 + vm_offset: 153 + ARISTA155T1: + vlans: + - 156 + vm_offset: 154 + ARISTA156T1: + vlans: + - 157 + vm_offset: 155 + ARISTA157T1: + vlans: + - 158 + vm_offset: 156 + ARISTA158T1: + vlans: + - 159 + vm_offset: 157 + ARISTA159T1: + vlans: + - 160 + vm_offset: 158 + ARISTA160T1: + vlans: + - 161 + vm_offset: 159 + ARISTA161T1: + vlans: + - 162 + vm_offset: 160 + ARISTA162T1: + vlans: + - 163 + vm_offset: 161 + ARISTA163T1: + vlans: + - 164 + vm_offset: 162 + ARISTA164T1: + vlans: + - 165 + vm_offset: 163 + ARISTA165T1: + vlans: + - 166 + vm_offset: 164 + ARISTA166T1: + vlans: + - 167 + vm_offset: 165 + ARISTA167T1: + vlans: + - 168 + vm_offset: 166 + ARISTA168T1: + vlans: + - 169 + vm_offset: 167 + ARISTA169T1: + vlans: + - 170 + vm_offset: 168 + ARISTA170T1: + vlans: + - 171 + vm_offset: 169 + ARISTA171T1: + vlans: + - 172 + vm_offset: 170 + ARISTA172T1: + vlans: + - 173 + vm_offset: 171 + ARISTA173T1: + vlans: + - 174 + vm_offset: 172 + ARISTA174T1: + vlans: + - 175 + vm_offset: 173 + ARISTA175T1: + vlans: + - 176 + vm_offset: 174 + ARISTA176T1: + vlans: + - 177 + vm_offset: 175 + ARISTA177T1: + vlans: + - 178 + 
vm_offset: 176 + ARISTA178T1: + vlans: + - 179 + vm_offset: 177 + ARISTA179T1: + vlans: + - 180 + vm_offset: 178 + ARISTA180T1: + vlans: + - 181 + vm_offset: 179 + ARISTA181T1: + vlans: + - 182 + vm_offset: 180 + ARISTA182T1: + vlans: + - 183 + vm_offset: 181 + ARISTA183T1: + vlans: + - 184 + vm_offset: 182 + ARISTA184T1: + vlans: + - 185 + vm_offset: 183 + ARISTA185T1: + vlans: + - 186 + vm_offset: 184 + ARISTA186T1: + vlans: + - 187 + vm_offset: 185 + ARISTA187T1: + vlans: + - 188 + vm_offset: 186 + ARISTA188T1: + vlans: + - 189 + vm_offset: 187 + ARISTA189T1: + vlans: + - 190 + vm_offset: 188 + ARISTA190T1: + vlans: + - 191 + vm_offset: 189 + ARISTA191T1: + vlans: + - 192 + vm_offset: 190 + ARISTA192T1: + vlans: + - 193 + vm_offset: 191 + ARISTA193T1: + vlans: + - 194 + vm_offset: 192 + ARISTA194T1: + vlans: + - 195 + vm_offset: 193 + ARISTA195T1: + vlans: + - 196 + vm_offset: 194 + ARISTA196T1: + vlans: + - 197 + vm_offset: 195 + ARISTA197T1: + vlans: + - 198 + vm_offset: 196 + ARISTA198T1: + vlans: + - 199 + vm_offset: 197 + ARISTA199T1: + vlans: + - 200 + vm_offset: 198 + ARISTA200T1: + vlans: + - 201 + vm_offset: 199 + ARISTA201T1: + vlans: + - 202 + vm_offset: 200 + ARISTA202T1: + vlans: + - 203 + vm_offset: 201 + ARISTA203T1: + vlans: + - 204 + vm_offset: 202 + ARISTA204T1: + vlans: + - 205 + vm_offset: 203 + ARISTA205T1: + vlans: + - 206 + vm_offset: 204 + ARISTA206T1: + vlans: + - 207 + vm_offset: 205 + ARISTA207T1: + vlans: + - 208 + vm_offset: 206 + ARISTA208T1: + vlans: + - 209 + vm_offset: 207 + ARISTA209T1: + vlans: + - 210 + vm_offset: 208 + ARISTA210T1: + vlans: + - 211 + vm_offset: 209 + ARISTA211T1: + vlans: + - 212 + vm_offset: 210 + ARISTA212T1: + vlans: + - 213 + vm_offset: 211 + ARISTA213T1: + vlans: + - 214 + vm_offset: 212 + ARISTA214T1: + vlans: + - 215 + vm_offset: 213 + ARISTA215T1: + vlans: + - 216 + vm_offset: 214 + ARISTA216T1: + vlans: + - 217 + vm_offset: 215 + ARISTA217T1: + vlans: + - 218 + vm_offset: 216 + ARISTA218T1: + vlans: 
+ - 219 + vm_offset: 217 + ARISTA219T1: + vlans: + - 220 + vm_offset: 218 + ARISTA220T1: + vlans: + - 221 + vm_offset: 219 + ARISTA221T1: + vlans: + - 222 + vm_offset: 220 + ARISTA222T1: + vlans: + - 223 + vm_offset: 221 + ARISTA223T1: + vlans: + - 224 + vm_offset: 222 + ARISTA224T1: + vlans: + - 225 + vm_offset: 223 + ARISTA225T1: + vlans: + - 226 + vm_offset: 224 + ARISTA226T1: + vlans: + - 227 + vm_offset: 225 + ARISTA227T1: + vlans: + - 228 + vm_offset: 226 + ARISTA228T1: + vlans: + - 229 + vm_offset: 227 + ARISTA229T1: + vlans: + - 230 + vm_offset: 228 + ARISTA230T1: + vlans: + - 231 + vm_offset: 229 + ARISTA231T1: + vlans: + - 232 + vm_offset: 230 + ARISTA232T1: + vlans: + - 233 + vm_offset: 231 + ARISTA233T1: + vlans: + - 234 + vm_offset: 232 + ARISTA234T1: + vlans: + - 235 + vm_offset: 233 + ARISTA235T1: + vlans: + - 236 + vm_offset: 234 + ARISTA236T1: + vlans: + - 237 + vm_offset: 235 + ARISTA237T1: + vlans: + - 238 + vm_offset: 236 + ARISTA238T1: + vlans: + - 239 + vm_offset: 237 + ARISTA239T1: + vlans: + - 240 + vm_offset: 238 + ARISTA240T1: + vlans: + - 241 + vm_offset: 239 + ARISTA241T1: + vlans: + - 242 + vm_offset: 240 + ARISTA242T1: + vlans: + - 243 + vm_offset: 241 + ARISTA243T1: + vlans: + - 244 + vm_offset: 242 + ARISTA244T1: + vlans: + - 245 + vm_offset: 243 + ARISTA245T1: + vlans: + - 246 + vm_offset: 244 + ARISTA246T1: + vlans: + - 247 + vm_offset: 245 + ARISTA247T1: + vlans: + - 248 + vm_offset: 246 + ARISTA248T1: + vlans: + - 249 + vm_offset: 247 + ARISTA249T1: + vlans: + - 250 + vm_offset: 248 + ARISTA250T1: + vlans: + - 251 + vm_offset: 249 + ARISTA251T1: + vlans: + - 252 + vm_offset: 250 + ARISTA252T1: + vlans: + - 253 + vm_offset: 251 + ARISTA253T1: + vlans: + - 254 + vm_offset: 252 + ARISTA254T1: + vlans: + - 255 + vm_offset: 253 + ARISTA255T1: + vlans: + - 256 + vm_offset: 254 + ARISTA256T1: + vlans: + - 257 + vm_offset: 255 + ARISTA257T1: + vlans: + - 258 + vm_offset: 256 + ARISTA258T1: + vlans: + - 259 + vm_offset: 257 + ARISTA259T1: 
+ vlans: + - 260 + vm_offset: 258 + ARISTA260T1: + vlans: + - 261 + vm_offset: 259 + ARISTA261T1: + vlans: + - 262 + vm_offset: 260 + ARISTA262T1: + vlans: + - 263 + vm_offset: 261 + ARISTA263T1: + vlans: + - 264 + vm_offset: 262 + ARISTA264T1: + vlans: + - 265 + vm_offset: 263 + ARISTA265T1: + vlans: + - 266 + vm_offset: 264 + ARISTA266T1: + vlans: + - 267 + vm_offset: 265 + ARISTA267T1: + vlans: + - 268 + vm_offset: 266 + ARISTA268T1: + vlans: + - 269 + vm_offset: 267 + ARISTA269T1: + vlans: + - 270 + vm_offset: 268 + ARISTA270T1: + vlans: + - 271 + vm_offset: 269 + ARISTA271T1: + vlans: + - 272 + vm_offset: 270 + ARISTA272T1: + vlans: + - 273 + vm_offset: 271 + ARISTA273T1: + vlans: + - 274 + vm_offset: 272 + ARISTA274T1: + vlans: + - 275 + vm_offset: 273 + ARISTA275T1: + vlans: + - 276 + vm_offset: 274 + ARISTA276T1: + vlans: + - 277 + vm_offset: 275 + ARISTA277T1: + vlans: + - 278 + vm_offset: 276 + ARISTA278T1: + vlans: + - 279 + vm_offset: 277 + ARISTA279T1: + vlans: + - 280 + vm_offset: 278 + ARISTA280T1: + vlans: + - 281 + vm_offset: 279 + ARISTA281T1: + vlans: + - 282 + vm_offset: 280 + ARISTA282T1: + vlans: + - 283 + vm_offset: 281 + ARISTA283T1: + vlans: + - 284 + vm_offset: 282 + ARISTA284T1: + vlans: + - 285 + vm_offset: 283 + ARISTA285T1: + vlans: + - 286 + vm_offset: 284 + ARISTA286T1: + vlans: + - 287 + vm_offset: 285 + ARISTA287T1: + vlans: + - 288 + vm_offset: 286 + ARISTA288T1: + vlans: + - 289 + vm_offset: 287 + ARISTA289T1: + vlans: + - 290 + vm_offset: 288 + ARISTA290T1: + vlans: + - 291 + vm_offset: 289 + ARISTA291T1: + vlans: + - 292 + vm_offset: 290 + ARISTA292T1: + vlans: + - 293 + vm_offset: 291 + ARISTA293T1: + vlans: + - 294 + vm_offset: 292 + ARISTA294T1: + vlans: + - 295 + vm_offset: 293 + ARISTA295T1: + vlans: + - 296 + vm_offset: 294 + ARISTA296T1: + vlans: + - 297 + vm_offset: 295 + ARISTA297T1: + vlans: + - 298 + vm_offset: 296 + ARISTA298T1: + vlans: + - 299 + vm_offset: 297 + ARISTA299T1: + vlans: + - 300 + vm_offset: 298 + 
ARISTA300T1: + vlans: + - 301 + vm_offset: 299 + ARISTA301T1: + vlans: + - 302 + vm_offset: 300 + ARISTA302T1: + vlans: + - 303 + vm_offset: 301 + ARISTA303T1: + vlans: + - 304 + vm_offset: 302 + ARISTA304T1: + vlans: + - 305 + vm_offset: 303 + ARISTA305T1: + vlans: + - 306 + vm_offset: 304 + ARISTA306T1: + vlans: + - 307 + vm_offset: 305 + ARISTA307T1: + vlans: + - 308 + vm_offset: 306 + ARISTA308T1: + vlans: + - 309 + vm_offset: 307 + ARISTA309T1: + vlans: + - 310 + vm_offset: 308 + ARISTA310T1: + vlans: + - 311 + vm_offset: 309 + ARISTA311T1: + vlans: + - 312 + vm_offset: 310 + ARISTA312T1: + vlans: + - 313 + vm_offset: 311 + ARISTA313T1: + vlans: + - 314 + vm_offset: 312 + ARISTA314T1: + vlans: + - 315 + vm_offset: 313 + ARISTA315T1: + vlans: + - 316 + vm_offset: 314 + ARISTA316T1: + vlans: + - 317 + vm_offset: 315 + ARISTA317T1: + vlans: + - 318 + vm_offset: 316 + ARISTA318T1: + vlans: + - 319 + vm_offset: 317 + ARISTA319T1: + vlans: + - 320 + vm_offset: 318 + ARISTA320T1: + vlans: + - 321 + vm_offset: 319 + ARISTA321T1: + vlans: + - 322 + vm_offset: 320 + ARISTA322T1: + vlans: + - 323 + vm_offset: 321 + ARISTA323T1: + vlans: + - 324 + vm_offset: 322 + ARISTA324T1: + vlans: + - 325 + vm_offset: 323 + ARISTA325T1: + vlans: + - 326 + vm_offset: 324 + ARISTA326T1: + vlans: + - 327 + vm_offset: 325 + ARISTA327T1: + vlans: + - 328 + vm_offset: 326 + ARISTA328T1: + vlans: + - 329 + vm_offset: 327 + ARISTA329T1: + vlans: + - 330 + vm_offset: 328 + ARISTA330T1: + vlans: + - 331 + vm_offset: 329 + ARISTA331T1: + vlans: + - 332 + vm_offset: 330 + ARISTA332T1: + vlans: + - 333 + vm_offset: 331 + ARISTA333T1: + vlans: + - 334 + vm_offset: 332 + ARISTA334T1: + vlans: + - 335 + vm_offset: 333 + ARISTA335T1: + vlans: + - 336 + vm_offset: 334 + ARISTA336T1: + vlans: + - 337 + vm_offset: 335 + ARISTA337T1: + vlans: + - 338 + vm_offset: 336 + ARISTA338T1: + vlans: + - 339 + vm_offset: 337 + ARISTA339T1: + vlans: + - 340 + vm_offset: 338 + ARISTA340T1: + vlans: + - 341 + 
vm_offset: 339 + ARISTA341T1: + vlans: + - 342 + vm_offset: 340 + ARISTA342T1: + vlans: + - 343 + vm_offset: 341 + ARISTA343T1: + vlans: + - 344 + vm_offset: 342 + ARISTA344T1: + vlans: + - 345 + vm_offset: 343 + ARISTA345T1: + vlans: + - 346 + vm_offset: 344 + ARISTA346T1: + vlans: + - 347 + vm_offset: 345 + ARISTA347T1: + vlans: + - 348 + vm_offset: 346 + ARISTA348T1: + vlans: + - 349 + vm_offset: 347 + ARISTA349T1: + vlans: + - 350 + vm_offset: 348 + ARISTA350T1: + vlans: + - 351 + vm_offset: 349 + ARISTA351T1: + vlans: + - 352 + vm_offset: 350 + ARISTA352T1: + vlans: + - 353 + vm_offset: 351 + ARISTA353T1: + vlans: + - 354 + vm_offset: 352 + ARISTA354T1: + vlans: + - 355 + vm_offset: 353 + ARISTA355T1: + vlans: + - 356 + vm_offset: 354 + ARISTA356T1: + vlans: + - 357 + vm_offset: 355 + ARISTA357T1: + vlans: + - 358 + vm_offset: 356 + ARISTA358T1: + vlans: + - 359 + vm_offset: 357 + ARISTA359T1: + vlans: + - 360 + vm_offset: 358 + ARISTA360T1: + vlans: + - 361 + vm_offset: 359 + ARISTA361T1: + vlans: + - 362 + vm_offset: 360 + ARISTA362T1: + vlans: + - 363 + vm_offset: 361 + ARISTA363T1: + vlans: + - 364 + vm_offset: 362 + ARISTA364T1: + vlans: + - 365 + vm_offset: 363 + ARISTA365T1: + vlans: + - 366 + vm_offset: 364 + ARISTA366T1: + vlans: + - 367 + vm_offset: 365 + ARISTA367T1: + vlans: + - 368 + vm_offset: 366 + ARISTA368T1: + vlans: + - 369 + vm_offset: 367 + ARISTA369T1: + vlans: + - 370 + vm_offset: 368 + ARISTA370T1: + vlans: + - 371 + vm_offset: 369 + ARISTA371T1: + vlans: + - 372 + vm_offset: 370 + ARISTA372T1: + vlans: + - 373 + vm_offset: 371 + ARISTA373T1: + vlans: + - 374 + vm_offset: 372 + ARISTA374T1: + vlans: + - 375 + vm_offset: 373 + ARISTA375T1: + vlans: + - 376 + vm_offset: 374 + ARISTA376T1: + vlans: + - 377 + vm_offset: 375 + ARISTA377T1: + vlans: + - 378 + vm_offset: 376 + ARISTA378T1: + vlans: + - 379 + vm_offset: 377 + ARISTA379T1: + vlans: + - 380 + vm_offset: 378 + ARISTA380T1: + vlans: + - 381 + vm_offset: 379 + ARISTA381T1: + vlans: 
+ - 382 + vm_offset: 380 + ARISTA382T1: + vlans: + - 383 + vm_offset: 381 + ARISTA383T1: + vlans: + - 384 + vm_offset: 382 + ARISTA384T1: + vlans: + - 385 + vm_offset: 383 + ARISTA385T1: + vlans: + - 386 + vm_offset: 384 + ARISTA386T1: + vlans: + - 387 + vm_offset: 385 + ARISTA387T1: + vlans: + - 388 + vm_offset: 386 + ARISTA388T1: + vlans: + - 389 + vm_offset: 387 + ARISTA389T1: + vlans: + - 390 + vm_offset: 388 + ARISTA390T1: + vlans: + - 391 + vm_offset: 389 + ARISTA391T1: + vlans: + - 392 + vm_offset: 390 + ARISTA392T1: + vlans: + - 393 + vm_offset: 391 + ARISTA393T1: + vlans: + - 394 + vm_offset: 392 + ARISTA394T1: + vlans: + - 395 + vm_offset: 393 + ARISTA395T1: + vlans: + - 396 + vm_offset: 394 + ARISTA396T1: + vlans: + - 397 + vm_offset: 395 + ARISTA397T1: + vlans: + - 398 + vm_offset: 396 + ARISTA398T1: + vlans: + - 399 + vm_offset: 397 + ARISTA399T1: + vlans: + - 400 + vm_offset: 398 + ARISTA400T1: + vlans: + - 401 + vm_offset: 399 + ARISTA401T1: + vlans: + - 402 + vm_offset: 400 + ARISTA402T1: + vlans: + - 403 + vm_offset: 401 + ARISTA403T1: + vlans: + - 404 + vm_offset: 402 + ARISTA404T1: + vlans: + - 405 + vm_offset: 403 + ARISTA405T1: + vlans: + - 406 + vm_offset: 404 + ARISTA406T1: + vlans: + - 407 + vm_offset: 405 + ARISTA407T1: + vlans: + - 408 + vm_offset: 406 + ARISTA408T1: + vlans: + - 409 + vm_offset: 407 + ARISTA409T1: + vlans: + - 410 + vm_offset: 408 + ARISTA410T1: + vlans: + - 411 + vm_offset: 409 + ARISTA411T1: + vlans: + - 412 + vm_offset: 410 + ARISTA412T1: + vlans: + - 413 + vm_offset: 411 + ARISTA413T1: + vlans: + - 414 + vm_offset: 412 + ARISTA414T1: + vlans: + - 415 + vm_offset: 413 + ARISTA415T1: + vlans: + - 416 + vm_offset: 414 + ARISTA416T1: + vlans: + - 417 + vm_offset: 415 + ARISTA417T1: + vlans: + - 418 + vm_offset: 416 + ARISTA418T1: + vlans: + - 419 + vm_offset: 417 + ARISTA419T1: + vlans: + - 420 + vm_offset: 418 + ARISTA420T1: + vlans: + - 421 + vm_offset: 419 + ARISTA421T1: + vlans: + - 422 + vm_offset: 420 + ARISTA422T1: 
+ vlans: + - 423 + vm_offset: 421 + ARISTA423T1: + vlans: + - 424 + vm_offset: 422 + ARISTA424T1: + vlans: + - 425 + vm_offset: 423 + ARISTA425T1: + vlans: + - 426 + vm_offset: 424 + ARISTA426T1: + vlans: + - 427 + vm_offset: 425 + ARISTA427T1: + vlans: + - 428 + vm_offset: 426 + ARISTA428T1: + vlans: + - 429 + vm_offset: 427 + ARISTA429T1: + vlans: + - 430 + vm_offset: 428 + ARISTA430T1: + vlans: + - 431 + vm_offset: 429 + ARISTA431T1: + vlans: + - 432 + vm_offset: 430 + ARISTA432T1: + vlans: + - 433 + vm_offset: 431 + ARISTA433T1: + vlans: + - 434 + vm_offset: 432 + ARISTA434T1: + vlans: + - 435 + vm_offset: 433 + ARISTA435T1: + vlans: + - 436 + vm_offset: 434 + ARISTA436T1: + vlans: + - 437 + vm_offset: 435 + ARISTA437T1: + vlans: + - 438 + vm_offset: 436 + ARISTA438T1: + vlans: + - 439 + vm_offset: 437 + ARISTA439T1: + vlans: + - 440 + vm_offset: 438 + ARISTA440T1: + vlans: + - 441 + vm_offset: 439 + ARISTA441T1: + vlans: + - 442 + vm_offset: 440 + ARISTA442T1: + vlans: + - 443 + vm_offset: 441 + ARISTA443T1: + vlans: + - 444 + vm_offset: 442 + ARISTA444T1: + vlans: + - 445 + vm_offset: 443 + ARISTA445T1: + vlans: + - 446 + vm_offset: 444 + ARISTA446T1: + vlans: + - 447 + vm_offset: 445 + ARISTA447T1: + vlans: + - 448 + vm_offset: 446 + ARISTA448T1: + vlans: + - 449 + vm_offset: 447 + ARISTA449T1: + vlans: + - 450 + vm_offset: 448 + ARISTA450T1: + vlans: + - 451 + vm_offset: 449 + ARISTA451T1: + vlans: + - 452 + vm_offset: 450 + ARISTA452T1: + vlans: + - 453 + vm_offset: 451 + ARISTA453T1: + vlans: + - 454 + vm_offset: 452 + ARISTA454T1: + vlans: + - 455 + vm_offset: 453 + ARISTA455T1: + vlans: + - 456 + vm_offset: 454 + ARISTA456T1: + vlans: + - 457 + vm_offset: 455 + ARISTA457T1: + vlans: + - 458 + vm_offset: 456 + ARISTA458T1: + vlans: + - 459 + vm_offset: 457 + ARISTA459T1: + vlans: + - 460 + vm_offset: 458 + ARISTA460T1: + vlans: + - 461 + vm_offset: 459 + ARISTA461T1: + vlans: + - 462 + vm_offset: 460 + ARISTA462T1: + vlans: + - 463 + vm_offset: 461 + 
ARISTA463T1: + vlans: + - 464 + vm_offset: 462 + ARISTA464T1: + vlans: + - 465 + vm_offset: 463 + ARISTA465T1: + vlans: + - 466 + vm_offset: 464 + ARISTA466T1: + vlans: + - 467 + vm_offset: 465 + ARISTA467T1: + vlans: + - 468 + vm_offset: 466 + ARISTA468T1: + vlans: + - 469 + vm_offset: 467 + ARISTA469T1: + vlans: + - 470 + vm_offset: 468 + ARISTA470T1: + vlans: + - 471 + vm_offset: 469 + ARISTA471T1: + vlans: + - 472 + vm_offset: 470 + ARISTA472T1: + vlans: + - 473 + vm_offset: 471 + ARISTA473T1: + vlans: + - 474 + vm_offset: 472 + ARISTA474T1: + vlans: + - 475 + vm_offset: 473 + ARISTA475T1: + vlans: + - 476 + vm_offset: 474 + ARISTA476T1: + vlans: + - 477 + vm_offset: 475 + ARISTA477T1: + vlans: + - 478 + vm_offset: 476 + ARISTA478T1: + vlans: + - 479 + vm_offset: 477 + ARISTA479T1: + vlans: + - 480 + vm_offset: 478 + ARISTA480T1: + vlans: + - 481 + vm_offset: 479 + ARISTA481T1: + vlans: + - 482 + vm_offset: 480 + ARISTA482T1: + vlans: + - 483 + vm_offset: 481 + ARISTA483T1: + vlans: + - 484 + vm_offset: 482 + ARISTA484T1: + vlans: + - 485 + vm_offset: 483 + ARISTA485T1: + vlans: + - 486 + vm_offset: 484 + ARISTA486T1: + vlans: + - 487 + vm_offset: 485 + ARISTA487T1: + vlans: + - 488 + vm_offset: 486 + ARISTA488T1: + vlans: + - 489 + vm_offset: 487 + ARISTA489T1: + vlans: + - 490 + vm_offset: 488 + ARISTA490T1: + vlans: + - 491 + vm_offset: 489 + ARISTA491T1: + vlans: + - 492 + vm_offset: 490 + ARISTA492T1: + vlans: + - 493 + vm_offset: 491 + ARISTA493T1: + vlans: + - 494 + vm_offset: 492 + ARISTA494T1: + vlans: + - 495 + vm_offset: 493 + ARISTA495T1: + vlans: + - 496 + vm_offset: 494 + ARISTA496T1: + vlans: + - 497 + vm_offset: 495 + ARISTA497T1: + vlans: + - 498 + vm_offset: 496 + ARISTA498T1: + vlans: + - 499 + vm_offset: 497 + ARISTA499T1: + vlans: + - 500 + vm_offset: 498 + ARISTA500T1: + vlans: + - 501 + vm_offset: 499 + ARISTA501T1: + vlans: + - 502 + vm_offset: 500 + ARISTA502T1: + vlans: + - 503 + vm_offset: 501 + ARISTA503T1: + vlans: + - 504 + 
vm_offset: 502 + ARISTA504T1: + vlans: + - 505 + vm_offset: 503 + ARISTA505T1: + vlans: + - 506 + vm_offset: 504 + ARISTA506T1: + vlans: + - 507 + vm_offset: 505 + ARISTA507T1: + vlans: + - 508 + vm_offset: 506 + ARISTA508T1: + vlans: + - 509 + vm_offset: 507 + ARISTA509T1: + vlans: + - 510 + vm_offset: 508 + ARISTA510T1: + vlans: + - 511 + vm_offset: 509 + DUT: + vlan_configs: + default_vlan_config: one_vlan_per_intf + one_vlan_per_intf: + Vlan1000: + id: 1000 + intfs: [0] + prefix_v6: fc00:c:c:0001::/64 + tag: 1000 + Vlan1001: + id: 1001 + intfs: [1] + prefix_v6: fc00:c:c:0002::/64 + tag: 1001 + +configuration_properties: + common: + dut_asn: 4200000000 + dut_type: ToRRouter + swrole: leaf + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + spine_asn: 4200200000 + leaf_asn_start: 4200100000 + tor_asn_start: 4200000000 + failure_rate: 0 + nhipv6: FC0A::FF + +configuration: + ARISTA01T1: + properties: + - common + bgp: + router-id: 0.12.0.3 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3::1/128 + Ethernet1: + ipv6: fc00:a::a/126 + bp_interface: + ipv6: fc00:b::3/64 + + ARISTA02T1: + properties: + - common + bgp: + router-id: 0.12.0.4 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d + interfaces: + Loopback0: + ipv6: fc00:c:c:4::1/128 + Ethernet1: + ipv6: fc00:a::e/126 + bp_interface: + ipv6: fc00:b::4/64 + + ARISTA03T1: + properties: + - common + bgp: + router-id: 0.12.0.5 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::11 + interfaces: + Loopback0: + ipv6: fc00:c:c:5::1/128 + Ethernet1: + ipv6: fc00:a::12/126 + bp_interface: + ipv6: fc00:b::5/64 + + ARISTA04T1: + properties: + - common + bgp: + router-id: 0.12.0.6 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::15 + interfaces: + Loopback0: + ipv6: fc00:c:c:6::1/128 + Ethernet1: + ipv6: fc00:a::16/126 + bp_interface: + ipv6: fc00:b::6/64 + + ARISTA05T1: + properties: + - common + bgp: 
+ router-id: 0.12.0.7 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::19 + interfaces: + Loopback0: + ipv6: fc00:c:c:7::1/128 + Ethernet1: + ipv6: fc00:a::1a/126 + bp_interface: + ipv6: fc00:b::7/64 + + ARISTA06T1: + properties: + - common + bgp: + router-id: 0.12.0.8 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d + interfaces: + Loopback0: + ipv6: fc00:c:c:8::1/128 + Ethernet1: + ipv6: fc00:a::1e/126 + bp_interface: + ipv6: fc00:b::8/64 + + ARISTA07T1: + properties: + - common + bgp: + router-id: 0.12.0.9 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::21 + interfaces: + Loopback0: + ipv6: fc00:c:c:9::1/128 + Ethernet1: + ipv6: fc00:a::22/126 + bp_interface: + ipv6: fc00:b::9/64 + + ARISTA08T1: + properties: + - common + bgp: + router-id: 0.12.0.10 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::25 + interfaces: + Loopback0: + ipv6: fc00:c:c:a::1/128 + Ethernet1: + ipv6: fc00:a::26/126 + bp_interface: + ipv6: fc00:b::a/64 + + ARISTA09T1: + properties: + - common + bgp: + router-id: 0.12.0.11 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::29 + interfaces: + Loopback0: + ipv6: fc00:c:c:b::1/128 + Ethernet1: + ipv6: fc00:a::2a/126 + bp_interface: + ipv6: fc00:b::b/64 + + ARISTA10T1: + properties: + - common + bgp: + router-id: 0.12.0.12 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d + interfaces: + Loopback0: + ipv6: fc00:c:c:c::1/128 + Ethernet1: + ipv6: fc00:a::2e/126 + bp_interface: + ipv6: fc00:b::c/64 + + ARISTA11T1: + properties: + - common + bgp: + router-id: 0.12.0.13 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::31 + interfaces: + Loopback0: + ipv6: fc00:c:c:d::1/128 + Ethernet1: + ipv6: fc00:a::32/126 + bp_interface: + ipv6: fc00:b::d/64 + + ARISTA12T1: + properties: + - common + bgp: + router-id: 0.12.0.14 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::35 + interfaces: + Loopback0: + ipv6: fc00:c:c:e::1/128 + Ethernet1: + ipv6: fc00:a::36/126 + bp_interface: + ipv6: fc00:b::e/64 + + ARISTA13T1: + 
properties: + - common + bgp: + router-id: 0.12.0.15 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::39 + interfaces: + Loopback0: + ipv6: fc00:c:c:f::1/128 + Ethernet1: + ipv6: fc00:a::3a/126 + bp_interface: + ipv6: fc00:b::f/64 + + ARISTA14T1: + properties: + - common + bgp: + router-id: 0.12.0.16 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d + interfaces: + Loopback0: + ipv6: fc00:c:c:10::1/128 + Ethernet1: + ipv6: fc00:a::3e/126 + bp_interface: + ipv6: fc00:b::10/64 + + ARISTA15T1: + properties: + - common + bgp: + router-id: 0.12.0.17 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::41 + interfaces: + Loopback0: + ipv6: fc00:c:c:11::1/128 + Ethernet1: + ipv6: fc00:a::42/126 + bp_interface: + ipv6: fc00:b::11/64 + + ARISTA16T1: + properties: + - common + bgp: + router-id: 0.12.0.18 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::45 + interfaces: + Loopback0: + ipv6: fc00:c:c:12::1/128 + Ethernet1: + ipv6: fc00:a::46/126 + bp_interface: + ipv6: fc00:b::12/64 + + ARISTA17T1: + properties: + - common + bgp: + router-id: 0.12.0.19 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::49 + interfaces: + Loopback0: + ipv6: fc00:c:c:13::1/128 + Ethernet1: + ipv6: fc00:a::4a/126 + bp_interface: + ipv6: fc00:b::13/64 + + ARISTA18T1: + properties: + - common + bgp: + router-id: 0.12.0.20 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4d + interfaces: + Loopback0: + ipv6: fc00:c:c:14::1/128 + Ethernet1: + ipv6: fc00:a::4e/126 + bp_interface: + ipv6: fc00:b::14/64 + + ARISTA19T1: + properties: + - common + bgp: + router-id: 0.12.0.21 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::51 + interfaces: + Loopback0: + ipv6: fc00:c:c:15::1/128 + Ethernet1: + ipv6: fc00:a::52/126 + bp_interface: + ipv6: fc00:b::15/64 + + ARISTA20T1: + properties: + - common + bgp: + router-id: 0.12.0.22 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::55 + interfaces: + Loopback0: + ipv6: fc00:c:c:16::1/128 + Ethernet1: + ipv6: fc00:a::56/126 + bp_interface: + 
ipv6: fc00:b::16/64 + + ARISTA21T1: + properties: + - common + bgp: + router-id: 0.12.0.23 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::59 + interfaces: + Loopback0: + ipv6: fc00:c:c:17::1/128 + Ethernet1: + ipv6: fc00:a::5a/126 + bp_interface: + ipv6: fc00:b::17/64 + + ARISTA22T1: + properties: + - common + bgp: + router-id: 0.12.0.24 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5d + interfaces: + Loopback0: + ipv6: fc00:c:c:18::1/128 + Ethernet1: + ipv6: fc00:a::5e/126 + bp_interface: + ipv6: fc00:b::18/64 + + ARISTA23T1: + properties: + - common + bgp: + router-id: 0.12.0.25 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::61 + interfaces: + Loopback0: + ipv6: fc00:c:c:19::1/128 + Ethernet1: + ipv6: fc00:a::62/126 + bp_interface: + ipv6: fc00:b::19/64 + + ARISTA24T1: + properties: + - common + bgp: + router-id: 0.12.0.26 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::65 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a::1/128 + Ethernet1: + ipv6: fc00:a::66/126 + bp_interface: + ipv6: fc00:b::1a/64 + + ARISTA25T1: + properties: + - common + bgp: + router-id: 0.12.0.27 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::69 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b::1/128 + Ethernet1: + ipv6: fc00:a::6a/126 + bp_interface: + ipv6: fc00:b::1b/64 + + ARISTA26T1: + properties: + - common + bgp: + router-id: 0.12.0.28 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c::1/128 + Ethernet1: + ipv6: fc00:a::6e/126 + bp_interface: + ipv6: fc00:b::1c/64 + + ARISTA27T1: + properties: + - common + bgp: + router-id: 0.12.0.29 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::71 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d::1/128 + Ethernet1: + ipv6: fc00:a::72/126 + bp_interface: + ipv6: fc00:b::1d/64 + + ARISTA28T1: + properties: + - common + bgp: + router-id: 0.12.0.30 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::75 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e::1/128 + Ethernet1: 
+ ipv6: fc00:a::76/126 + bp_interface: + ipv6: fc00:b::1e/64 + + ARISTA29T1: + properties: + - common + bgp: + router-id: 0.12.0.31 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::79 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f::1/128 + Ethernet1: + ipv6: fc00:a::7a/126 + bp_interface: + ipv6: fc00:b::1f/64 + + ARISTA30T1: + properties: + - common + bgp: + router-id: 0.12.0.32 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7d + interfaces: + Loopback0: + ipv6: fc00:c:c:20::1/128 + Ethernet1: + ipv6: fc00:a::7e/126 + bp_interface: + ipv6: fc00:b::20/64 + + ARISTA31T1: + properties: + - common + bgp: + router-id: 0.12.0.33 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::81 + interfaces: + Loopback0: + ipv6: fc00:c:c:21::1/128 + Ethernet1: + ipv6: fc00:a::82/126 + bp_interface: + ipv6: fc00:b::21/64 + + ARISTA32T1: + properties: + - common + bgp: + router-id: 0.12.0.34 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::85 + interfaces: + Loopback0: + ipv6: fc00:c:c:22::1/128 + Ethernet1: + ipv6: fc00:a::86/126 + bp_interface: + ipv6: fc00:b::22/64 + + ARISTA33T1: + properties: + - common + bgp: + router-id: 0.12.0.35 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::89 + interfaces: + Loopback0: + ipv6: fc00:c:c:23::1/128 + Ethernet1: + ipv6: fc00:a::8a/126 + bp_interface: + ipv6: fc00:b::23/64 + + ARISTA34T1: + properties: + - common + bgp: + router-id: 0.12.0.36 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::8d + interfaces: + Loopback0: + ipv6: fc00:c:c:24::1/128 + Ethernet1: + ipv6: fc00:a::8e/126 + bp_interface: + ipv6: fc00:b::24/64 + + ARISTA35T1: + properties: + - common + bgp: + router-id: 0.12.0.37 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::91 + interfaces: + Loopback0: + ipv6: fc00:c:c:25::1/128 + Ethernet1: + ipv6: fc00:a::92/126 + bp_interface: + ipv6: fc00:b::25/64 + + ARISTA36T1: + properties: + - common + bgp: + router-id: 0.12.0.38 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::95 + interfaces: + Loopback0: 
+ ipv6: fc00:c:c:26::1/128 + Ethernet1: + ipv6: fc00:a::96/126 + bp_interface: + ipv6: fc00:b::26/64 + + ARISTA37T1: + properties: + - common + bgp: + router-id: 0.12.0.39 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::99 + interfaces: + Loopback0: + ipv6: fc00:c:c:27::1/128 + Ethernet1: + ipv6: fc00:a::9a/126 + bp_interface: + ipv6: fc00:b::27/64 + + ARISTA38T1: + properties: + - common + bgp: + router-id: 0.12.0.40 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::9d + interfaces: + Loopback0: + ipv6: fc00:c:c:28::1/128 + Ethernet1: + ipv6: fc00:a::9e/126 + bp_interface: + ipv6: fc00:b::28/64 + + ARISTA39T1: + properties: + - common + bgp: + router-id: 0.12.0.41 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:29::1/128 + Ethernet1: + ipv6: fc00:a::a2/126 + bp_interface: + ipv6: fc00:b::29/64 + + ARISTA40T1: + properties: + - common + bgp: + router-id: 0.12.0.42 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2a::1/128 + Ethernet1: + ipv6: fc00:a::a6/126 + bp_interface: + ipv6: fc00:b::2a/64 + + ARISTA41T1: + properties: + - common + bgp: + router-id: 0.12.0.43 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2b::1/128 + Ethernet1: + ipv6: fc00:a::aa/126 + bp_interface: + ipv6: fc00:b::2b/64 + + ARISTA42T1: + properties: + - common + bgp: + router-id: 0.12.0.44 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::ad + interfaces: + Loopback0: + ipv6: fc00:c:c:2c::1/128 + Ethernet1: + ipv6: fc00:a::ae/126 + bp_interface: + ipv6: fc00:b::2c/64 + + ARISTA43T1: + properties: + - common + bgp: + router-id: 0.12.0.45 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:2d::1/128 + Ethernet1: + ipv6: fc00:a::b2/126 + bp_interface: + ipv6: fc00:b::2d/64 + + ARISTA44T1: + properties: + - common + bgp: + router-id: 0.12.0.46 + asn: 4200100000 + peers: + 4200000000: + 
- fc00:a::b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2e::1/128 + Ethernet1: + ipv6: fc00:a::b6/126 + bp_interface: + ipv6: fc00:b::2e/64 + + ARISTA45T1: + properties: + - common + bgp: + router-id: 0.12.0.47 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2f::1/128 + Ethernet1: + ipv6: fc00:a::ba/126 + bp_interface: + ipv6: fc00:b::2f/64 + + ARISTA46T1: + properties: + - common + bgp: + router-id: 0.12.0.48 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::bd + interfaces: + Loopback0: + ipv6: fc00:c:c:30::1/128 + Ethernet1: + ipv6: fc00:a::be/126 + bp_interface: + ipv6: fc00:b::30/64 + + ARISTA47T1: + properties: + - common + bgp: + router-id: 0.12.0.49 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:31::1/128 + Ethernet1: + ipv6: fc00:a::c2/126 + bp_interface: + ipv6: fc00:b::31/64 + + ARISTA48T1: + properties: + - common + bgp: + router-id: 0.12.0.50 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:32::1/128 + Ethernet1: + ipv6: fc00:a::c6/126 + bp_interface: + ipv6: fc00:b::32/64 + + ARISTA49T1: + properties: + - common + bgp: + router-id: 0.12.0.51 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:33::1/128 + Ethernet1: + ipv6: fc00:a::ca/126 + bp_interface: + ipv6: fc00:b::33/64 + + ARISTA50T1: + properties: + - common + bgp: + router-id: 0.12.0.52 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::cd + interfaces: + Loopback0: + ipv6: fc00:c:c:34::1/128 + Ethernet1: + ipv6: fc00:a::ce/126 + bp_interface: + ipv6: fc00:b::34/64 + + ARISTA51T1: + properties: + - common + bgp: + router-id: 0.12.0.53 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:35::1/128 + Ethernet1: + ipv6: fc00:a::d2/126 + bp_interface: + ipv6: fc00:b::35/64 + + ARISTA52T1: + properties: + - common + bgp: + router-id: 0.12.0.54 + 
asn: 4200100000 + peers: + 4200000000: + - fc00:a::d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:36::1/128 + Ethernet1: + ipv6: fc00:a::d6/126 + bp_interface: + ipv6: fc00:b::36/64 + + ARISTA53T1: + properties: + - common + bgp: + router-id: 0.12.0.55 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:37::1/128 + Ethernet1: + ipv6: fc00:a::da/126 + bp_interface: + ipv6: fc00:b::37/64 + + ARISTA54T1: + properties: + - common + bgp: + router-id: 0.12.0.56 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::dd + interfaces: + Loopback0: + ipv6: fc00:c:c:38::1/128 + Ethernet1: + ipv6: fc00:a::de/126 + bp_interface: + ipv6: fc00:b::38/64 + + ARISTA55T1: + properties: + - common + bgp: + router-id: 0.12.0.57 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:39::1/128 + Ethernet1: + ipv6: fc00:a::e2/126 + bp_interface: + ipv6: fc00:b::39/64 + + ARISTA56T1: + properties: + - common + bgp: + router-id: 0.12.0.58 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3a::1/128 + Ethernet1: + ipv6: fc00:a::e6/126 + bp_interface: + ipv6: fc00:b::3a/64 + + ARISTA57T1: + properties: + - common + bgp: + router-id: 0.12.0.59 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3b::1/128 + Ethernet1: + ipv6: fc00:a::ea/126 + bp_interface: + ipv6: fc00:b::3b/64 + + ARISTA58T1: + properties: + - common + bgp: + router-id: 0.12.0.60 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::ed + interfaces: + Loopback0: + ipv6: fc00:c:c:3c::1/128 + Ethernet1: + ipv6: fc00:a::ee/126 + bp_interface: + ipv6: fc00:b::3c/64 + + ARISTA59T1: + properties: + - common + bgp: + router-id: 0.12.0.61 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:3d::1/128 + Ethernet1: + ipv6: fc00:a::f2/126 + bp_interface: + ipv6: fc00:b::3d/64 + + ARISTA60T1: + properties: + 
- common + bgp: + router-id: 0.12.0.62 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3e::1/128 + Ethernet1: + ipv6: fc00:a::f6/126 + bp_interface: + ipv6: fc00:b::3e/64 + + ARISTA61T1: + properties: + - common + bgp: + router-id: 0.12.0.63 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3f::1/128 + Ethernet1: + ipv6: fc00:a::fa/126 + bp_interface: + ipv6: fc00:b::3f/64 + + ARISTA62T1: + properties: + - common + bgp: + router-id: 0.12.0.64 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::fd + interfaces: + Loopback0: + ipv6: fc00:c:c:40::1/128 + Ethernet1: + ipv6: fc00:a::fe/126 + bp_interface: + ipv6: fc00:b::40/64 + + ARISTA63T1: + properties: + - common + bgp: + router-id: 0.12.0.65 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::101 + interfaces: + Loopback0: + ipv6: fc00:c:c:41::1/128 + Ethernet1: + ipv6: fc00:a::102/126 + bp_interface: + ipv6: fc00:b::41/64 + + ARISTA64T1: + properties: + - common + bgp: + router-id: 0.12.0.66 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::105 + interfaces: + Loopback0: + ipv6: fc00:c:c:42::1/128 + Ethernet1: + ipv6: fc00:a::106/126 + bp_interface: + ipv6: fc00:b::42/64 + + ARISTA65T1: + properties: + - common + bgp: + router-id: 0.12.0.67 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::109 + interfaces: + Loopback0: + ipv6: fc00:c:c:43::1/128 + Ethernet1: + ipv6: fc00:a::10a/126 + bp_interface: + ipv6: fc00:b::43/64 + + ARISTA66T1: + properties: + - common + bgp: + router-id: 0.12.0.68 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::10d + interfaces: + Loopback0: + ipv6: fc00:c:c:44::1/128 + Ethernet1: + ipv6: fc00:a::10e/126 + bp_interface: + ipv6: fc00:b::44/64 + + ARISTA67T1: + properties: + - common + bgp: + router-id: 0.12.0.69 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::111 + interfaces: + Loopback0: + ipv6: fc00:c:c:45::1/128 + Ethernet1: + ipv6: fc00:a::112/126 + bp_interface: + 
ipv6: fc00:b::45/64 + + ARISTA68T1: + properties: + - common + bgp: + router-id: 0.12.0.70 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::115 + interfaces: + Loopback0: + ipv6: fc00:c:c:46::1/128 + Ethernet1: + ipv6: fc00:a::116/126 + bp_interface: + ipv6: fc00:b::46/64 + + ARISTA69T1: + properties: + - common + bgp: + router-id: 0.12.0.71 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::119 + interfaces: + Loopback0: + ipv6: fc00:c:c:47::1/128 + Ethernet1: + ipv6: fc00:a::11a/126 + bp_interface: + ipv6: fc00:b::47/64 + + ARISTA70T1: + properties: + - common + bgp: + router-id: 0.12.0.72 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::11d + interfaces: + Loopback0: + ipv6: fc00:c:c:48::1/128 + Ethernet1: + ipv6: fc00:a::11e/126 + bp_interface: + ipv6: fc00:b::48/64 + + ARISTA71T1: + properties: + - common + bgp: + router-id: 0.12.0.73 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::121 + interfaces: + Loopback0: + ipv6: fc00:c:c:49::1/128 + Ethernet1: + ipv6: fc00:a::122/126 + bp_interface: + ipv6: fc00:b::49/64 + + ARISTA72T1: + properties: + - common + bgp: + router-id: 0.12.0.74 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::125 + interfaces: + Loopback0: + ipv6: fc00:c:c:4a::1/128 + Ethernet1: + ipv6: fc00:a::126/126 + bp_interface: + ipv6: fc00:b::4a/64 + + ARISTA73T1: + properties: + - common + bgp: + router-id: 0.12.0.75 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::129 + interfaces: + Loopback0: + ipv6: fc00:c:c:4b::1/128 + Ethernet1: + ipv6: fc00:a::12a/126 + bp_interface: + ipv6: fc00:b::4b/64 + + ARISTA74T1: + properties: + - common + bgp: + router-id: 0.12.0.76 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::12d + interfaces: + Loopback0: + ipv6: fc00:c:c:4c::1/128 + Ethernet1: + ipv6: fc00:a::12e/126 + bp_interface: + ipv6: fc00:b::4c/64 + + ARISTA75T1: + properties: + - common + bgp: + router-id: 0.12.0.77 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::131 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:4d::1/128 + Ethernet1: + ipv6: fc00:a::132/126 + bp_interface: + ipv6: fc00:b::4d/64 + + ARISTA76T1: + properties: + - common + bgp: + router-id: 0.12.0.78 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::135 + interfaces: + Loopback0: + ipv6: fc00:c:c:4e::1/128 + Ethernet1: + ipv6: fc00:a::136/126 + bp_interface: + ipv6: fc00:b::4e/64 + + ARISTA77T1: + properties: + - common + bgp: + router-id: 0.12.0.79 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::139 + interfaces: + Loopback0: + ipv6: fc00:c:c:4f::1/128 + Ethernet1: + ipv6: fc00:a::13a/126 + bp_interface: + ipv6: fc00:b::4f/64 + + ARISTA78T1: + properties: + - common + bgp: + router-id: 0.12.0.80 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::13d + interfaces: + Loopback0: + ipv6: fc00:c:c:50::1/128 + Ethernet1: + ipv6: fc00:a::13e/126 + bp_interface: + ipv6: fc00:b::50/64 + + ARISTA79T1: + properties: + - common + bgp: + router-id: 0.12.0.81 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::141 + interfaces: + Loopback0: + ipv6: fc00:c:c:51::1/128 + Ethernet1: + ipv6: fc00:a::142/126 + bp_interface: + ipv6: fc00:b::51/64 + + ARISTA80T1: + properties: + - common + bgp: + router-id: 0.12.0.82 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::145 + interfaces: + Loopback0: + ipv6: fc00:c:c:52::1/128 + Ethernet1: + ipv6: fc00:a::146/126 + bp_interface: + ipv6: fc00:b::52/64 + + ARISTA81T1: + properties: + - common + bgp: + router-id: 0.12.0.83 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::149 + interfaces: + Loopback0: + ipv6: fc00:c:c:53::1/128 + Ethernet1: + ipv6: fc00:a::14a/126 + bp_interface: + ipv6: fc00:b::53/64 + + ARISTA82T1: + properties: + - common + bgp: + router-id: 0.12.0.84 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::14d + interfaces: + Loopback0: + ipv6: fc00:c:c:54::1/128 + Ethernet1: + ipv6: fc00:a::14e/126 + bp_interface: + ipv6: fc00:b::54/64 + + ARISTA83T1: + properties: + - common + bgp: + router-id: 0.12.0.85 + asn: 4200100000 + peers: + 
4200000000: + - fc00:a::151 + interfaces: + Loopback0: + ipv6: fc00:c:c:55::1/128 + Ethernet1: + ipv6: fc00:a::152/126 + bp_interface: + ipv6: fc00:b::55/64 + + ARISTA84T1: + properties: + - common + bgp: + router-id: 0.12.0.86 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::155 + interfaces: + Loopback0: + ipv6: fc00:c:c:56::1/128 + Ethernet1: + ipv6: fc00:a::156/126 + bp_interface: + ipv6: fc00:b::56/64 + + ARISTA85T1: + properties: + - common + bgp: + router-id: 0.12.0.87 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::159 + interfaces: + Loopback0: + ipv6: fc00:c:c:57::1/128 + Ethernet1: + ipv6: fc00:a::15a/126 + bp_interface: + ipv6: fc00:b::57/64 + + ARISTA86T1: + properties: + - common + bgp: + router-id: 0.12.0.88 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::15d + interfaces: + Loopback0: + ipv6: fc00:c:c:58::1/128 + Ethernet1: + ipv6: fc00:a::15e/126 + bp_interface: + ipv6: fc00:b::58/64 + + ARISTA87T1: + properties: + - common + bgp: + router-id: 0.12.0.89 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::161 + interfaces: + Loopback0: + ipv6: fc00:c:c:59::1/128 + Ethernet1: + ipv6: fc00:a::162/126 + bp_interface: + ipv6: fc00:b::59/64 + + ARISTA88T1: + properties: + - common + bgp: + router-id: 0.12.0.90 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::165 + interfaces: + Loopback0: + ipv6: fc00:c:c:5a::1/128 + Ethernet1: + ipv6: fc00:a::166/126 + bp_interface: + ipv6: fc00:b::5a/64 + + ARISTA89T1: + properties: + - common + bgp: + router-id: 0.12.0.91 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::169 + interfaces: + Loopback0: + ipv6: fc00:c:c:5b::1/128 + Ethernet1: + ipv6: fc00:a::16a/126 + bp_interface: + ipv6: fc00:b::5b/64 + + ARISTA90T1: + properties: + - common + bgp: + router-id: 0.12.0.92 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::16d + interfaces: + Loopback0: + ipv6: fc00:c:c:5c::1/128 + Ethernet1: + ipv6: fc00:a::16e/126 + bp_interface: + ipv6: fc00:b::5c/64 + + ARISTA91T1: + properties: + - common + 
bgp: + router-id: 0.12.0.93 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::171 + interfaces: + Loopback0: + ipv6: fc00:c:c:5d::1/128 + Ethernet1: + ipv6: fc00:a::172/126 + bp_interface: + ipv6: fc00:b::5d/64 + + ARISTA92T1: + properties: + - common + bgp: + router-id: 0.12.0.94 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::175 + interfaces: + Loopback0: + ipv6: fc00:c:c:5e::1/128 + Ethernet1: + ipv6: fc00:a::176/126 + bp_interface: + ipv6: fc00:b::5e/64 + + ARISTA93T1: + properties: + - common + bgp: + router-id: 0.12.0.95 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::179 + interfaces: + Loopback0: + ipv6: fc00:c:c:5f::1/128 + Ethernet1: + ipv6: fc00:a::17a/126 + bp_interface: + ipv6: fc00:b::5f/64 + + ARISTA94T1: + properties: + - common + bgp: + router-id: 0.12.0.96 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::17d + interfaces: + Loopback0: + ipv6: fc00:c:c:60::1/128 + Ethernet1: + ipv6: fc00:a::17e/126 + bp_interface: + ipv6: fc00:b::60/64 + + ARISTA95T1: + properties: + - common + bgp: + router-id: 0.12.0.97 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::181 + interfaces: + Loopback0: + ipv6: fc00:c:c:61::1/128 + Ethernet1: + ipv6: fc00:a::182/126 + bp_interface: + ipv6: fc00:b::61/64 + + ARISTA96T1: + properties: + - common + bgp: + router-id: 0.12.0.98 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::185 + interfaces: + Loopback0: + ipv6: fc00:c:c:62::1/128 + Ethernet1: + ipv6: fc00:a::186/126 + bp_interface: + ipv6: fc00:b::62/64 + + ARISTA97T1: + properties: + - common + bgp: + router-id: 0.12.0.99 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::189 + interfaces: + Loopback0: + ipv6: fc00:c:c:63::1/128 + Ethernet1: + ipv6: fc00:a::18a/126 + bp_interface: + ipv6: fc00:b::63/64 + + ARISTA98T1: + properties: + - common + bgp: + router-id: 0.12.0.100 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::18d + interfaces: + Loopback0: + ipv6: fc00:c:c:64::1/128 + Ethernet1: + ipv6: fc00:a::18e/126 + bp_interface: + ipv6: 
fc00:b::64/64 + + ARISTA99T1: + properties: + - common + bgp: + router-id: 0.12.0.101 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::191 + interfaces: + Loopback0: + ipv6: fc00:c:c:65::1/128 + Ethernet1: + ipv6: fc00:a::192/126 + bp_interface: + ipv6: fc00:b::65/64 + + ARISTA100T1: + properties: + - common + bgp: + router-id: 0.12.0.102 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::195 + interfaces: + Loopback0: + ipv6: fc00:c:c:66::1/128 + Ethernet1: + ipv6: fc00:a::196/126 + bp_interface: + ipv6: fc00:b::66/64 + + ARISTA101T1: + properties: + - common + bgp: + router-id: 0.12.0.103 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::199 + interfaces: + Loopback0: + ipv6: fc00:c:c:67::1/128 + Ethernet1: + ipv6: fc00:a::19a/126 + bp_interface: + ipv6: fc00:b::67/64 + + ARISTA102T1: + properties: + - common + bgp: + router-id: 0.12.0.104 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::19d + interfaces: + Loopback0: + ipv6: fc00:c:c:68::1/128 + Ethernet1: + ipv6: fc00:a::19e/126 + bp_interface: + ipv6: fc00:b::68/64 + + ARISTA103T1: + properties: + - common + bgp: + router-id: 0.12.0.105 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:69::1/128 + Ethernet1: + ipv6: fc00:a::1a2/126 + bp_interface: + ipv6: fc00:b::69/64 + + ARISTA104T1: + properties: + - common + bgp: + router-id: 0.12.0.106 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6a::1/128 + Ethernet1: + ipv6: fc00:a::1a6/126 + bp_interface: + ipv6: fc00:b::6a/64 + + ARISTA105T1: + properties: + - common + bgp: + router-id: 0.12.0.107 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6b::1/128 + Ethernet1: + ipv6: fc00:a::1aa/126 + bp_interface: + ipv6: fc00:b::6b/64 + + ARISTA106T1: + properties: + - common + bgp: + router-id: 0.12.0.108 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1ad + interfaces: + Loopback0: + ipv6: 
fc00:c:c:6c::1/128 + Ethernet1: + ipv6: fc00:a::1ae/126 + bp_interface: + ipv6: fc00:b::6c/64 + + ARISTA107T1: + properties: + - common + bgp: + router-id: 0.12.0.109 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:6d::1/128 + Ethernet1: + ipv6: fc00:a::1b2/126 + bp_interface: + ipv6: fc00:b::6d/64 + + ARISTA108T1: + properties: + - common + bgp: + router-id: 0.12.0.110 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6e::1/128 + Ethernet1: + ipv6: fc00:a::1b6/126 + bp_interface: + ipv6: fc00:b::6e/64 + + ARISTA109T1: + properties: + - common + bgp: + router-id: 0.12.0.111 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6f::1/128 + Ethernet1: + ipv6: fc00:a::1ba/126 + bp_interface: + ipv6: fc00:b::6f/64 + + ARISTA110T1: + properties: + - common + bgp: + router-id: 0.12.0.112 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1bd + interfaces: + Loopback0: + ipv6: fc00:c:c:70::1/128 + Ethernet1: + ipv6: fc00:a::1be/126 + bp_interface: + ipv6: fc00:b::70/64 + + ARISTA111T1: + properties: + - common + bgp: + router-id: 0.12.0.113 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:71::1/128 + Ethernet1: + ipv6: fc00:a::1c2/126 + bp_interface: + ipv6: fc00:b::71/64 + + ARISTA112T1: + properties: + - common + bgp: + router-id: 0.12.0.114 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:72::1/128 + Ethernet1: + ipv6: fc00:a::1c6/126 + bp_interface: + ipv6: fc00:b::72/64 + + ARISTA113T1: + properties: + - common + bgp: + router-id: 0.12.0.115 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:73::1/128 + Ethernet1: + ipv6: fc00:a::1ca/126 + bp_interface: + ipv6: fc00:b::73/64 + + ARISTA114T1: + properties: + - common + bgp: + router-id: 0.12.0.116 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::1cd + interfaces: + Loopback0: + ipv6: fc00:c:c:74::1/128 + Ethernet1: + ipv6: fc00:a::1ce/126 + bp_interface: + ipv6: fc00:b::74/64 + + ARISTA115T1: + properties: + - common + bgp: + router-id: 0.12.0.117 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:75::1/128 + Ethernet1: + ipv6: fc00:a::1d2/126 + bp_interface: + ipv6: fc00:b::75/64 + + ARISTA116T1: + properties: + - common + bgp: + router-id: 0.12.0.118 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:76::1/128 + Ethernet1: + ipv6: fc00:a::1d6/126 + bp_interface: + ipv6: fc00:b::76/64 + + ARISTA117T1: + properties: + - common + bgp: + router-id: 0.12.0.119 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:77::1/128 + Ethernet1: + ipv6: fc00:a::1da/126 + bp_interface: + ipv6: fc00:b::77/64 + + ARISTA118T1: + properties: + - common + bgp: + router-id: 0.12.0.120 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1dd + interfaces: + Loopback0: + ipv6: fc00:c:c:78::1/128 + Ethernet1: + ipv6: fc00:a::1de/126 + bp_interface: + ipv6: fc00:b::78/64 + + ARISTA119T1: + properties: + - common + bgp: + router-id: 0.12.0.121 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:79::1/128 + Ethernet1: + ipv6: fc00:a::1e2/126 + bp_interface: + ipv6: fc00:b::79/64 + + ARISTA120T1: + properties: + - common + bgp: + router-id: 0.12.0.122 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7a::1/128 + Ethernet1: + ipv6: fc00:a::1e6/126 + bp_interface: + ipv6: fc00:b::7a/64 + + ARISTA121T1: + properties: + - common + bgp: + router-id: 0.12.0.123 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7b::1/128 + Ethernet1: + ipv6: fc00:a::1ea/126 + bp_interface: + ipv6: fc00:b::7b/64 + + ARISTA122T1: + 
properties: + - common + bgp: + router-id: 0.12.0.124 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1ed + interfaces: + Loopback0: + ipv6: fc00:c:c:7c::1/128 + Ethernet1: + ipv6: fc00:a::1ee/126 + bp_interface: + ipv6: fc00:b::7c/64 + + ARISTA123T1: + properties: + - common + bgp: + router-id: 0.12.0.125 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:7d::1/128 + Ethernet1: + ipv6: fc00:a::1f2/126 + bp_interface: + ipv6: fc00:b::7d/64 + + ARISTA124T1: + properties: + - common + bgp: + router-id: 0.12.0.126 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7e::1/128 + Ethernet1: + ipv6: fc00:a::1f6/126 + bp_interface: + ipv6: fc00:b::7e/64 + + ARISTA125T1: + properties: + - common + bgp: + router-id: 0.12.0.127 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7f::1/128 + Ethernet1: + ipv6: fc00:a::1fa/126 + bp_interface: + ipv6: fc00:b::7f/64 + + ARISTA126T1: + properties: + - common + bgp: + router-id: 0.12.0.128 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::1fd + interfaces: + Loopback0: + ipv6: fc00:c:c:80::1/128 + Ethernet1: + ipv6: fc00:a::1fe/126 + bp_interface: + ipv6: fc00:b::80/64 + + ARISTA127T1: + properties: + - common + bgp: + router-id: 0.12.0.129 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::201 + interfaces: + Loopback0: + ipv6: fc00:c:c:81::1/128 + Ethernet1: + ipv6: fc00:a::202/126 + bp_interface: + ipv6: fc00:b::81/64 + + ARISTA128T1: + properties: + - common + bgp: + router-id: 0.12.0.130 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::205 + interfaces: + Loopback0: + ipv6: fc00:c:c:82::1/128 + Ethernet1: + ipv6: fc00:a::206/126 + bp_interface: + ipv6: fc00:b::82/64 + + ARISTA129T1: + properties: + - common + bgp: + router-id: 0.12.0.131 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::209 + interfaces: + Loopback0: + ipv6: fc00:c:c:83::1/128 + Ethernet1: + ipv6: 
fc00:a::20a/126 + bp_interface: + ipv6: fc00:b::83/64 + + ARISTA130T1: + properties: + - common + bgp: + router-id: 0.12.0.132 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::20d + interfaces: + Loopback0: + ipv6: fc00:c:c:84::1/128 + Ethernet1: + ipv6: fc00:a::20e/126 + bp_interface: + ipv6: fc00:b::84/64 + + ARISTA131T1: + properties: + - common + bgp: + router-id: 0.12.0.133 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::211 + interfaces: + Loopback0: + ipv6: fc00:c:c:85::1/128 + Ethernet1: + ipv6: fc00:a::212/126 + bp_interface: + ipv6: fc00:b::85/64 + + ARISTA132T1: + properties: + - common + bgp: + router-id: 0.12.0.134 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::215 + interfaces: + Loopback0: + ipv6: fc00:c:c:86::1/128 + Ethernet1: + ipv6: fc00:a::216/126 + bp_interface: + ipv6: fc00:b::86/64 + + ARISTA133T1: + properties: + - common + bgp: + router-id: 0.12.0.135 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::219 + interfaces: + Loopback0: + ipv6: fc00:c:c:87::1/128 + Ethernet1: + ipv6: fc00:a::21a/126 + bp_interface: + ipv6: fc00:b::87/64 + + ARISTA134T1: + properties: + - common + bgp: + router-id: 0.12.0.136 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::21d + interfaces: + Loopback0: + ipv6: fc00:c:c:88::1/128 + Ethernet1: + ipv6: fc00:a::21e/126 + bp_interface: + ipv6: fc00:b::88/64 + + ARISTA135T1: + properties: + - common + bgp: + router-id: 0.12.0.137 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::221 + interfaces: + Loopback0: + ipv6: fc00:c:c:89::1/128 + Ethernet1: + ipv6: fc00:a::222/126 + bp_interface: + ipv6: fc00:b::89/64 + + ARISTA136T1: + properties: + - common + bgp: + router-id: 0.12.0.138 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::225 + interfaces: + Loopback0: + ipv6: fc00:c:c:8a::1/128 + Ethernet1: + ipv6: fc00:a::226/126 + bp_interface: + ipv6: fc00:b::8a/64 + + ARISTA137T1: + properties: + - common + bgp: + router-id: 0.12.0.139 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::229 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:8b::1/128 + Ethernet1: + ipv6: fc00:a::22a/126 + bp_interface: + ipv6: fc00:b::8b/64 + + ARISTA138T1: + properties: + - common + bgp: + router-id: 0.12.0.140 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::22d + interfaces: + Loopback0: + ipv6: fc00:c:c:8c::1/128 + Ethernet1: + ipv6: fc00:a::22e/126 + bp_interface: + ipv6: fc00:b::8c/64 + + ARISTA139T1: + properties: + - common + bgp: + router-id: 0.12.0.141 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::231 + interfaces: + Loopback0: + ipv6: fc00:c:c:8d::1/128 + Ethernet1: + ipv6: fc00:a::232/126 + bp_interface: + ipv6: fc00:b::8d/64 + + ARISTA140T1: + properties: + - common + bgp: + router-id: 0.12.0.142 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::235 + interfaces: + Loopback0: + ipv6: fc00:c:c:8e::1/128 + Ethernet1: + ipv6: fc00:a::236/126 + bp_interface: + ipv6: fc00:b::8e/64 + + ARISTA141T1: + properties: + - common + bgp: + router-id: 0.12.0.143 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::239 + interfaces: + Loopback0: + ipv6: fc00:c:c:8f::1/128 + Ethernet1: + ipv6: fc00:a::23a/126 + bp_interface: + ipv6: fc00:b::8f/64 + + ARISTA142T1: + properties: + - common + bgp: + router-id: 0.12.0.144 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::23d + interfaces: + Loopback0: + ipv6: fc00:c:c:90::1/128 + Ethernet1: + ipv6: fc00:a::23e/126 + bp_interface: + ipv6: fc00:b::90/64 + + ARISTA143T1: + properties: + - common + bgp: + router-id: 0.12.0.145 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::241 + interfaces: + Loopback0: + ipv6: fc00:c:c:91::1/128 + Ethernet1: + ipv6: fc00:a::242/126 + bp_interface: + ipv6: fc00:b::91/64 + + ARISTA144T1: + properties: + - common + bgp: + router-id: 0.12.0.146 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::245 + interfaces: + Loopback0: + ipv6: fc00:c:c:92::1/128 + Ethernet1: + ipv6: fc00:a::246/126 + bp_interface: + ipv6: fc00:b::92/64 + + ARISTA145T1: + properties: + - common + bgp: + 
router-id: 0.12.0.147 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::249 + interfaces: + Loopback0: + ipv6: fc00:c:c:93::1/128 + Ethernet1: + ipv6: fc00:a::24a/126 + bp_interface: + ipv6: fc00:b::93/64 + + ARISTA146T1: + properties: + - common + bgp: + router-id: 0.12.0.148 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::24d + interfaces: + Loopback0: + ipv6: fc00:c:c:94::1/128 + Ethernet1: + ipv6: fc00:a::24e/126 + bp_interface: + ipv6: fc00:b::94/64 + + ARISTA147T1: + properties: + - common + bgp: + router-id: 0.12.0.149 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::251 + interfaces: + Loopback0: + ipv6: fc00:c:c:95::1/128 + Ethernet1: + ipv6: fc00:a::252/126 + bp_interface: + ipv6: fc00:b::95/64 + + ARISTA148T1: + properties: + - common + bgp: + router-id: 0.12.0.150 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::255 + interfaces: + Loopback0: + ipv6: fc00:c:c:96::1/128 + Ethernet1: + ipv6: fc00:a::256/126 + bp_interface: + ipv6: fc00:b::96/64 + + ARISTA149T1: + properties: + - common + bgp: + router-id: 0.12.0.151 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::259 + interfaces: + Loopback0: + ipv6: fc00:c:c:97::1/128 + Ethernet1: + ipv6: fc00:a::25a/126 + bp_interface: + ipv6: fc00:b::97/64 + + ARISTA150T1: + properties: + - common + bgp: + router-id: 0.12.0.152 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::25d + interfaces: + Loopback0: + ipv6: fc00:c:c:98::1/128 + Ethernet1: + ipv6: fc00:a::25e/126 + bp_interface: + ipv6: fc00:b::98/64 + + ARISTA151T1: + properties: + - common + bgp: + router-id: 0.12.0.153 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::261 + interfaces: + Loopback0: + ipv6: fc00:c:c:99::1/128 + Ethernet1: + ipv6: fc00:a::262/126 + bp_interface: + ipv6: fc00:b::99/64 + + ARISTA152T1: + properties: + - common + bgp: + router-id: 0.12.0.154 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::265 + interfaces: + Loopback0: + ipv6: fc00:c:c:9a::1/128 + Ethernet1: + ipv6: fc00:a::266/126 + bp_interface: 
+ ipv6: fc00:b::9a/64 + + ARISTA153T1: + properties: + - common + bgp: + router-id: 0.12.0.155 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::269 + interfaces: + Loopback0: + ipv6: fc00:c:c:9b::1/128 + Ethernet1: + ipv6: fc00:a::26a/126 + bp_interface: + ipv6: fc00:b::9b/64 + + ARISTA154T1: + properties: + - common + bgp: + router-id: 0.12.0.156 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::26d + interfaces: + Loopback0: + ipv6: fc00:c:c:9c::1/128 + Ethernet1: + ipv6: fc00:a::26e/126 + bp_interface: + ipv6: fc00:b::9c/64 + + ARISTA155T1: + properties: + - common + bgp: + router-id: 0.12.0.157 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::271 + interfaces: + Loopback0: + ipv6: fc00:c:c:9d::1/128 + Ethernet1: + ipv6: fc00:a::272/126 + bp_interface: + ipv6: fc00:b::9d/64 + + ARISTA156T1: + properties: + - common + bgp: + router-id: 0.12.0.158 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::275 + interfaces: + Loopback0: + ipv6: fc00:c:c:9e::1/128 + Ethernet1: + ipv6: fc00:a::276/126 + bp_interface: + ipv6: fc00:b::9e/64 + + ARISTA157T1: + properties: + - common + bgp: + router-id: 0.12.0.159 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::279 + interfaces: + Loopback0: + ipv6: fc00:c:c:9f::1/128 + Ethernet1: + ipv6: fc00:a::27a/126 + bp_interface: + ipv6: fc00:b::9f/64 + + ARISTA158T1: + properties: + - common + bgp: + router-id: 0.12.0.160 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::27d + interfaces: + Loopback0: + ipv6: fc00:c:c:a0::1/128 + Ethernet1: + ipv6: fc00:a::27e/126 + bp_interface: + ipv6: fc00:b::a0/64 + + ARISTA159T1: + properties: + - common + bgp: + router-id: 0.12.0.161 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::281 + interfaces: + Loopback0: + ipv6: fc00:c:c:a1::1/128 + Ethernet1: + ipv6: fc00:a::282/126 + bp_interface: + ipv6: fc00:b::a1/64 + + ARISTA160T1: + properties: + - common + bgp: + router-id: 0.12.0.162 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::285 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:a2::1/128 + Ethernet1: + ipv6: fc00:a::286/126 + bp_interface: + ipv6: fc00:b::a2/64 + + ARISTA161T1: + properties: + - common + bgp: + router-id: 0.12.0.163 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::289 + interfaces: + Loopback0: + ipv6: fc00:c:c:a3::1/128 + Ethernet1: + ipv6: fc00:a::28a/126 + bp_interface: + ipv6: fc00:b::a3/64 + + ARISTA162T1: + properties: + - common + bgp: + router-id: 0.12.0.164 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::28d + interfaces: + Loopback0: + ipv6: fc00:c:c:a4::1/128 + Ethernet1: + ipv6: fc00:a::28e/126 + bp_interface: + ipv6: fc00:b::a4/64 + + ARISTA163T1: + properties: + - common + bgp: + router-id: 0.12.0.165 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::291 + interfaces: + Loopback0: + ipv6: fc00:c:c:a5::1/128 + Ethernet1: + ipv6: fc00:a::292/126 + bp_interface: + ipv6: fc00:b::a5/64 + + ARISTA164T1: + properties: + - common + bgp: + router-id: 0.12.0.166 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::295 + interfaces: + Loopback0: + ipv6: fc00:c:c:a6::1/128 + Ethernet1: + ipv6: fc00:a::296/126 + bp_interface: + ipv6: fc00:b::a6/64 + + ARISTA165T1: + properties: + - common + bgp: + router-id: 0.12.0.167 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::299 + interfaces: + Loopback0: + ipv6: fc00:c:c:a7::1/128 + Ethernet1: + ipv6: fc00:a::29a/126 + bp_interface: + ipv6: fc00:b::a7/64 + + ARISTA166T1: + properties: + - common + bgp: + router-id: 0.12.0.168 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::29d + interfaces: + Loopback0: + ipv6: fc00:c:c:a8::1/128 + Ethernet1: + ipv6: fc00:a::29e/126 + bp_interface: + ipv6: fc00:b::a8/64 + + ARISTA167T1: + properties: + - common + bgp: + router-id: 0.12.0.169 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:a9::1/128 + Ethernet1: + ipv6: fc00:a::2a2/126 + bp_interface: + ipv6: fc00:b::a9/64 + + ARISTA168T1: + properties: + - common + bgp: + router-id: 0.12.0.170 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::2a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:aa::1/128 + Ethernet1: + ipv6: fc00:a::2a6/126 + bp_interface: + ipv6: fc00:b::aa/64 + + ARISTA169T1: + properties: + - common + bgp: + router-id: 0.12.0.171 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ab::1/128 + Ethernet1: + ipv6: fc00:a::2aa/126 + bp_interface: + ipv6: fc00:b::ab/64 + + ARISTA170T1: + properties: + - common + bgp: + router-id: 0.12.0.172 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ac::1/128 + Ethernet1: + ipv6: fc00:a::2ae/126 + bp_interface: + ipv6: fc00:b::ac/64 + + ARISTA171T1: + properties: + - common + bgp: + router-id: 0.12.0.173 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ad::1/128 + Ethernet1: + ipv6: fc00:a::2b2/126 + bp_interface: + ipv6: fc00:b::ad/64 + + ARISTA172T1: + properties: + - common + bgp: + router-id: 0.12.0.174 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ae::1/128 + Ethernet1: + ipv6: fc00:a::2b6/126 + bp_interface: + ipv6: fc00:b::ae/64 + + ARISTA173T1: + properties: + - common + bgp: + router-id: 0.12.0.175 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:af::1/128 + Ethernet1: + ipv6: fc00:a::2ba/126 + bp_interface: + ipv6: fc00:b::af/64 + + ARISTA174T1: + properties: + - common + bgp: + router-id: 0.12.0.176 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2bd + interfaces: + Loopback0: + ipv6: fc00:c:c:b0::1/128 + Ethernet1: + ipv6: fc00:a::2be/126 + bp_interface: + ipv6: fc00:b::b0/64 + + ARISTA175T1: + properties: + - common + bgp: + router-id: 0.12.0.177 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b1::1/128 + Ethernet1: + ipv6: fc00:a::2c2/126 + bp_interface: + ipv6: fc00:b::b1/64 + + ARISTA176T1: + 
properties: + - common + bgp: + router-id: 0.12.0.178 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b2::1/128 + Ethernet1: + ipv6: fc00:a::2c6/126 + bp_interface: + ipv6: fc00:b::b2/64 + + ARISTA177T1: + properties: + - common + bgp: + router-id: 0.12.0.179 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b3::1/128 + Ethernet1: + ipv6: fc00:a::2ca/126 + bp_interface: + ipv6: fc00:b::b3/64 + + ARISTA178T1: + properties: + - common + bgp: + router-id: 0.12.0.180 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2cd + interfaces: + Loopback0: + ipv6: fc00:c:c:b4::1/128 + Ethernet1: + ipv6: fc00:a::2ce/126 + bp_interface: + ipv6: fc00:b::b4/64 + + ARISTA179T1: + properties: + - common + bgp: + router-id: 0.12.0.181 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b5::1/128 + Ethernet1: + ipv6: fc00:a::2d2/126 + bp_interface: + ipv6: fc00:b::b5/64 + + ARISTA180T1: + properties: + - common + bgp: + router-id: 0.12.0.182 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b6::1/128 + Ethernet1: + ipv6: fc00:a::2d6/126 + bp_interface: + ipv6: fc00:b::b6/64 + + ARISTA181T1: + properties: + - common + bgp: + router-id: 0.12.0.183 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b7::1/128 + Ethernet1: + ipv6: fc00:a::2da/126 + bp_interface: + ipv6: fc00:b::b7/64 + + ARISTA182T1: + properties: + - common + bgp: + router-id: 0.12.0.184 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2dd + interfaces: + Loopback0: + ipv6: fc00:c:c:b8::1/128 + Ethernet1: + ipv6: fc00:a::2de/126 + bp_interface: + ipv6: fc00:b::b8/64 + + ARISTA183T1: + properties: + - common + bgp: + router-id: 0.12.0.185 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b9::1/128 + Ethernet1: + ipv6: 
fc00:a::2e2/126 + bp_interface: + ipv6: fc00:b::b9/64 + + ARISTA184T1: + properties: + - common + bgp: + router-id: 0.12.0.186 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ba::1/128 + Ethernet1: + ipv6: fc00:a::2e6/126 + bp_interface: + ipv6: fc00:b::ba/64 + + ARISTA185T1: + properties: + - common + bgp: + router-id: 0.12.0.187 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bb::1/128 + Ethernet1: + ipv6: fc00:a::2ea/126 + bp_interface: + ipv6: fc00:b::bb/64 + + ARISTA186T1: + properties: + - common + bgp: + router-id: 0.12.0.188 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2ed + interfaces: + Loopback0: + ipv6: fc00:c:c:bc::1/128 + Ethernet1: + ipv6: fc00:a::2ee/126 + bp_interface: + ipv6: fc00:b::bc/64 + + ARISTA187T1: + properties: + - common + bgp: + router-id: 0.12.0.189 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:bd::1/128 + Ethernet1: + ipv6: fc00:a::2f2/126 + bp_interface: + ipv6: fc00:b::bd/64 + + ARISTA188T1: + properties: + - common + bgp: + router-id: 0.12.0.190 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:be::1/128 + Ethernet1: + ipv6: fc00:a::2f6/126 + bp_interface: + ipv6: fc00:b::be/64 + + ARISTA189T1: + properties: + - common + bgp: + router-id: 0.12.0.191 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bf::1/128 + Ethernet1: + ipv6: fc00:a::2fa/126 + bp_interface: + ipv6: fc00:b::bf/64 + + ARISTA190T1: + properties: + - common + bgp: + router-id: 0.12.0.192 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::2fd + interfaces: + Loopback0: + ipv6: fc00:c:c:c0::1/128 + Ethernet1: + ipv6: fc00:a::2fe/126 + bp_interface: + ipv6: fc00:b::c0/64 + + ARISTA191T1: + properties: + - common + bgp: + router-id: 0.12.0.193 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::301 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:c1::1/128 + Ethernet1: + ipv6: fc00:a::302/126 + bp_interface: + ipv6: fc00:b::c1/64 + + ARISTA192T1: + properties: + - common + bgp: + router-id: 0.12.0.194 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::305 + interfaces: + Loopback0: + ipv6: fc00:c:c:c2::1/128 + Ethernet1: + ipv6: fc00:a::306/126 + bp_interface: + ipv6: fc00:b::c2/64 + + ARISTA193T1: + properties: + - common + bgp: + router-id: 0.12.0.195 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::309 + interfaces: + Loopback0: + ipv6: fc00:c:c:c3::1/128 + Ethernet1: + ipv6: fc00:a::30a/126 + bp_interface: + ipv6: fc00:b::c3/64 + + ARISTA194T1: + properties: + - common + bgp: + router-id: 0.12.0.196 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::30d + interfaces: + Loopback0: + ipv6: fc00:c:c:c4::1/128 + Ethernet1: + ipv6: fc00:a::30e/126 + bp_interface: + ipv6: fc00:b::c4/64 + + ARISTA195T1: + properties: + - common + bgp: + router-id: 0.12.0.197 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::311 + interfaces: + Loopback0: + ipv6: fc00:c:c:c5::1/128 + Ethernet1: + ipv6: fc00:a::312/126 + bp_interface: + ipv6: fc00:b::c5/64 + + ARISTA196T1: + properties: + - common + bgp: + router-id: 0.12.0.198 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::315 + interfaces: + Loopback0: + ipv6: fc00:c:c:c6::1/128 + Ethernet1: + ipv6: fc00:a::316/126 + bp_interface: + ipv6: fc00:b::c6/64 + + ARISTA197T1: + properties: + - common + bgp: + router-id: 0.12.0.199 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::319 + interfaces: + Loopback0: + ipv6: fc00:c:c:c7::1/128 + Ethernet1: + ipv6: fc00:a::31a/126 + bp_interface: + ipv6: fc00:b::c7/64 + + ARISTA198T1: + properties: + - common + bgp: + router-id: 0.12.0.200 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::31d + interfaces: + Loopback0: + ipv6: fc00:c:c:c8::1/128 + Ethernet1: + ipv6: fc00:a::31e/126 + bp_interface: + ipv6: fc00:b::c8/64 + + ARISTA199T1: + properties: + - common + bgp: + 
router-id: 0.12.0.201 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::321 + interfaces: + Loopback0: + ipv6: fc00:c:c:c9::1/128 + Ethernet1: + ipv6: fc00:a::322/126 + bp_interface: + ipv6: fc00:b::c9/64 + + ARISTA200T1: + properties: + - common + bgp: + router-id: 0.12.0.202 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::325 + interfaces: + Loopback0: + ipv6: fc00:c:c:ca::1/128 + Ethernet1: + ipv6: fc00:a::326/126 + bp_interface: + ipv6: fc00:b::ca/64 + + ARISTA201T1: + properties: + - common + bgp: + router-id: 0.12.0.203 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::329 + interfaces: + Loopback0: + ipv6: fc00:c:c:cb::1/128 + Ethernet1: + ipv6: fc00:a::32a/126 + bp_interface: + ipv6: fc00:b::cb/64 + + ARISTA202T1: + properties: + - common + bgp: + router-id: 0.12.0.204 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::32d + interfaces: + Loopback0: + ipv6: fc00:c:c:cc::1/128 + Ethernet1: + ipv6: fc00:a::32e/126 + bp_interface: + ipv6: fc00:b::cc/64 + + ARISTA203T1: + properties: + - common + bgp: + router-id: 0.12.0.205 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::331 + interfaces: + Loopback0: + ipv6: fc00:c:c:cd::1/128 + Ethernet1: + ipv6: fc00:a::332/126 + bp_interface: + ipv6: fc00:b::cd/64 + + ARISTA204T1: + properties: + - common + bgp: + router-id: 0.12.0.206 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::335 + interfaces: + Loopback0: + ipv6: fc00:c:c:ce::1/128 + Ethernet1: + ipv6: fc00:a::336/126 + bp_interface: + ipv6: fc00:b::ce/64 + + ARISTA205T1: + properties: + - common + bgp: + router-id: 0.12.0.207 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::339 + interfaces: + Loopback0: + ipv6: fc00:c:c:cf::1/128 + Ethernet1: + ipv6: fc00:a::33a/126 + bp_interface: + ipv6: fc00:b::cf/64 + + ARISTA206T1: + properties: + - common + bgp: + router-id: 0.12.0.208 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::33d + interfaces: + Loopback0: + ipv6: fc00:c:c:d0::1/128 + Ethernet1: + ipv6: fc00:a::33e/126 + bp_interface: 
+ ipv6: fc00:b::d0/64 + + ARISTA207T1: + properties: + - common + bgp: + router-id: 0.12.0.209 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::341 + interfaces: + Loopback0: + ipv6: fc00:c:c:d1::1/128 + Ethernet1: + ipv6: fc00:a::342/126 + bp_interface: + ipv6: fc00:b::d1/64 + + ARISTA208T1: + properties: + - common + bgp: + router-id: 0.12.0.210 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::345 + interfaces: + Loopback0: + ipv6: fc00:c:c:d2::1/128 + Ethernet1: + ipv6: fc00:a::346/126 + bp_interface: + ipv6: fc00:b::d2/64 + + ARISTA209T1: + properties: + - common + bgp: + router-id: 0.12.0.211 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::349 + interfaces: + Loopback0: + ipv6: fc00:c:c:d3::1/128 + Ethernet1: + ipv6: fc00:a::34a/126 + bp_interface: + ipv6: fc00:b::d3/64 + + ARISTA210T1: + properties: + - common + bgp: + router-id: 0.12.0.212 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::34d + interfaces: + Loopback0: + ipv6: fc00:c:c:d4::1/128 + Ethernet1: + ipv6: fc00:a::34e/126 + bp_interface: + ipv6: fc00:b::d4/64 + + ARISTA211T1: + properties: + - common + bgp: + router-id: 0.12.0.213 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::351 + interfaces: + Loopback0: + ipv6: fc00:c:c:d5::1/128 + Ethernet1: + ipv6: fc00:a::352/126 + bp_interface: + ipv6: fc00:b::d5/64 + + ARISTA212T1: + properties: + - common + bgp: + router-id: 0.12.0.214 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::355 + interfaces: + Loopback0: + ipv6: fc00:c:c:d6::1/128 + Ethernet1: + ipv6: fc00:a::356/126 + bp_interface: + ipv6: fc00:b::d6/64 + + ARISTA213T1: + properties: + - common + bgp: + router-id: 0.12.0.215 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::359 + interfaces: + Loopback0: + ipv6: fc00:c:c:d7::1/128 + Ethernet1: + ipv6: fc00:a::35a/126 + bp_interface: + ipv6: fc00:b::d7/64 + + ARISTA214T1: + properties: + - common + bgp: + router-id: 0.12.0.216 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::35d + interfaces: + Loopback0: + ipv6: 
fc00:c:c:d8::1/128 + Ethernet1: + ipv6: fc00:a::35e/126 + bp_interface: + ipv6: fc00:b::d8/64 + + ARISTA215T1: + properties: + - common + bgp: + router-id: 0.12.0.217 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::361 + interfaces: + Loopback0: + ipv6: fc00:c:c:d9::1/128 + Ethernet1: + ipv6: fc00:a::362/126 + bp_interface: + ipv6: fc00:b::d9/64 + + ARISTA216T1: + properties: + - common + bgp: + router-id: 0.12.0.218 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::365 + interfaces: + Loopback0: + ipv6: fc00:c:c:da::1/128 + Ethernet1: + ipv6: fc00:a::366/126 + bp_interface: + ipv6: fc00:b::da/64 + + ARISTA217T1: + properties: + - common + bgp: + router-id: 0.12.0.219 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::369 + interfaces: + Loopback0: + ipv6: fc00:c:c:db::1/128 + Ethernet1: + ipv6: fc00:a::36a/126 + bp_interface: + ipv6: fc00:b::db/64 + + ARISTA218T1: + properties: + - common + bgp: + router-id: 0.12.0.220 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::36d + interfaces: + Loopback0: + ipv6: fc00:c:c:dc::1/128 + Ethernet1: + ipv6: fc00:a::36e/126 + bp_interface: + ipv6: fc00:b::dc/64 + + ARISTA219T1: + properties: + - common + bgp: + router-id: 0.12.0.221 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::371 + interfaces: + Loopback0: + ipv6: fc00:c:c:dd::1/128 + Ethernet1: + ipv6: fc00:a::372/126 + bp_interface: + ipv6: fc00:b::dd/64 + + ARISTA220T1: + properties: + - common + bgp: + router-id: 0.12.0.222 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::375 + interfaces: + Loopback0: + ipv6: fc00:c:c:de::1/128 + Ethernet1: + ipv6: fc00:a::376/126 + bp_interface: + ipv6: fc00:b::de/64 + + ARISTA221T1: + properties: + - common + bgp: + router-id: 0.12.0.223 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::379 + interfaces: + Loopback0: + ipv6: fc00:c:c:df::1/128 + Ethernet1: + ipv6: fc00:a::37a/126 + bp_interface: + ipv6: fc00:b::df/64 + + ARISTA222T1: + properties: + - common + bgp: + router-id: 0.12.0.224 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::37d + interfaces: + Loopback0: + ipv6: fc00:c:c:e0::1/128 + Ethernet1: + ipv6: fc00:a::37e/126 + bp_interface: + ipv6: fc00:b::e0/64 + + ARISTA223T1: + properties: + - common + bgp: + router-id: 0.12.0.225 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::381 + interfaces: + Loopback0: + ipv6: fc00:c:c:e1::1/128 + Ethernet1: + ipv6: fc00:a::382/126 + bp_interface: + ipv6: fc00:b::e1/64 + + ARISTA224T1: + properties: + - common + bgp: + router-id: 0.12.0.226 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::385 + interfaces: + Loopback0: + ipv6: fc00:c:c:e2::1/128 + Ethernet1: + ipv6: fc00:a::386/126 + bp_interface: + ipv6: fc00:b::e2/64 + + ARISTA225T1: + properties: + - common + bgp: + router-id: 0.12.0.227 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::389 + interfaces: + Loopback0: + ipv6: fc00:c:c:e3::1/128 + Ethernet1: + ipv6: fc00:a::38a/126 + bp_interface: + ipv6: fc00:b::e3/64 + + ARISTA226T1: + properties: + - common + bgp: + router-id: 0.12.0.228 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::38d + interfaces: + Loopback0: + ipv6: fc00:c:c:e4::1/128 + Ethernet1: + ipv6: fc00:a::38e/126 + bp_interface: + ipv6: fc00:b::e4/64 + + ARISTA227T1: + properties: + - common + bgp: + router-id: 0.12.0.229 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::391 + interfaces: + Loopback0: + ipv6: fc00:c:c:e5::1/128 + Ethernet1: + ipv6: fc00:a::392/126 + bp_interface: + ipv6: fc00:b::e5/64 + + ARISTA228T1: + properties: + - common + bgp: + router-id: 0.12.0.230 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::395 + interfaces: + Loopback0: + ipv6: fc00:c:c:e6::1/128 + Ethernet1: + ipv6: fc00:a::396/126 + bp_interface: + ipv6: fc00:b::e6/64 + + ARISTA229T1: + properties: + - common + bgp: + router-id: 0.12.0.231 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::399 + interfaces: + Loopback0: + ipv6: fc00:c:c:e7::1/128 + Ethernet1: + ipv6: fc00:a::39a/126 + bp_interface: + ipv6: fc00:b::e7/64 + + ARISTA230T1: + 
properties: + - common + bgp: + router-id: 0.12.0.232 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::39d + interfaces: + Loopback0: + ipv6: fc00:c:c:e8::1/128 + Ethernet1: + ipv6: fc00:a::39e/126 + bp_interface: + ipv6: fc00:b::e8/64 + + ARISTA231T1: + properties: + - common + bgp: + router-id: 0.12.0.233 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:e9::1/128 + Ethernet1: + ipv6: fc00:a::3a2/126 + bp_interface: + ipv6: fc00:b::e9/64 + + ARISTA232T1: + properties: + - common + bgp: + router-id: 0.12.0.234 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ea::1/128 + Ethernet1: + ipv6: fc00:a::3a6/126 + bp_interface: + ipv6: fc00:b::ea/64 + + ARISTA233T1: + properties: + - common + bgp: + router-id: 0.12.0.235 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:eb::1/128 + Ethernet1: + ipv6: fc00:a::3aa/126 + bp_interface: + ipv6: fc00:b::eb/64 + + ARISTA234T1: + properties: + - common + bgp: + router-id: 0.12.0.236 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ec::1/128 + Ethernet1: + ipv6: fc00:a::3ae/126 + bp_interface: + ipv6: fc00:b::ec/64 + + ARISTA235T1: + properties: + - common + bgp: + router-id: 0.12.0.237 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ed::1/128 + Ethernet1: + ipv6: fc00:a::3b2/126 + bp_interface: + ipv6: fc00:b::ed/64 + + ARISTA236T1: + properties: + - common + bgp: + router-id: 0.12.0.238 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ee::1/128 + Ethernet1: + ipv6: fc00:a::3b6/126 + bp_interface: + ipv6: fc00:b::ee/64 + + ARISTA237T1: + properties: + - common + bgp: + router-id: 0.12.0.239 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ef::1/128 + Ethernet1: + ipv6: 
fc00:a::3ba/126 + bp_interface: + ipv6: fc00:b::ef/64 + + ARISTA238T1: + properties: + - common + bgp: + router-id: 0.12.0.240 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3bd + interfaces: + Loopback0: + ipv6: fc00:c:c:f0::1/128 + Ethernet1: + ipv6: fc00:a::3be/126 + bp_interface: + ipv6: fc00:b::f0/64 + + ARISTA239T1: + properties: + - common + bgp: + router-id: 0.12.0.241 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f1::1/128 + Ethernet1: + ipv6: fc00:a::3c2/126 + bp_interface: + ipv6: fc00:b::f1/64 + + ARISTA240T1: + properties: + - common + bgp: + router-id: 0.12.0.242 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f2::1/128 + Ethernet1: + ipv6: fc00:a::3c6/126 + bp_interface: + ipv6: fc00:b::f2/64 + + ARISTA241T1: + properties: + - common + bgp: + router-id: 0.12.0.243 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f3::1/128 + Ethernet1: + ipv6: fc00:a::3ca/126 + bp_interface: + ipv6: fc00:b::f3/64 + + ARISTA242T1: + properties: + - common + bgp: + router-id: 0.12.0.244 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3cd + interfaces: + Loopback0: + ipv6: fc00:c:c:f4::1/128 + Ethernet1: + ipv6: fc00:a::3ce/126 + bp_interface: + ipv6: fc00:b::f4/64 + + ARISTA243T1: + properties: + - common + bgp: + router-id: 0.12.0.245 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f5::1/128 + Ethernet1: + ipv6: fc00:a::3d2/126 + bp_interface: + ipv6: fc00:b::f5/64 + + ARISTA244T1: + properties: + - common + bgp: + router-id: 0.12.0.246 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f6::1/128 + Ethernet1: + ipv6: fc00:a::3d6/126 + bp_interface: + ipv6: fc00:b::f6/64 + + ARISTA245T1: + properties: + - common + bgp: + router-id: 0.12.0.247 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3d9 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:f7::1/128 + Ethernet1: + ipv6: fc00:a::3da/126 + bp_interface: + ipv6: fc00:b::f7/64 + + ARISTA246T1: + properties: + - common + bgp: + router-id: 0.12.0.248 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3dd + interfaces: + Loopback0: + ipv6: fc00:c:c:f8::1/128 + Ethernet1: + ipv6: fc00:a::3de/126 + bp_interface: + ipv6: fc00:b::f8/64 + + ARISTA247T1: + properties: + - common + bgp: + router-id: 0.12.0.249 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f9::1/128 + Ethernet1: + ipv6: fc00:a::3e2/126 + bp_interface: + ipv6: fc00:b::f9/64 + + ARISTA248T1: + properties: + - common + bgp: + router-id: 0.12.0.250 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fa::1/128 + Ethernet1: + ipv6: fc00:a::3e6/126 + bp_interface: + ipv6: fc00:b::fa/64 + + ARISTA249T1: + properties: + - common + bgp: + router-id: 0.12.0.251 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:fb::1/128 + Ethernet1: + ipv6: fc00:a::3ea/126 + bp_interface: + ipv6: fc00:b::fb/64 + + ARISTA250T1: + properties: + - common + bgp: + router-id: 0.12.0.252 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3ed + interfaces: + Loopback0: + ipv6: fc00:c:c:fc::1/128 + Ethernet1: + ipv6: fc00:a::3ee/126 + bp_interface: + ipv6: fc00:b::fc/64 + + ARISTA251T1: + properties: + - common + bgp: + router-id: 0.12.0.253 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:fd::1/128 + Ethernet1: + ipv6: fc00:a::3f2/126 + bp_interface: + ipv6: fc00:b::fd/64 + + ARISTA252T1: + properties: + - common + bgp: + router-id: 0.12.0.254 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fe::1/128 + Ethernet1: + ipv6: fc00:a::3f6/126 + bp_interface: + ipv6: fc00:b::fe/64 + + ARISTA253T1: + properties: + - common + bgp: + 
router-id: 0.12.0.255 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ff::1/128 + Ethernet1: + ipv6: fc00:a::3fa/126 + bp_interface: + ipv6: fc00:b::ff/64 + + ARISTA254T1: + properties: + - common + bgp: + router-id: 0.12.1.0 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::3fd + interfaces: + Loopback0: + ipv6: fc00:c:c:100::1/128 + Ethernet1: + ipv6: fc00:a::3fe/126 + bp_interface: + ipv6: fc00:b::100/64 + + ARISTA255T1: + properties: + - common + bgp: + router-id: 0.12.1.1 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::401 + interfaces: + Loopback0: + ipv6: fc00:c:c:101::1/128 + Ethernet1: + ipv6: fc00:a::402/126 + bp_interface: + ipv6: fc00:b::101/64 + + ARISTA256T1: + properties: + - common + bgp: + router-id: 0.12.1.2 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::405 + interfaces: + Loopback0: + ipv6: fc00:c:c:102::1/128 + Ethernet1: + ipv6: fc00:a::406/126 + bp_interface: + ipv6: fc00:b::102/64 + + ARISTA257T1: + properties: + - common + bgp: + router-id: 0.12.1.3 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::409 + interfaces: + Loopback0: + ipv6: fc00:c:c:103::1/128 + Ethernet1: + ipv6: fc00:a::40a/126 + bp_interface: + ipv6: fc00:b::103/64 + + ARISTA258T1: + properties: + - common + bgp: + router-id: 0.12.1.4 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::40d + interfaces: + Loopback0: + ipv6: fc00:c:c:104::1/128 + Ethernet1: + ipv6: fc00:a::40e/126 + bp_interface: + ipv6: fc00:b::104/64 + + ARISTA259T1: + properties: + - common + bgp: + router-id: 0.12.1.5 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::411 + interfaces: + Loopback0: + ipv6: fc00:c:c:105::1/128 + Ethernet1: + ipv6: fc00:a::412/126 + bp_interface: + ipv6: fc00:b::105/64 + + ARISTA260T1: + properties: + - common + bgp: + router-id: 0.12.1.6 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::415 + interfaces: + Loopback0: + ipv6: fc00:c:c:106::1/128 + Ethernet1: + ipv6: fc00:a::416/126 + bp_interface: + 
ipv6: fc00:b::106/64 + + ARISTA261T1: + properties: + - common + bgp: + router-id: 0.12.1.7 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::419 + interfaces: + Loopback0: + ipv6: fc00:c:c:107::1/128 + Ethernet1: + ipv6: fc00:a::41a/126 + bp_interface: + ipv6: fc00:b::107/64 + + ARISTA262T1: + properties: + - common + bgp: + router-id: 0.12.1.8 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::41d + interfaces: + Loopback0: + ipv6: fc00:c:c:108::1/128 + Ethernet1: + ipv6: fc00:a::41e/126 + bp_interface: + ipv6: fc00:b::108/64 + + ARISTA263T1: + properties: + - common + bgp: + router-id: 0.12.1.9 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::421 + interfaces: + Loopback0: + ipv6: fc00:c:c:109::1/128 + Ethernet1: + ipv6: fc00:a::422/126 + bp_interface: + ipv6: fc00:b::109/64 + + ARISTA264T1: + properties: + - common + bgp: + router-id: 0.12.1.10 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::425 + interfaces: + Loopback0: + ipv6: fc00:c:c:10a::1/128 + Ethernet1: + ipv6: fc00:a::426/126 + bp_interface: + ipv6: fc00:b::10a/64 + + ARISTA265T1: + properties: + - common + bgp: + router-id: 0.12.1.11 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::429 + interfaces: + Loopback0: + ipv6: fc00:c:c:10b::1/128 + Ethernet1: + ipv6: fc00:a::42a/126 + bp_interface: + ipv6: fc00:b::10b/64 + + ARISTA266T1: + properties: + - common + bgp: + router-id: 0.12.1.12 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::42d + interfaces: + Loopback0: + ipv6: fc00:c:c:10c::1/128 + Ethernet1: + ipv6: fc00:a::42e/126 + bp_interface: + ipv6: fc00:b::10c/64 + + ARISTA267T1: + properties: + - common + bgp: + router-id: 0.12.1.13 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::431 + interfaces: + Loopback0: + ipv6: fc00:c:c:10d::1/128 + Ethernet1: + ipv6: fc00:a::432/126 + bp_interface: + ipv6: fc00:b::10d/64 + + ARISTA268T1: + properties: + - common + bgp: + router-id: 0.12.1.14 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::435 + interfaces: + Loopback0: + 
ipv6: fc00:c:c:10e::1/128 + Ethernet1: + ipv6: fc00:a::436/126 + bp_interface: + ipv6: fc00:b::10e/64 + + ARISTA269T1: + properties: + - common + bgp: + router-id: 0.12.1.15 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::439 + interfaces: + Loopback0: + ipv6: fc00:c:c:10f::1/128 + Ethernet1: + ipv6: fc00:a::43a/126 + bp_interface: + ipv6: fc00:b::10f/64 + + ARISTA270T1: + properties: + - common + bgp: + router-id: 0.12.1.16 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::43d + interfaces: + Loopback0: + ipv6: fc00:c:c:110::1/128 + Ethernet1: + ipv6: fc00:a::43e/126 + bp_interface: + ipv6: fc00:b::110/64 + + ARISTA271T1: + properties: + - common + bgp: + router-id: 0.12.1.17 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::441 + interfaces: + Loopback0: + ipv6: fc00:c:c:111::1/128 + Ethernet1: + ipv6: fc00:a::442/126 + bp_interface: + ipv6: fc00:b::111/64 + + ARISTA272T1: + properties: + - common + bgp: + router-id: 0.12.1.18 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::445 + interfaces: + Loopback0: + ipv6: fc00:c:c:112::1/128 + Ethernet1: + ipv6: fc00:a::446/126 + bp_interface: + ipv6: fc00:b::112/64 + + ARISTA273T1: + properties: + - common + bgp: + router-id: 0.12.1.19 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::449 + interfaces: + Loopback0: + ipv6: fc00:c:c:113::1/128 + Ethernet1: + ipv6: fc00:a::44a/126 + bp_interface: + ipv6: fc00:b::113/64 + + ARISTA274T1: + properties: + - common + bgp: + router-id: 0.12.1.20 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::44d + interfaces: + Loopback0: + ipv6: fc00:c:c:114::1/128 + Ethernet1: + ipv6: fc00:a::44e/126 + bp_interface: + ipv6: fc00:b::114/64 + + ARISTA275T1: + properties: + - common + bgp: + router-id: 0.12.1.21 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::451 + interfaces: + Loopback0: + ipv6: fc00:c:c:115::1/128 + Ethernet1: + ipv6: fc00:a::452/126 + bp_interface: + ipv6: fc00:b::115/64 + + ARISTA276T1: + properties: + - common + bgp: + router-id: 0.12.1.22 + 
asn: 4200100000 + peers: + 4200000000: + - fc00:a::455 + interfaces: + Loopback0: + ipv6: fc00:c:c:116::1/128 + Ethernet1: + ipv6: fc00:a::456/126 + bp_interface: + ipv6: fc00:b::116/64 + + ARISTA277T1: + properties: + - common + bgp: + router-id: 0.12.1.23 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::459 + interfaces: + Loopback0: + ipv6: fc00:c:c:117::1/128 + Ethernet1: + ipv6: fc00:a::45a/126 + bp_interface: + ipv6: fc00:b::117/64 + + ARISTA278T1: + properties: + - common + bgp: + router-id: 0.12.1.24 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::45d + interfaces: + Loopback0: + ipv6: fc00:c:c:118::1/128 + Ethernet1: + ipv6: fc00:a::45e/126 + bp_interface: + ipv6: fc00:b::118/64 + + ARISTA279T1: + properties: + - common + bgp: + router-id: 0.12.1.25 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::461 + interfaces: + Loopback0: + ipv6: fc00:c:c:119::1/128 + Ethernet1: + ipv6: fc00:a::462/126 + bp_interface: + ipv6: fc00:b::119/64 + + ARISTA280T1: + properties: + - common + bgp: + router-id: 0.12.1.26 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::465 + interfaces: + Loopback0: + ipv6: fc00:c:c:11a::1/128 + Ethernet1: + ipv6: fc00:a::466/126 + bp_interface: + ipv6: fc00:b::11a/64 + + ARISTA281T1: + properties: + - common + bgp: + router-id: 0.12.1.27 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::469 + interfaces: + Loopback0: + ipv6: fc00:c:c:11b::1/128 + Ethernet1: + ipv6: fc00:a::46a/126 + bp_interface: + ipv6: fc00:b::11b/64 + + ARISTA282T1: + properties: + - common + bgp: + router-id: 0.12.1.28 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::46d + interfaces: + Loopback0: + ipv6: fc00:c:c:11c::1/128 + Ethernet1: + ipv6: fc00:a::46e/126 + bp_interface: + ipv6: fc00:b::11c/64 + + ARISTA283T1: + properties: + - common + bgp: + router-id: 0.12.1.29 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::471 + interfaces: + Loopback0: + ipv6: fc00:c:c:11d::1/128 + Ethernet1: + ipv6: fc00:a::472/126 + bp_interface: + ipv6: 
fc00:b::11d/64 + + ARISTA284T1: + properties: + - common + bgp: + router-id: 0.12.1.30 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::475 + interfaces: + Loopback0: + ipv6: fc00:c:c:11e::1/128 + Ethernet1: + ipv6: fc00:a::476/126 + bp_interface: + ipv6: fc00:b::11e/64 + + ARISTA285T1: + properties: + - common + bgp: + router-id: 0.12.1.31 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::479 + interfaces: + Loopback0: + ipv6: fc00:c:c:11f::1/128 + Ethernet1: + ipv6: fc00:a::47a/126 + bp_interface: + ipv6: fc00:b::11f/64 + + ARISTA286T1: + properties: + - common + bgp: + router-id: 0.12.1.32 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::47d + interfaces: + Loopback0: + ipv6: fc00:c:c:120::1/128 + Ethernet1: + ipv6: fc00:a::47e/126 + bp_interface: + ipv6: fc00:b::120/64 + + ARISTA287T1: + properties: + - common + bgp: + router-id: 0.12.1.33 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::481 + interfaces: + Loopback0: + ipv6: fc00:c:c:121::1/128 + Ethernet1: + ipv6: fc00:a::482/126 + bp_interface: + ipv6: fc00:b::121/64 + + ARISTA288T1: + properties: + - common + bgp: + router-id: 0.12.1.34 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::485 + interfaces: + Loopback0: + ipv6: fc00:c:c:122::1/128 + Ethernet1: + ipv6: fc00:a::486/126 + bp_interface: + ipv6: fc00:b::122/64 + + ARISTA289T1: + properties: + - common + bgp: + router-id: 0.12.1.35 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::489 + interfaces: + Loopback0: + ipv6: fc00:c:c:123::1/128 + Ethernet1: + ipv6: fc00:a::48a/126 + bp_interface: + ipv6: fc00:b::123/64 + + ARISTA290T1: + properties: + - common + bgp: + router-id: 0.12.1.36 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::48d + interfaces: + Loopback0: + ipv6: fc00:c:c:124::1/128 + Ethernet1: + ipv6: fc00:a::48e/126 + bp_interface: + ipv6: fc00:b::124/64 + + ARISTA291T1: + properties: + - common + bgp: + router-id: 0.12.1.37 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::491 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:125::1/128 + Ethernet1: + ipv6: fc00:a::492/126 + bp_interface: + ipv6: fc00:b::125/64 + + ARISTA292T1: + properties: + - common + bgp: + router-id: 0.12.1.38 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::495 + interfaces: + Loopback0: + ipv6: fc00:c:c:126::1/128 + Ethernet1: + ipv6: fc00:a::496/126 + bp_interface: + ipv6: fc00:b::126/64 + + ARISTA293T1: + properties: + - common + bgp: + router-id: 0.12.1.39 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::499 + interfaces: + Loopback0: + ipv6: fc00:c:c:127::1/128 + Ethernet1: + ipv6: fc00:a::49a/126 + bp_interface: + ipv6: fc00:b::127/64 + + ARISTA294T1: + properties: + - common + bgp: + router-id: 0.12.1.40 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::49d + interfaces: + Loopback0: + ipv6: fc00:c:c:128::1/128 + Ethernet1: + ipv6: fc00:a::49e/126 + bp_interface: + ipv6: fc00:b::128/64 + + ARISTA295T1: + properties: + - common + bgp: + router-id: 0.12.1.41 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:129::1/128 + Ethernet1: + ipv6: fc00:a::4a2/126 + bp_interface: + ipv6: fc00:b::129/64 + + ARISTA296T1: + properties: + - common + bgp: + router-id: 0.12.1.42 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:12a::1/128 + Ethernet1: + ipv6: fc00:a::4a6/126 + bp_interface: + ipv6: fc00:b::12a/64 + + ARISTA297T1: + properties: + - common + bgp: + router-id: 0.12.1.43 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:12b::1/128 + Ethernet1: + ipv6: fc00:a::4aa/126 + bp_interface: + ipv6: fc00:b::12b/64 + + ARISTA298T1: + properties: + - common + bgp: + router-id: 0.12.1.44 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4ad + interfaces: + Loopback0: + ipv6: fc00:c:c:12c::1/128 + Ethernet1: + ipv6: fc00:a::4ae/126 + bp_interface: + ipv6: fc00:b::12c/64 + + ARISTA299T1: + properties: + - common + bgp: + router-id: 0.12.1.45 + asn: 
4200100000 + peers: + 4200000000: + - fc00:a::4b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:12d::1/128 + Ethernet1: + ipv6: fc00:a::4b2/126 + bp_interface: + ipv6: fc00:b::12d/64 + + ARISTA300T1: + properties: + - common + bgp: + router-id: 0.12.1.46 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:12e::1/128 + Ethernet1: + ipv6: fc00:a::4b6/126 + bp_interface: + ipv6: fc00:b::12e/64 + + ARISTA301T1: + properties: + - common + bgp: + router-id: 0.12.1.47 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:12f::1/128 + Ethernet1: + ipv6: fc00:a::4ba/126 + bp_interface: + ipv6: fc00:b::12f/64 + + ARISTA302T1: + properties: + - common + bgp: + router-id: 0.12.1.48 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4bd + interfaces: + Loopback0: + ipv6: fc00:c:c:130::1/128 + Ethernet1: + ipv6: fc00:a::4be/126 + bp_interface: + ipv6: fc00:b::130/64 + + ARISTA303T1: + properties: + - common + bgp: + router-id: 0.12.1.49 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:131::1/128 + Ethernet1: + ipv6: fc00:a::4c2/126 + bp_interface: + ipv6: fc00:b::131/64 + + ARISTA304T1: + properties: + - common + bgp: + router-id: 0.12.1.50 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:132::1/128 + Ethernet1: + ipv6: fc00:a::4c6/126 + bp_interface: + ipv6: fc00:b::132/64 + + ARISTA305T1: + properties: + - common + bgp: + router-id: 0.12.1.51 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:133::1/128 + Ethernet1: + ipv6: fc00:a::4ca/126 + bp_interface: + ipv6: fc00:b::133/64 + + ARISTA306T1: + properties: + - common + bgp: + router-id: 0.12.1.52 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4cd + interfaces: + Loopback0: + ipv6: fc00:c:c:134::1/128 + Ethernet1: + ipv6: fc00:a::4ce/126 + bp_interface: + ipv6: 
fc00:b::134/64 + + ARISTA307T1: + properties: + - common + bgp: + router-id: 0.12.1.53 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:135::1/128 + Ethernet1: + ipv6: fc00:a::4d2/126 + bp_interface: + ipv6: fc00:b::135/64 + + ARISTA308T1: + properties: + - common + bgp: + router-id: 0.12.1.54 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:136::1/128 + Ethernet1: + ipv6: fc00:a::4d6/126 + bp_interface: + ipv6: fc00:b::136/64 + + ARISTA309T1: + properties: + - common + bgp: + router-id: 0.12.1.55 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:137::1/128 + Ethernet1: + ipv6: fc00:a::4da/126 + bp_interface: + ipv6: fc00:b::137/64 + + ARISTA310T1: + properties: + - common + bgp: + router-id: 0.12.1.56 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4dd + interfaces: + Loopback0: + ipv6: fc00:c:c:138::1/128 + Ethernet1: + ipv6: fc00:a::4de/126 + bp_interface: + ipv6: fc00:b::138/64 + + ARISTA311T1: + properties: + - common + bgp: + router-id: 0.12.1.57 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:139::1/128 + Ethernet1: + ipv6: fc00:a::4e2/126 + bp_interface: + ipv6: fc00:b::139/64 + + ARISTA312T1: + properties: + - common + bgp: + router-id: 0.12.1.58 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:13a::1/128 + Ethernet1: + ipv6: fc00:a::4e6/126 + bp_interface: + ipv6: fc00:b::13a/64 + + ARISTA313T1: + properties: + - common + bgp: + router-id: 0.12.1.59 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:13b::1/128 + Ethernet1: + ipv6: fc00:a::4ea/126 + bp_interface: + ipv6: fc00:b::13b/64 + + ARISTA314T1: + properties: + - common + bgp: + router-id: 0.12.1.60 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4ed + interfaces: + Loopback0: + ipv6: 
fc00:c:c:13c::1/128 + Ethernet1: + ipv6: fc00:a::4ee/126 + bp_interface: + ipv6: fc00:b::13c/64 + + ARISTA315T1: + properties: + - common + bgp: + router-id: 0.12.1.61 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:13d::1/128 + Ethernet1: + ipv6: fc00:a::4f2/126 + bp_interface: + ipv6: fc00:b::13d/64 + + ARISTA316T1: + properties: + - common + bgp: + router-id: 0.12.1.62 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:13e::1/128 + Ethernet1: + ipv6: fc00:a::4f6/126 + bp_interface: + ipv6: fc00:b::13e/64 + + ARISTA317T1: + properties: + - common + bgp: + router-id: 0.12.1.63 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:13f::1/128 + Ethernet1: + ipv6: fc00:a::4fa/126 + bp_interface: + ipv6: fc00:b::13f/64 + + ARISTA318T1: + properties: + - common + bgp: + router-id: 0.12.1.64 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::4fd + interfaces: + Loopback0: + ipv6: fc00:c:c:140::1/128 + Ethernet1: + ipv6: fc00:a::4fe/126 + bp_interface: + ipv6: fc00:b::140/64 + + ARISTA319T1: + properties: + - common + bgp: + router-id: 0.12.1.65 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::501 + interfaces: + Loopback0: + ipv6: fc00:c:c:141::1/128 + Ethernet1: + ipv6: fc00:a::502/126 + bp_interface: + ipv6: fc00:b::141/64 + + ARISTA320T1: + properties: + - common + bgp: + router-id: 0.12.1.66 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::505 + interfaces: + Loopback0: + ipv6: fc00:c:c:142::1/128 + Ethernet1: + ipv6: fc00:a::506/126 + bp_interface: + ipv6: fc00:b::142/64 + + ARISTA321T1: + properties: + - common + bgp: + router-id: 0.12.1.67 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::509 + interfaces: + Loopback0: + ipv6: fc00:c:c:143::1/128 + Ethernet1: + ipv6: fc00:a::50a/126 + bp_interface: + ipv6: fc00:b::143/64 + + ARISTA322T1: + properties: + - common + bgp: + router-id: 0.12.1.68 + asn: 
4200100000 + peers: + 4200000000: + - fc00:a::50d + interfaces: + Loopback0: + ipv6: fc00:c:c:144::1/128 + Ethernet1: + ipv6: fc00:a::50e/126 + bp_interface: + ipv6: fc00:b::144/64 + + ARISTA323T1: + properties: + - common + bgp: + router-id: 0.12.1.69 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::511 + interfaces: + Loopback0: + ipv6: fc00:c:c:145::1/128 + Ethernet1: + ipv6: fc00:a::512/126 + bp_interface: + ipv6: fc00:b::145/64 + + ARISTA324T1: + properties: + - common + bgp: + router-id: 0.12.1.70 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::515 + interfaces: + Loopback0: + ipv6: fc00:c:c:146::1/128 + Ethernet1: + ipv6: fc00:a::516/126 + bp_interface: + ipv6: fc00:b::146/64 + + ARISTA325T1: + properties: + - common + bgp: + router-id: 0.12.1.71 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::519 + interfaces: + Loopback0: + ipv6: fc00:c:c:147::1/128 + Ethernet1: + ipv6: fc00:a::51a/126 + bp_interface: + ipv6: fc00:b::147/64 + + ARISTA326T1: + properties: + - common + bgp: + router-id: 0.12.1.72 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::51d + interfaces: + Loopback0: + ipv6: fc00:c:c:148::1/128 + Ethernet1: + ipv6: fc00:a::51e/126 + bp_interface: + ipv6: fc00:b::148/64 + + ARISTA327T1: + properties: + - common + bgp: + router-id: 0.12.1.73 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::521 + interfaces: + Loopback0: + ipv6: fc00:c:c:149::1/128 + Ethernet1: + ipv6: fc00:a::522/126 + bp_interface: + ipv6: fc00:b::149/64 + + ARISTA328T1: + properties: + - common + bgp: + router-id: 0.12.1.74 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::525 + interfaces: + Loopback0: + ipv6: fc00:c:c:14a::1/128 + Ethernet1: + ipv6: fc00:a::526/126 + bp_interface: + ipv6: fc00:b::14a/64 + + ARISTA329T1: + properties: + - common + bgp: + router-id: 0.12.1.75 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::529 + interfaces: + Loopback0: + ipv6: fc00:c:c:14b::1/128 + Ethernet1: + ipv6: fc00:a::52a/126 + bp_interface: + ipv6: 
fc00:b::14b/64 + + ARISTA330T1: + properties: + - common + bgp: + router-id: 0.12.1.76 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::52d + interfaces: + Loopback0: + ipv6: fc00:c:c:14c::1/128 + Ethernet1: + ipv6: fc00:a::52e/126 + bp_interface: + ipv6: fc00:b::14c/64 + + ARISTA331T1: + properties: + - common + bgp: + router-id: 0.12.1.77 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::531 + interfaces: + Loopback0: + ipv6: fc00:c:c:14d::1/128 + Ethernet1: + ipv6: fc00:a::532/126 + bp_interface: + ipv6: fc00:b::14d/64 + + ARISTA332T1: + properties: + - common + bgp: + router-id: 0.12.1.78 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::535 + interfaces: + Loopback0: + ipv6: fc00:c:c:14e::1/128 + Ethernet1: + ipv6: fc00:a::536/126 + bp_interface: + ipv6: fc00:b::14e/64 + + ARISTA333T1: + properties: + - common + bgp: + router-id: 0.12.1.79 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::539 + interfaces: + Loopback0: + ipv6: fc00:c:c:14f::1/128 + Ethernet1: + ipv6: fc00:a::53a/126 + bp_interface: + ipv6: fc00:b::14f/64 + + ARISTA334T1: + properties: + - common + bgp: + router-id: 0.12.1.80 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::53d + interfaces: + Loopback0: + ipv6: fc00:c:c:150::1/128 + Ethernet1: + ipv6: fc00:a::53e/126 + bp_interface: + ipv6: fc00:b::150/64 + + ARISTA335T1: + properties: + - common + bgp: + router-id: 0.12.1.81 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::541 + interfaces: + Loopback0: + ipv6: fc00:c:c:151::1/128 + Ethernet1: + ipv6: fc00:a::542/126 + bp_interface: + ipv6: fc00:b::151/64 + + ARISTA336T1: + properties: + - common + bgp: + router-id: 0.12.1.82 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::545 + interfaces: + Loopback0: + ipv6: fc00:c:c:152::1/128 + Ethernet1: + ipv6: fc00:a::546/126 + bp_interface: + ipv6: fc00:b::152/64 + + ARISTA337T1: + properties: + - common + bgp: + router-id: 0.12.1.83 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::549 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:153::1/128 + Ethernet1: + ipv6: fc00:a::54a/126 + bp_interface: + ipv6: fc00:b::153/64 + + ARISTA338T1: + properties: + - common + bgp: + router-id: 0.12.1.84 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::54d + interfaces: + Loopback0: + ipv6: fc00:c:c:154::1/128 + Ethernet1: + ipv6: fc00:a::54e/126 + bp_interface: + ipv6: fc00:b::154/64 + + ARISTA339T1: + properties: + - common + bgp: + router-id: 0.12.1.85 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::551 + interfaces: + Loopback0: + ipv6: fc00:c:c:155::1/128 + Ethernet1: + ipv6: fc00:a::552/126 + bp_interface: + ipv6: fc00:b::155/64 + + ARISTA340T1: + properties: + - common + bgp: + router-id: 0.12.1.86 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::555 + interfaces: + Loopback0: + ipv6: fc00:c:c:156::1/128 + Ethernet1: + ipv6: fc00:a::556/126 + bp_interface: + ipv6: fc00:b::156/64 + + ARISTA341T1: + properties: + - common + bgp: + router-id: 0.12.1.87 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::559 + interfaces: + Loopback0: + ipv6: fc00:c:c:157::1/128 + Ethernet1: + ipv6: fc00:a::55a/126 + bp_interface: + ipv6: fc00:b::157/64 + + ARISTA342T1: + properties: + - common + bgp: + router-id: 0.12.1.88 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::55d + interfaces: + Loopback0: + ipv6: fc00:c:c:158::1/128 + Ethernet1: + ipv6: fc00:a::55e/126 + bp_interface: + ipv6: fc00:b::158/64 + + ARISTA343T1: + properties: + - common + bgp: + router-id: 0.12.1.89 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::561 + interfaces: + Loopback0: + ipv6: fc00:c:c:159::1/128 + Ethernet1: + ipv6: fc00:a::562/126 + bp_interface: + ipv6: fc00:b::159/64 + + ARISTA344T1: + properties: + - common + bgp: + router-id: 0.12.1.90 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::565 + interfaces: + Loopback0: + ipv6: fc00:c:c:15a::1/128 + Ethernet1: + ipv6: fc00:a::566/126 + bp_interface: + ipv6: fc00:b::15a/64 + + ARISTA345T1: + properties: + - common + bgp: + router-id: 0.12.1.91 + asn: 
4200100000 + peers: + 4200000000: + - fc00:a::569 + interfaces: + Loopback0: + ipv6: fc00:c:c:15b::1/128 + Ethernet1: + ipv6: fc00:a::56a/126 + bp_interface: + ipv6: fc00:b::15b/64 + + ARISTA346T1: + properties: + - common + bgp: + router-id: 0.12.1.92 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::56d + interfaces: + Loopback0: + ipv6: fc00:c:c:15c::1/128 + Ethernet1: + ipv6: fc00:a::56e/126 + bp_interface: + ipv6: fc00:b::15c/64 + + ARISTA347T1: + properties: + - common + bgp: + router-id: 0.12.1.93 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::571 + interfaces: + Loopback0: + ipv6: fc00:c:c:15d::1/128 + Ethernet1: + ipv6: fc00:a::572/126 + bp_interface: + ipv6: fc00:b::15d/64 + + ARISTA348T1: + properties: + - common + bgp: + router-id: 0.12.1.94 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::575 + interfaces: + Loopback0: + ipv6: fc00:c:c:15e::1/128 + Ethernet1: + ipv6: fc00:a::576/126 + bp_interface: + ipv6: fc00:b::15e/64 + + ARISTA349T1: + properties: + - common + bgp: + router-id: 0.12.1.95 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::579 + interfaces: + Loopback0: + ipv6: fc00:c:c:15f::1/128 + Ethernet1: + ipv6: fc00:a::57a/126 + bp_interface: + ipv6: fc00:b::15f/64 + + ARISTA350T1: + properties: + - common + bgp: + router-id: 0.12.1.96 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::57d + interfaces: + Loopback0: + ipv6: fc00:c:c:160::1/128 + Ethernet1: + ipv6: fc00:a::57e/126 + bp_interface: + ipv6: fc00:b::160/64 + + ARISTA351T1: + properties: + - common + bgp: + router-id: 0.12.1.97 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::581 + interfaces: + Loopback0: + ipv6: fc00:c:c:161::1/128 + Ethernet1: + ipv6: fc00:a::582/126 + bp_interface: + ipv6: fc00:b::161/64 + + ARISTA352T1: + properties: + - common + bgp: + router-id: 0.12.1.98 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::585 + interfaces: + Loopback0: + ipv6: fc00:c:c:162::1/128 + Ethernet1: + ipv6: fc00:a::586/126 + bp_interface: + ipv6: 
fc00:b::162/64 + + ARISTA353T1: + properties: + - common + bgp: + router-id: 0.12.1.99 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::589 + interfaces: + Loopback0: + ipv6: fc00:c:c:163::1/128 + Ethernet1: + ipv6: fc00:a::58a/126 + bp_interface: + ipv6: fc00:b::163/64 + + ARISTA354T1: + properties: + - common + bgp: + router-id: 0.12.1.100 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::58d + interfaces: + Loopback0: + ipv6: fc00:c:c:164::1/128 + Ethernet1: + ipv6: fc00:a::58e/126 + bp_interface: + ipv6: fc00:b::164/64 + + ARISTA355T1: + properties: + - common + bgp: + router-id: 0.12.1.101 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::591 + interfaces: + Loopback0: + ipv6: fc00:c:c:165::1/128 + Ethernet1: + ipv6: fc00:a::592/126 + bp_interface: + ipv6: fc00:b::165/64 + + ARISTA356T1: + properties: + - common + bgp: + router-id: 0.12.1.102 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::595 + interfaces: + Loopback0: + ipv6: fc00:c:c:166::1/128 + Ethernet1: + ipv6: fc00:a::596/126 + bp_interface: + ipv6: fc00:b::166/64 + + ARISTA357T1: + properties: + - common + bgp: + router-id: 0.12.1.103 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::599 + interfaces: + Loopback0: + ipv6: fc00:c:c:167::1/128 + Ethernet1: + ipv6: fc00:a::59a/126 + bp_interface: + ipv6: fc00:b::167/64 + + ARISTA358T1: + properties: + - common + bgp: + router-id: 0.12.1.104 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::59d + interfaces: + Loopback0: + ipv6: fc00:c:c:168::1/128 + Ethernet1: + ipv6: fc00:a::59e/126 + bp_interface: + ipv6: fc00:b::168/64 + + ARISTA359T1: + properties: + - common + bgp: + router-id: 0.12.1.105 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:169::1/128 + Ethernet1: + ipv6: fc00:a::5a2/126 + bp_interface: + ipv6: fc00:b::169/64 + + ARISTA360T1: + properties: + - common + bgp: + router-id: 0.12.1.106 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5a5 + interfaces: + Loopback0: + 
ipv6: fc00:c:c:16a::1/128 + Ethernet1: + ipv6: fc00:a::5a6/126 + bp_interface: + ipv6: fc00:b::16a/64 + + ARISTA361T1: + properties: + - common + bgp: + router-id: 0.12.1.107 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:16b::1/128 + Ethernet1: + ipv6: fc00:a::5aa/126 + bp_interface: + ipv6: fc00:b::16b/64 + + ARISTA362T1: + properties: + - common + bgp: + router-id: 0.12.1.108 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5ad + interfaces: + Loopback0: + ipv6: fc00:c:c:16c::1/128 + Ethernet1: + ipv6: fc00:a::5ae/126 + bp_interface: + ipv6: fc00:b::16c/64 + + ARISTA363T1: + properties: + - common + bgp: + router-id: 0.12.1.109 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:16d::1/128 + Ethernet1: + ipv6: fc00:a::5b2/126 + bp_interface: + ipv6: fc00:b::16d/64 + + ARISTA364T1: + properties: + - common + bgp: + router-id: 0.12.1.110 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:16e::1/128 + Ethernet1: + ipv6: fc00:a::5b6/126 + bp_interface: + ipv6: fc00:b::16e/64 + + ARISTA365T1: + properties: + - common + bgp: + router-id: 0.12.1.111 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:16f::1/128 + Ethernet1: + ipv6: fc00:a::5ba/126 + bp_interface: + ipv6: fc00:b::16f/64 + + ARISTA366T1: + properties: + - common + bgp: + router-id: 0.12.1.112 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5bd + interfaces: + Loopback0: + ipv6: fc00:c:c:170::1/128 + Ethernet1: + ipv6: fc00:a::5be/126 + bp_interface: + ipv6: fc00:b::170/64 + + ARISTA367T1: + properties: + - common + bgp: + router-id: 0.12.1.113 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:171::1/128 + Ethernet1: + ipv6: fc00:a::5c2/126 + bp_interface: + ipv6: fc00:b::171/64 + + ARISTA368T1: + properties: + - common + bgp: + router-id: 
0.12.1.114 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:172::1/128 + Ethernet1: + ipv6: fc00:a::5c6/126 + bp_interface: + ipv6: fc00:b::172/64 + + ARISTA369T1: + properties: + - common + bgp: + router-id: 0.12.1.115 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:173::1/128 + Ethernet1: + ipv6: fc00:a::5ca/126 + bp_interface: + ipv6: fc00:b::173/64 + + ARISTA370T1: + properties: + - common + bgp: + router-id: 0.12.1.116 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5cd + interfaces: + Loopback0: + ipv6: fc00:c:c:174::1/128 + Ethernet1: + ipv6: fc00:a::5ce/126 + bp_interface: + ipv6: fc00:b::174/64 + + ARISTA371T1: + properties: + - common + bgp: + router-id: 0.12.1.117 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:175::1/128 + Ethernet1: + ipv6: fc00:a::5d2/126 + bp_interface: + ipv6: fc00:b::175/64 + + ARISTA372T1: + properties: + - common + bgp: + router-id: 0.12.1.118 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:176::1/128 + Ethernet1: + ipv6: fc00:a::5d6/126 + bp_interface: + ipv6: fc00:b::176/64 + + ARISTA373T1: + properties: + - common + bgp: + router-id: 0.12.1.119 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:177::1/128 + Ethernet1: + ipv6: fc00:a::5da/126 + bp_interface: + ipv6: fc00:b::177/64 + + ARISTA374T1: + properties: + - common + bgp: + router-id: 0.12.1.120 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5dd + interfaces: + Loopback0: + ipv6: fc00:c:c:178::1/128 + Ethernet1: + ipv6: fc00:a::5de/126 + bp_interface: + ipv6: fc00:b::178/64 + + ARISTA375T1: + properties: + - common + bgp: + router-id: 0.12.1.121 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:179::1/128 + Ethernet1: + ipv6: fc00:a::5e2/126 + 
bp_interface: + ipv6: fc00:b::179/64 + + ARISTA376T1: + properties: + - common + bgp: + router-id: 0.12.1.122 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:17a::1/128 + Ethernet1: + ipv6: fc00:a::5e6/126 + bp_interface: + ipv6: fc00:b::17a/64 + + ARISTA377T1: + properties: + - common + bgp: + router-id: 0.12.1.123 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:17b::1/128 + Ethernet1: + ipv6: fc00:a::5ea/126 + bp_interface: + ipv6: fc00:b::17b/64 + + ARISTA378T1: + properties: + - common + bgp: + router-id: 0.12.1.124 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5ed + interfaces: + Loopback0: + ipv6: fc00:c:c:17c::1/128 + Ethernet1: + ipv6: fc00:a::5ee/126 + bp_interface: + ipv6: fc00:b::17c/64 + + ARISTA379T1: + properties: + - common + bgp: + router-id: 0.12.1.125 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:17d::1/128 + Ethernet1: + ipv6: fc00:a::5f2/126 + bp_interface: + ipv6: fc00:b::17d/64 + + ARISTA380T1: + properties: + - common + bgp: + router-id: 0.12.1.126 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:17e::1/128 + Ethernet1: + ipv6: fc00:a::5f6/126 + bp_interface: + ipv6: fc00:b::17e/64 + + ARISTA381T1: + properties: + - common + bgp: + router-id: 0.12.1.127 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:17f::1/128 + Ethernet1: + ipv6: fc00:a::5fa/126 + bp_interface: + ipv6: fc00:b::17f/64 + + ARISTA382T1: + properties: + - common + bgp: + router-id: 0.12.1.128 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::5fd + interfaces: + Loopback0: + ipv6: fc00:c:c:180::1/128 + Ethernet1: + ipv6: fc00:a::5fe/126 + bp_interface: + ipv6: fc00:b::180/64 + + ARISTA383T1: + properties: + - common + bgp: + router-id: 0.12.1.129 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::601 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:181::1/128 + Ethernet1: + ipv6: fc00:a::602/126 + bp_interface: + ipv6: fc00:b::181/64 + + ARISTA384T1: + properties: + - common + bgp: + router-id: 0.12.1.130 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::605 + interfaces: + Loopback0: + ipv6: fc00:c:c:182::1/128 + Ethernet1: + ipv6: fc00:a::606/126 + bp_interface: + ipv6: fc00:b::182/64 + + ARISTA385T1: + properties: + - common + bgp: + router-id: 0.12.1.131 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::609 + interfaces: + Loopback0: + ipv6: fc00:c:c:183::1/128 + Ethernet1: + ipv6: fc00:a::60a/126 + bp_interface: + ipv6: fc00:b::183/64 + + ARISTA386T1: + properties: + - common + bgp: + router-id: 0.12.1.132 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::60d + interfaces: + Loopback0: + ipv6: fc00:c:c:184::1/128 + Ethernet1: + ipv6: fc00:a::60e/126 + bp_interface: + ipv6: fc00:b::184/64 + + ARISTA387T1: + properties: + - common + bgp: + router-id: 0.12.1.133 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::611 + interfaces: + Loopback0: + ipv6: fc00:c:c:185::1/128 + Ethernet1: + ipv6: fc00:a::612/126 + bp_interface: + ipv6: fc00:b::185/64 + + ARISTA388T1: + properties: + - common + bgp: + router-id: 0.12.1.134 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::615 + interfaces: + Loopback0: + ipv6: fc00:c:c:186::1/128 + Ethernet1: + ipv6: fc00:a::616/126 + bp_interface: + ipv6: fc00:b::186/64 + + ARISTA389T1: + properties: + - common + bgp: + router-id: 0.12.1.135 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::619 + interfaces: + Loopback0: + ipv6: fc00:c:c:187::1/128 + Ethernet1: + ipv6: fc00:a::61a/126 + bp_interface: + ipv6: fc00:b::187/64 + + ARISTA390T1: + properties: + - common + bgp: + router-id: 0.12.1.136 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::61d + interfaces: + Loopback0: + ipv6: fc00:c:c:188::1/128 + Ethernet1: + ipv6: fc00:a::61e/126 + bp_interface: + ipv6: fc00:b::188/64 + + ARISTA391T1: + properties: + - common + 
bgp: + router-id: 0.12.1.137 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::621 + interfaces: + Loopback0: + ipv6: fc00:c:c:189::1/128 + Ethernet1: + ipv6: fc00:a::622/126 + bp_interface: + ipv6: fc00:b::189/64 + + ARISTA392T1: + properties: + - common + bgp: + router-id: 0.12.1.138 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::625 + interfaces: + Loopback0: + ipv6: fc00:c:c:18a::1/128 + Ethernet1: + ipv6: fc00:a::626/126 + bp_interface: + ipv6: fc00:b::18a/64 + + ARISTA393T1: + properties: + - common + bgp: + router-id: 0.12.1.139 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::629 + interfaces: + Loopback0: + ipv6: fc00:c:c:18b::1/128 + Ethernet1: + ipv6: fc00:a::62a/126 + bp_interface: + ipv6: fc00:b::18b/64 + + ARISTA394T1: + properties: + - common + bgp: + router-id: 0.12.1.140 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::62d + interfaces: + Loopback0: + ipv6: fc00:c:c:18c::1/128 + Ethernet1: + ipv6: fc00:a::62e/126 + bp_interface: + ipv6: fc00:b::18c/64 + + ARISTA395T1: + properties: + - common + bgp: + router-id: 0.12.1.141 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::631 + interfaces: + Loopback0: + ipv6: fc00:c:c:18d::1/128 + Ethernet1: + ipv6: fc00:a::632/126 + bp_interface: + ipv6: fc00:b::18d/64 + + ARISTA396T1: + properties: + - common + bgp: + router-id: 0.12.1.142 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::635 + interfaces: + Loopback0: + ipv6: fc00:c:c:18e::1/128 + Ethernet1: + ipv6: fc00:a::636/126 + bp_interface: + ipv6: fc00:b::18e/64 + + ARISTA397T1: + properties: + - common + bgp: + router-id: 0.12.1.143 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::639 + interfaces: + Loopback0: + ipv6: fc00:c:c:18f::1/128 + Ethernet1: + ipv6: fc00:a::63a/126 + bp_interface: + ipv6: fc00:b::18f/64 + + ARISTA398T1: + properties: + - common + bgp: + router-id: 0.12.1.144 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::63d + interfaces: + Loopback0: + ipv6: fc00:c:c:190::1/128 + Ethernet1: + ipv6: 
fc00:a::63e/126 + bp_interface: + ipv6: fc00:b::190/64 + + ARISTA399T1: + properties: + - common + bgp: + router-id: 0.12.1.145 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::641 + interfaces: + Loopback0: + ipv6: fc00:c:c:191::1/128 + Ethernet1: + ipv6: fc00:a::642/126 + bp_interface: + ipv6: fc00:b::191/64 + + ARISTA400T1: + properties: + - common + bgp: + router-id: 0.12.1.146 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::645 + interfaces: + Loopback0: + ipv6: fc00:c:c:192::1/128 + Ethernet1: + ipv6: fc00:a::646/126 + bp_interface: + ipv6: fc00:b::192/64 + + ARISTA401T1: + properties: + - common + bgp: + router-id: 0.12.1.147 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::649 + interfaces: + Loopback0: + ipv6: fc00:c:c:193::1/128 + Ethernet1: + ipv6: fc00:a::64a/126 + bp_interface: + ipv6: fc00:b::193/64 + + ARISTA402T1: + properties: + - common + bgp: + router-id: 0.12.1.148 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::64d + interfaces: + Loopback0: + ipv6: fc00:c:c:194::1/128 + Ethernet1: + ipv6: fc00:a::64e/126 + bp_interface: + ipv6: fc00:b::194/64 + + ARISTA403T1: + properties: + - common + bgp: + router-id: 0.12.1.149 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::651 + interfaces: + Loopback0: + ipv6: fc00:c:c:195::1/128 + Ethernet1: + ipv6: fc00:a::652/126 + bp_interface: + ipv6: fc00:b::195/64 + + ARISTA404T1: + properties: + - common + bgp: + router-id: 0.12.1.150 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::655 + interfaces: + Loopback0: + ipv6: fc00:c:c:196::1/128 + Ethernet1: + ipv6: fc00:a::656/126 + bp_interface: + ipv6: fc00:b::196/64 + + ARISTA405T1: + properties: + - common + bgp: + router-id: 0.12.1.151 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::659 + interfaces: + Loopback0: + ipv6: fc00:c:c:197::1/128 + Ethernet1: + ipv6: fc00:a::65a/126 + bp_interface: + ipv6: fc00:b::197/64 + + ARISTA406T1: + properties: + - common + bgp: + router-id: 0.12.1.152 + asn: 4200100000 + peers: + 4200000000: + - 
fc00:a::65d + interfaces: + Loopback0: + ipv6: fc00:c:c:198::1/128 + Ethernet1: + ipv6: fc00:a::65e/126 + bp_interface: + ipv6: fc00:b::198/64 + + ARISTA407T1: + properties: + - common + bgp: + router-id: 0.12.1.153 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::661 + interfaces: + Loopback0: + ipv6: fc00:c:c:199::1/128 + Ethernet1: + ipv6: fc00:a::662/126 + bp_interface: + ipv6: fc00:b::199/64 + + ARISTA408T1: + properties: + - common + bgp: + router-id: 0.12.1.154 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::665 + interfaces: + Loopback0: + ipv6: fc00:c:c:19a::1/128 + Ethernet1: + ipv6: fc00:a::666/126 + bp_interface: + ipv6: fc00:b::19a/64 + + ARISTA409T1: + properties: + - common + bgp: + router-id: 0.12.1.155 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::669 + interfaces: + Loopback0: + ipv6: fc00:c:c:19b::1/128 + Ethernet1: + ipv6: fc00:a::66a/126 + bp_interface: + ipv6: fc00:b::19b/64 + + ARISTA410T1: + properties: + - common + bgp: + router-id: 0.12.1.156 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::66d + interfaces: + Loopback0: + ipv6: fc00:c:c:19c::1/128 + Ethernet1: + ipv6: fc00:a::66e/126 + bp_interface: + ipv6: fc00:b::19c/64 + + ARISTA411T1: + properties: + - common + bgp: + router-id: 0.12.1.157 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::671 + interfaces: + Loopback0: + ipv6: fc00:c:c:19d::1/128 + Ethernet1: + ipv6: fc00:a::672/126 + bp_interface: + ipv6: fc00:b::19d/64 + + ARISTA412T1: + properties: + - common + bgp: + router-id: 0.12.1.158 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::675 + interfaces: + Loopback0: + ipv6: fc00:c:c:19e::1/128 + Ethernet1: + ipv6: fc00:a::676/126 + bp_interface: + ipv6: fc00:b::19e/64 + + ARISTA413T1: + properties: + - common + bgp: + router-id: 0.12.1.159 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::679 + interfaces: + Loopback0: + ipv6: fc00:c:c:19f::1/128 + Ethernet1: + ipv6: fc00:a::67a/126 + bp_interface: + ipv6: fc00:b::19f/64 + + ARISTA414T1: + 
properties: + - common + bgp: + router-id: 0.12.1.160 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::67d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a0::1/128 + Ethernet1: + ipv6: fc00:a::67e/126 + bp_interface: + ipv6: fc00:b::1a0/64 + + ARISTA415T1: + properties: + - common + bgp: + router-id: 0.12.1.161 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::681 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a1::1/128 + Ethernet1: + ipv6: fc00:a::682/126 + bp_interface: + ipv6: fc00:b::1a1/64 + + ARISTA416T1: + properties: + - common + bgp: + router-id: 0.12.1.162 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::685 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a2::1/128 + Ethernet1: + ipv6: fc00:a::686/126 + bp_interface: + ipv6: fc00:b::1a2/64 + + ARISTA417T1: + properties: + - common + bgp: + router-id: 0.12.1.163 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::689 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a3::1/128 + Ethernet1: + ipv6: fc00:a::68a/126 + bp_interface: + ipv6: fc00:b::1a3/64 + + ARISTA418T1: + properties: + - common + bgp: + router-id: 0.12.1.164 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::68d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a4::1/128 + Ethernet1: + ipv6: fc00:a::68e/126 + bp_interface: + ipv6: fc00:b::1a4/64 + + ARISTA419T1: + properties: + - common + bgp: + router-id: 0.12.1.165 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::691 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a5::1/128 + Ethernet1: + ipv6: fc00:a::692/126 + bp_interface: + ipv6: fc00:b::1a5/64 + + ARISTA420T1: + properties: + - common + bgp: + router-id: 0.12.1.166 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::695 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a6::1/128 + Ethernet1: + ipv6: fc00:a::696/126 + bp_interface: + ipv6: fc00:b::1a6/64 + + ARISTA421T1: + properties: + - common + bgp: + router-id: 0.12.1.167 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::699 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a7::1/128 + 
Ethernet1: + ipv6: fc00:a::69a/126 + bp_interface: + ipv6: fc00:b::1a7/64 + + ARISTA422T1: + properties: + - common + bgp: + router-id: 0.12.1.168 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::69d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a8::1/128 + Ethernet1: + ipv6: fc00:a::69e/126 + bp_interface: + ipv6: fc00:b::1a8/64 + + ARISTA423T1: + properties: + - common + bgp: + router-id: 0.12.1.169 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a9::1/128 + Ethernet1: + ipv6: fc00:a::6a2/126 + bp_interface: + ipv6: fc00:b::1a9/64 + + ARISTA424T1: + properties: + - common + bgp: + router-id: 0.12.1.170 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1aa::1/128 + Ethernet1: + ipv6: fc00:a::6a6/126 + bp_interface: + ipv6: fc00:b::1aa/64 + + ARISTA425T1: + properties: + - common + bgp: + router-id: 0.12.1.171 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ab::1/128 + Ethernet1: + ipv6: fc00:a::6aa/126 + bp_interface: + ipv6: fc00:b::1ab/64 + + ARISTA426T1: + properties: + - common + bgp: + router-id: 0.12.1.172 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6ad + interfaces: + Loopback0: + ipv6: fc00:c:c:1ac::1/128 + Ethernet1: + ipv6: fc00:a::6ae/126 + bp_interface: + ipv6: fc00:b::1ac/64 + + ARISTA427T1: + properties: + - common + bgp: + router-id: 0.12.1.173 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ad::1/128 + Ethernet1: + ipv6: fc00:a::6b2/126 + bp_interface: + ipv6: fc00:b::1ad/64 + + ARISTA428T1: + properties: + - common + bgp: + router-id: 0.12.1.174 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ae::1/128 + Ethernet1: + ipv6: fc00:a::6b6/126 + bp_interface: + ipv6: fc00:b::1ae/64 + + ARISTA429T1: + properties: + - common + bgp: + router-id: 0.12.1.175 + asn: 4200100000 + 
peers: + 4200000000: + - fc00:a::6b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1af::1/128 + Ethernet1: + ipv6: fc00:a::6ba/126 + bp_interface: + ipv6: fc00:b::1af/64 + + ARISTA430T1: + properties: + - common + bgp: + router-id: 0.12.1.176 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6bd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b0::1/128 + Ethernet1: + ipv6: fc00:a::6be/126 + bp_interface: + ipv6: fc00:b::1b0/64 + + ARISTA431T1: + properties: + - common + bgp: + router-id: 0.12.1.177 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b1::1/128 + Ethernet1: + ipv6: fc00:a::6c2/126 + bp_interface: + ipv6: fc00:b::1b1/64 + + ARISTA432T1: + properties: + - common + bgp: + router-id: 0.12.1.178 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b2::1/128 + Ethernet1: + ipv6: fc00:a::6c6/126 + bp_interface: + ipv6: fc00:b::1b2/64 + + ARISTA433T1: + properties: + - common + bgp: + router-id: 0.12.1.179 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b3::1/128 + Ethernet1: + ipv6: fc00:a::6ca/126 + bp_interface: + ipv6: fc00:b::1b3/64 + + ARISTA434T1: + properties: + - common + bgp: + router-id: 0.12.1.180 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6cd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b4::1/128 + Ethernet1: + ipv6: fc00:a::6ce/126 + bp_interface: + ipv6: fc00:b::1b4/64 + + ARISTA435T1: + properties: + - common + bgp: + router-id: 0.12.1.181 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b5::1/128 + Ethernet1: + ipv6: fc00:a::6d2/126 + bp_interface: + ipv6: fc00:b::1b5/64 + + ARISTA436T1: + properties: + - common + bgp: + router-id: 0.12.1.182 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b6::1/128 + Ethernet1: + ipv6: fc00:a::6d6/126 + bp_interface: + ipv6: fc00:b::1b6/64 + + 
ARISTA437T1: + properties: + - common + bgp: + router-id: 0.12.1.183 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b7::1/128 + Ethernet1: + ipv6: fc00:a::6da/126 + bp_interface: + ipv6: fc00:b::1b7/64 + + ARISTA438T1: + properties: + - common + bgp: + router-id: 0.12.1.184 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6dd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b8::1/128 + Ethernet1: + ipv6: fc00:a::6de/126 + bp_interface: + ipv6: fc00:b::1b8/64 + + ARISTA439T1: + properties: + - common + bgp: + router-id: 0.12.1.185 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b9::1/128 + Ethernet1: + ipv6: fc00:a::6e2/126 + bp_interface: + ipv6: fc00:b::1b9/64 + + ARISTA440T1: + properties: + - common + bgp: + router-id: 0.12.1.186 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ba::1/128 + Ethernet1: + ipv6: fc00:a::6e6/126 + bp_interface: + ipv6: fc00:b::1ba/64 + + ARISTA441T1: + properties: + - common + bgp: + router-id: 0.12.1.187 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bb::1/128 + Ethernet1: + ipv6: fc00:a::6ea/126 + bp_interface: + ipv6: fc00:b::1bb/64 + + ARISTA442T1: + properties: + - common + bgp: + router-id: 0.12.1.188 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6ed + interfaces: + Loopback0: + ipv6: fc00:c:c:1bc::1/128 + Ethernet1: + ipv6: fc00:a::6ee/126 + bp_interface: + ipv6: fc00:b::1bc/64 + + ARISTA443T1: + properties: + - common + bgp: + router-id: 0.12.1.189 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bd::1/128 + Ethernet1: + ipv6: fc00:a::6f2/126 + bp_interface: + ipv6: fc00:b::1bd/64 + + ARISTA444T1: + properties: + - common + bgp: + router-id: 0.12.1.190 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6f5 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:1be::1/128 + Ethernet1: + ipv6: fc00:a::6f6/126 + bp_interface: + ipv6: fc00:b::1be/64 + + ARISTA445T1: + properties: + - common + bgp: + router-id: 0.12.1.191 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bf::1/128 + Ethernet1: + ipv6: fc00:a::6fa/126 + bp_interface: + ipv6: fc00:b::1bf/64 + + ARISTA446T1: + properties: + - common + bgp: + router-id: 0.12.1.192 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::6fd + interfaces: + Loopback0: + ipv6: fc00:c:c:1c0::1/128 + Ethernet1: + ipv6: fc00:a::6fe/126 + bp_interface: + ipv6: fc00:b::1c0/64 + + ARISTA447T1: + properties: + - common + bgp: + router-id: 0.12.1.193 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::701 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c1::1/128 + Ethernet1: + ipv6: fc00:a::702/126 + bp_interface: + ipv6: fc00:b::1c1/64 + + ARISTA448T1: + properties: + - common + bgp: + router-id: 0.12.1.194 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::705 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c2::1/128 + Ethernet1: + ipv6: fc00:a::706/126 + bp_interface: + ipv6: fc00:b::1c2/64 + + ARISTA449T1: + properties: + - common + bgp: + router-id: 0.12.1.195 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::709 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c3::1/128 + Ethernet1: + ipv6: fc00:a::70a/126 + bp_interface: + ipv6: fc00:b::1c3/64 + + ARISTA450T1: + properties: + - common + bgp: + router-id: 0.12.1.196 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::70d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c4::1/128 + Ethernet1: + ipv6: fc00:a::70e/126 + bp_interface: + ipv6: fc00:b::1c4/64 + + ARISTA451T1: + properties: + - common + bgp: + router-id: 0.12.1.197 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::711 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c5::1/128 + Ethernet1: + ipv6: fc00:a::712/126 + bp_interface: + ipv6: fc00:b::1c5/64 + + ARISTA452T1: + properties: + - common + bgp: + router-id: 0.12.1.198 + 
asn: 4200100000 + peers: + 4200000000: + - fc00:a::715 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c6::1/128 + Ethernet1: + ipv6: fc00:a::716/126 + bp_interface: + ipv6: fc00:b::1c6/64 + + ARISTA453T1: + properties: + - common + bgp: + router-id: 0.12.1.199 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::719 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c7::1/128 + Ethernet1: + ipv6: fc00:a::71a/126 + bp_interface: + ipv6: fc00:b::1c7/64 + + ARISTA454T1: + properties: + - common + bgp: + router-id: 0.12.1.200 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::71d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c8::1/128 + Ethernet1: + ipv6: fc00:a::71e/126 + bp_interface: + ipv6: fc00:b::1c8/64 + + ARISTA455T1: + properties: + - common + bgp: + router-id: 0.12.1.201 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::721 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c9::1/128 + Ethernet1: + ipv6: fc00:a::722/126 + bp_interface: + ipv6: fc00:b::1c9/64 + + ARISTA456T1: + properties: + - common + bgp: + router-id: 0.12.1.202 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::725 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ca::1/128 + Ethernet1: + ipv6: fc00:a::726/126 + bp_interface: + ipv6: fc00:b::1ca/64 + + ARISTA457T1: + properties: + - common + bgp: + router-id: 0.12.1.203 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::729 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cb::1/128 + Ethernet1: + ipv6: fc00:a::72a/126 + bp_interface: + ipv6: fc00:b::1cb/64 + + ARISTA458T1: + properties: + - common + bgp: + router-id: 0.12.1.204 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::72d + interfaces: + Loopback0: + ipv6: fc00:c:c:1cc::1/128 + Ethernet1: + ipv6: fc00:a::72e/126 + bp_interface: + ipv6: fc00:b::1cc/64 + + ARISTA459T1: + properties: + - common + bgp: + router-id: 0.12.1.205 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::731 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cd::1/128 + Ethernet1: + ipv6: fc00:a::732/126 + bp_interface: + ipv6: 
fc00:b::1cd/64 + + ARISTA460T1: + properties: + - common + bgp: + router-id: 0.12.1.206 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::735 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ce::1/128 + Ethernet1: + ipv6: fc00:a::736/126 + bp_interface: + ipv6: fc00:b::1ce/64 + + ARISTA461T1: + properties: + - common + bgp: + router-id: 0.12.1.207 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::739 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cf::1/128 + Ethernet1: + ipv6: fc00:a::73a/126 + bp_interface: + ipv6: fc00:b::1cf/64 + + ARISTA462T1: + properties: + - common + bgp: + router-id: 0.12.1.208 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::73d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d0::1/128 + Ethernet1: + ipv6: fc00:a::73e/126 + bp_interface: + ipv6: fc00:b::1d0/64 + + ARISTA463T1: + properties: + - common + bgp: + router-id: 0.12.1.209 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::741 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d1::1/128 + Ethernet1: + ipv6: fc00:a::742/126 + bp_interface: + ipv6: fc00:b::1d1/64 + + ARISTA464T1: + properties: + - common + bgp: + router-id: 0.12.1.210 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::745 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d2::1/128 + Ethernet1: + ipv6: fc00:a::746/126 + bp_interface: + ipv6: fc00:b::1d2/64 + + ARISTA465T1: + properties: + - common + bgp: + router-id: 0.12.1.211 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::749 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d3::1/128 + Ethernet1: + ipv6: fc00:a::74a/126 + bp_interface: + ipv6: fc00:b::1d3/64 + + ARISTA466T1: + properties: + - common + bgp: + router-id: 0.12.1.212 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::74d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d4::1/128 + Ethernet1: + ipv6: fc00:a::74e/126 + bp_interface: + ipv6: fc00:b::1d4/64 + + ARISTA467T1: + properties: + - common + bgp: + router-id: 0.12.1.213 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::751 + interfaces: + Loopback0: 
+ ipv6: fc00:c:c:1d5::1/128 + Ethernet1: + ipv6: fc00:a::752/126 + bp_interface: + ipv6: fc00:b::1d5/64 + + ARISTA468T1: + properties: + - common + bgp: + router-id: 0.12.1.214 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::755 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d6::1/128 + Ethernet1: + ipv6: fc00:a::756/126 + bp_interface: + ipv6: fc00:b::1d6/64 + + ARISTA469T1: + properties: + - common + bgp: + router-id: 0.12.1.215 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::759 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d7::1/128 + Ethernet1: + ipv6: fc00:a::75a/126 + bp_interface: + ipv6: fc00:b::1d7/64 + + ARISTA470T1: + properties: + - common + bgp: + router-id: 0.12.1.216 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::75d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d8::1/128 + Ethernet1: + ipv6: fc00:a::75e/126 + bp_interface: + ipv6: fc00:b::1d8/64 + + ARISTA471T1: + properties: + - common + bgp: + router-id: 0.12.1.217 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::761 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d9::1/128 + Ethernet1: + ipv6: fc00:a::762/126 + bp_interface: + ipv6: fc00:b::1d9/64 + + ARISTA472T1: + properties: + - common + bgp: + router-id: 0.12.1.218 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::765 + interfaces: + Loopback0: + ipv6: fc00:c:c:1da::1/128 + Ethernet1: + ipv6: fc00:a::766/126 + bp_interface: + ipv6: fc00:b::1da/64 + + ARISTA473T1: + properties: + - common + bgp: + router-id: 0.12.1.219 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::769 + interfaces: + Loopback0: + ipv6: fc00:c:c:1db::1/128 + Ethernet1: + ipv6: fc00:a::76a/126 + bp_interface: + ipv6: fc00:b::1db/64 + + ARISTA474T1: + properties: + - common + bgp: + router-id: 0.12.1.220 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::76d + interfaces: + Loopback0: + ipv6: fc00:c:c:1dc::1/128 + Ethernet1: + ipv6: fc00:a::76e/126 + bp_interface: + ipv6: fc00:b::1dc/64 + + ARISTA475T1: + properties: + - common + bgp: + router-id: 
0.12.1.221 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::771 + interfaces: + Loopback0: + ipv6: fc00:c:c:1dd::1/128 + Ethernet1: + ipv6: fc00:a::772/126 + bp_interface: + ipv6: fc00:b::1dd/64 + + ARISTA476T1: + properties: + - common + bgp: + router-id: 0.12.1.222 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::775 + interfaces: + Loopback0: + ipv6: fc00:c:c:1de::1/128 + Ethernet1: + ipv6: fc00:a::776/126 + bp_interface: + ipv6: fc00:b::1de/64 + + ARISTA477T1: + properties: + - common + bgp: + router-id: 0.12.1.223 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::779 + interfaces: + Loopback0: + ipv6: fc00:c:c:1df::1/128 + Ethernet1: + ipv6: fc00:a::77a/126 + bp_interface: + ipv6: fc00:b::1df/64 + + ARISTA478T1: + properties: + - common + bgp: + router-id: 0.12.1.224 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::77d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e0::1/128 + Ethernet1: + ipv6: fc00:a::77e/126 + bp_interface: + ipv6: fc00:b::1e0/64 + + ARISTA479T1: + properties: + - common + bgp: + router-id: 0.12.1.225 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::781 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e1::1/128 + Ethernet1: + ipv6: fc00:a::782/126 + bp_interface: + ipv6: fc00:b::1e1/64 + + ARISTA480T1: + properties: + - common + bgp: + router-id: 0.12.1.226 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::785 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e2::1/128 + Ethernet1: + ipv6: fc00:a::786/126 + bp_interface: + ipv6: fc00:b::1e2/64 + + ARISTA481T1: + properties: + - common + bgp: + router-id: 0.12.1.227 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::789 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e3::1/128 + Ethernet1: + ipv6: fc00:a::78a/126 + bp_interface: + ipv6: fc00:b::1e3/64 + + ARISTA482T1: + properties: + - common + bgp: + router-id: 0.12.1.228 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::78d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e4::1/128 + Ethernet1: + ipv6: fc00:a::78e/126 + 
bp_interface: + ipv6: fc00:b::1e4/64 + + ARISTA483T1: + properties: + - common + bgp: + router-id: 0.12.1.229 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::791 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e5::1/128 + Ethernet1: + ipv6: fc00:a::792/126 + bp_interface: + ipv6: fc00:b::1e5/64 + + ARISTA484T1: + properties: + - common + bgp: + router-id: 0.12.1.230 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::795 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e6::1/128 + Ethernet1: + ipv6: fc00:a::796/126 + bp_interface: + ipv6: fc00:b::1e6/64 + + ARISTA485T1: + properties: + - common + bgp: + router-id: 0.12.1.231 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::799 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e7::1/128 + Ethernet1: + ipv6: fc00:a::79a/126 + bp_interface: + ipv6: fc00:b::1e7/64 + + ARISTA486T1: + properties: + - common + bgp: + router-id: 0.12.1.232 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::79d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e8::1/128 + Ethernet1: + ipv6: fc00:a::79e/126 + bp_interface: + ipv6: fc00:b::1e8/64 + + ARISTA487T1: + properties: + - common + bgp: + router-id: 0.12.1.233 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e9::1/128 + Ethernet1: + ipv6: fc00:a::7a2/126 + bp_interface: + ipv6: fc00:b::1e9/64 + + ARISTA488T1: + properties: + - common + bgp: + router-id: 0.12.1.234 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ea::1/128 + Ethernet1: + ipv6: fc00:a::7a6/126 + bp_interface: + ipv6: fc00:b::1ea/64 + + ARISTA489T1: + properties: + - common + bgp: + router-id: 0.12.1.235 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1eb::1/128 + Ethernet1: + ipv6: fc00:a::7aa/126 + bp_interface: + ipv6: fc00:b::1eb/64 + + ARISTA490T1: + properties: + - common + bgp: + router-id: 0.12.1.236 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7ad + 
interfaces: + Loopback0: + ipv6: fc00:c:c:1ec::1/128 + Ethernet1: + ipv6: fc00:a::7ae/126 + bp_interface: + ipv6: fc00:b::1ec/64 + + ARISTA491T1: + properties: + - common + bgp: + router-id: 0.12.1.237 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ed::1/128 + Ethernet1: + ipv6: fc00:a::7b2/126 + bp_interface: + ipv6: fc00:b::1ed/64 + + ARISTA492T1: + properties: + - common + bgp: + router-id: 0.12.1.238 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ee::1/128 + Ethernet1: + ipv6: fc00:a::7b6/126 + bp_interface: + ipv6: fc00:b::1ee/64 + + ARISTA493T1: + properties: + - common + bgp: + router-id: 0.12.1.239 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ef::1/128 + Ethernet1: + ipv6: fc00:a::7ba/126 + bp_interface: + ipv6: fc00:b::1ef/64 + + ARISTA494T1: + properties: + - common + bgp: + router-id: 0.12.1.240 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7bd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f0::1/128 + Ethernet1: + ipv6: fc00:a::7be/126 + bp_interface: + ipv6: fc00:b::1f0/64 + + ARISTA495T1: + properties: + - common + bgp: + router-id: 0.12.1.241 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f1::1/128 + Ethernet1: + ipv6: fc00:a::7c2/126 + bp_interface: + ipv6: fc00:b::1f1/64 + + ARISTA496T1: + properties: + - common + bgp: + router-id: 0.12.1.242 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f2::1/128 + Ethernet1: + ipv6: fc00:a::7c6/126 + bp_interface: + ipv6: fc00:b::1f2/64 + + ARISTA497T1: + properties: + - common + bgp: + router-id: 0.12.1.243 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f3::1/128 + Ethernet1: + ipv6: fc00:a::7ca/126 + bp_interface: + ipv6: fc00:b::1f3/64 + + ARISTA498T1: + properties: + - common + 
bgp: + router-id: 0.12.1.244 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7cd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f4::1/128 + Ethernet1: + ipv6: fc00:a::7ce/126 + bp_interface: + ipv6: fc00:b::1f4/64 + + ARISTA499T1: + properties: + - common + bgp: + router-id: 0.12.1.245 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f5::1/128 + Ethernet1: + ipv6: fc00:a::7d2/126 + bp_interface: + ipv6: fc00:b::1f5/64 + + ARISTA500T1: + properties: + - common + bgp: + router-id: 0.12.1.246 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f6::1/128 + Ethernet1: + ipv6: fc00:a::7d6/126 + bp_interface: + ipv6: fc00:b::1f6/64 + + ARISTA501T1: + properties: + - common + bgp: + router-id: 0.12.1.247 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f7::1/128 + Ethernet1: + ipv6: fc00:a::7da/126 + bp_interface: + ipv6: fc00:b::1f7/64 + + ARISTA502T1: + properties: + - common + bgp: + router-id: 0.12.1.248 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7dd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f8::1/128 + Ethernet1: + ipv6: fc00:a::7de/126 + bp_interface: + ipv6: fc00:b::1f8/64 + + ARISTA503T1: + properties: + - common + bgp: + router-id: 0.12.1.249 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f9::1/128 + Ethernet1: + ipv6: fc00:a::7e2/126 + bp_interface: + ipv6: fc00:b::1f9/64 + + ARISTA504T1: + properties: + - common + bgp: + router-id: 0.12.1.250 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fa::1/128 + Ethernet1: + ipv6: fc00:a::7e6/126 + bp_interface: + ipv6: fc00:b::1fa/64 + + ARISTA505T1: + properties: + - common + bgp: + router-id: 0.12.1.251 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fb::1/128 + Ethernet1: + ipv6: 
fc00:a::7ea/126 + bp_interface: + ipv6: fc00:b::1fb/64 + + ARISTA506T1: + properties: + - common + bgp: + router-id: 0.12.1.252 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7ed + interfaces: + Loopback0: + ipv6: fc00:c:c:1fc::1/128 + Ethernet1: + ipv6: fc00:a::7ee/126 + bp_interface: + ipv6: fc00:b::1fc/64 + + ARISTA507T1: + properties: + - common + bgp: + router-id: 0.12.1.253 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fd::1/128 + Ethernet1: + ipv6: fc00:a::7f2/126 + bp_interface: + ipv6: fc00:b::1fd/64 + + ARISTA508T1: + properties: + - common + bgp: + router-id: 0.12.1.254 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fe::1/128 + Ethernet1: + ipv6: fc00:a::7f6/126 + bp_interface: + ipv6: fc00:b::1fe/64 + + ARISTA509T1: + properties: + - common + bgp: + router-id: 0.12.1.255 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ff::1/128 + Ethernet1: + ipv6: fc00:a::7fa/126 + bp_interface: + ipv6: fc00:b::1ff/64 + + ARISTA510T1: + properties: + - common + bgp: + router-id: 0.12.2.0 + asn: 4200100000 + peers: + 4200000000: + - fc00:a::7fd + interfaces: + Loopback0: + ipv6: fc00:c:c:200::1/128 + Ethernet1: + ipv6: fc00:a::7fe/126 + bp_interface: + ipv6: fc00:b::200/64 diff --git a/ansible/vars/topo_t1-isolated-u2d254.yaml b/ansible/vars/topo_t1-isolated-u2d254.yaml new file mode 100644 index 00000000000..47477a6ba03 --- /dev/null +++ b/ansible/vars/topo_t1-isolated-u2d254.yaml @@ -0,0 +1,5650 @@ +topology: + VMs: + ARISTA01T2: + vlans: + - 0 + vm_offset: 0 + ARISTA02T2: + vlans: + - 1 + vm_offset: 1 + ARISTA01T0: + vlans: + - 2 + vm_offset: 2 + ARISTA02T0: + vlans: + - 3 + vm_offset: 3 + ARISTA03T0: + vlans: + - 4 + vm_offset: 4 + ARISTA04T0: + vlans: + - 5 + vm_offset: 5 + ARISTA05T0: + vlans: + - 6 + vm_offset: 6 + ARISTA06T0: + vlans: + - 7 + vm_offset: 7 + ARISTA07T0: + vlans: + - 8 + 
vm_offset: 8 + ARISTA08T0: + vlans: + - 9 + vm_offset: 9 + ARISTA09T0: + vlans: + - 10 + vm_offset: 10 + ARISTA10T0: + vlans: + - 11 + vm_offset: 11 + ARISTA11T0: + vlans: + - 12 + vm_offset: 12 + ARISTA12T0: + vlans: + - 13 + vm_offset: 13 + ARISTA13T0: + vlans: + - 14 + vm_offset: 14 + ARISTA14T0: + vlans: + - 15 + vm_offset: 15 + ARISTA15T0: + vlans: + - 16 + vm_offset: 16 + ARISTA16T0: + vlans: + - 17 + vm_offset: 17 + ARISTA17T0: + vlans: + - 18 + vm_offset: 18 + ARISTA18T0: + vlans: + - 19 + vm_offset: 19 + ARISTA19T0: + vlans: + - 20 + vm_offset: 20 + ARISTA20T0: + vlans: + - 21 + vm_offset: 21 + ARISTA21T0: + vlans: + - 22 + vm_offset: 22 + ARISTA22T0: + vlans: + - 23 + vm_offset: 23 + ARISTA23T0: + vlans: + - 24 + vm_offset: 24 + ARISTA24T0: + vlans: + - 25 + vm_offset: 25 + ARISTA25T0: + vlans: + - 26 + vm_offset: 26 + ARISTA26T0: + vlans: + - 27 + vm_offset: 27 + ARISTA27T0: + vlans: + - 28 + vm_offset: 28 + ARISTA28T0: + vlans: + - 29 + vm_offset: 29 + ARISTA29T0: + vlans: + - 30 + vm_offset: 30 + ARISTA30T0: + vlans: + - 31 + vm_offset: 31 + ARISTA31T0: + vlans: + - 32 + vm_offset: 32 + ARISTA32T0: + vlans: + - 33 + vm_offset: 33 + ARISTA33T0: + vlans: + - 34 + vm_offset: 34 + ARISTA34T0: + vlans: + - 35 + vm_offset: 35 + ARISTA35T0: + vlans: + - 36 + vm_offset: 36 + ARISTA36T0: + vlans: + - 37 + vm_offset: 37 + ARISTA37T0: + vlans: + - 38 + vm_offset: 38 + ARISTA38T0: + vlans: + - 39 + vm_offset: 39 + ARISTA39T0: + vlans: + - 40 + vm_offset: 40 + ARISTA40T0: + vlans: + - 41 + vm_offset: 41 + ARISTA41T0: + vlans: + - 42 + vm_offset: 42 + ARISTA42T0: + vlans: + - 43 + vm_offset: 43 + ARISTA43T0: + vlans: + - 44 + vm_offset: 44 + ARISTA44T0: + vlans: + - 45 + vm_offset: 45 + ARISTA45T0: + vlans: + - 46 + vm_offset: 46 + ARISTA46T0: + vlans: + - 47 + vm_offset: 47 + ARISTA47T0: + vlans: + - 48 + vm_offset: 48 + ARISTA48T0: + vlans: + - 49 + vm_offset: 49 + ARISTA49T0: + vlans: + - 50 + vm_offset: 50 + ARISTA50T0: + vlans: + - 51 + vm_offset: 51 + 
ARISTA51T0: + vlans: + - 52 + vm_offset: 52 + ARISTA52T0: + vlans: + - 53 + vm_offset: 53 + ARISTA53T0: + vlans: + - 54 + vm_offset: 54 + ARISTA54T0: + vlans: + - 55 + vm_offset: 55 + ARISTA55T0: + vlans: + - 56 + vm_offset: 56 + ARISTA56T0: + vlans: + - 57 + vm_offset: 57 + ARISTA57T0: + vlans: + - 58 + vm_offset: 58 + ARISTA58T0: + vlans: + - 59 + vm_offset: 59 + ARISTA59T0: + vlans: + - 60 + vm_offset: 60 + ARISTA60T0: + vlans: + - 61 + vm_offset: 61 + ARISTA61T0: + vlans: + - 62 + vm_offset: 62 + ARISTA62T0: + vlans: + - 63 + vm_offset: 63 + ARISTA63T0: + vlans: + - 64 + vm_offset: 64 + ARISTA64T0: + vlans: + - 65 + vm_offset: 65 + ARISTA65T0: + vlans: + - 66 + vm_offset: 66 + ARISTA66T0: + vlans: + - 67 + vm_offset: 67 + ARISTA67T0: + vlans: + - 68 + vm_offset: 68 + ARISTA68T0: + vlans: + - 69 + vm_offset: 69 + ARISTA69T0: + vlans: + - 70 + vm_offset: 70 + ARISTA70T0: + vlans: + - 71 + vm_offset: 71 + ARISTA71T0: + vlans: + - 72 + vm_offset: 72 + ARISTA72T0: + vlans: + - 73 + vm_offset: 73 + ARISTA73T0: + vlans: + - 74 + vm_offset: 74 + ARISTA74T0: + vlans: + - 75 + vm_offset: 75 + ARISTA75T0: + vlans: + - 76 + vm_offset: 76 + ARISTA76T0: + vlans: + - 77 + vm_offset: 77 + ARISTA77T0: + vlans: + - 78 + vm_offset: 78 + ARISTA78T0: + vlans: + - 79 + vm_offset: 79 + ARISTA79T0: + vlans: + - 80 + vm_offset: 80 + ARISTA80T0: + vlans: + - 81 + vm_offset: 81 + ARISTA81T0: + vlans: + - 82 + vm_offset: 82 + ARISTA82T0: + vlans: + - 83 + vm_offset: 83 + ARISTA83T0: + vlans: + - 84 + vm_offset: 84 + ARISTA84T0: + vlans: + - 85 + vm_offset: 85 + ARISTA85T0: + vlans: + - 86 + vm_offset: 86 + ARISTA86T0: + vlans: + - 87 + vm_offset: 87 + ARISTA87T0: + vlans: + - 88 + vm_offset: 88 + ARISTA88T0: + vlans: + - 89 + vm_offset: 89 + ARISTA89T0: + vlans: + - 90 + vm_offset: 90 + ARISTA90T0: + vlans: + - 91 + vm_offset: 91 + ARISTA91T0: + vlans: + - 92 + vm_offset: 92 + ARISTA92T0: + vlans: + - 93 + vm_offset: 93 + ARISTA93T0: + vlans: + - 94 + vm_offset: 94 + ARISTA94T0: + vlans: 
+ - 95 + vm_offset: 95 + ARISTA95T0: + vlans: + - 96 + vm_offset: 96 + ARISTA96T0: + vlans: + - 97 + vm_offset: 97 + ARISTA97T0: + vlans: + - 98 + vm_offset: 98 + ARISTA98T0: + vlans: + - 99 + vm_offset: 99 + ARISTA99T0: + vlans: + - 100 + vm_offset: 100 + ARISTA100T0: + vlans: + - 101 + vm_offset: 101 + ARISTA101T0: + vlans: + - 102 + vm_offset: 102 + ARISTA102T0: + vlans: + - 103 + vm_offset: 103 + ARISTA103T0: + vlans: + - 104 + vm_offset: 104 + ARISTA104T0: + vlans: + - 105 + vm_offset: 105 + ARISTA105T0: + vlans: + - 106 + vm_offset: 106 + ARISTA106T0: + vlans: + - 107 + vm_offset: 107 + ARISTA107T0: + vlans: + - 108 + vm_offset: 108 + ARISTA108T0: + vlans: + - 109 + vm_offset: 109 + ARISTA109T0: + vlans: + - 110 + vm_offset: 110 + ARISTA110T0: + vlans: + - 111 + vm_offset: 111 + ARISTA111T0: + vlans: + - 112 + vm_offset: 112 + ARISTA112T0: + vlans: + - 113 + vm_offset: 113 + ARISTA113T0: + vlans: + - 114 + vm_offset: 114 + ARISTA114T0: + vlans: + - 115 + vm_offset: 115 + ARISTA115T0: + vlans: + - 116 + vm_offset: 116 + ARISTA116T0: + vlans: + - 117 + vm_offset: 117 + ARISTA117T0: + vlans: + - 118 + vm_offset: 118 + ARISTA118T0: + vlans: + - 119 + vm_offset: 119 + ARISTA119T0: + vlans: + - 120 + vm_offset: 120 + ARISTA120T0: + vlans: + - 121 + vm_offset: 121 + ARISTA121T0: + vlans: + - 122 + vm_offset: 122 + ARISTA122T0: + vlans: + - 123 + vm_offset: 123 + ARISTA123T0: + vlans: + - 124 + vm_offset: 124 + ARISTA124T0: + vlans: + - 125 + vm_offset: 125 + ARISTA125T0: + vlans: + - 126 + vm_offset: 126 + ARISTA126T0: + vlans: + - 127 + vm_offset: 127 + ARISTA127T0: + vlans: + - 128 + vm_offset: 128 + ARISTA128T0: + vlans: + - 129 + vm_offset: 129 + ARISTA129T0: + vlans: + - 130 + vm_offset: 130 + ARISTA130T0: + vlans: + - 131 + vm_offset: 131 + ARISTA131T0: + vlans: + - 132 + vm_offset: 132 + ARISTA132T0: + vlans: + - 133 + vm_offset: 133 + ARISTA133T0: + vlans: + - 134 + vm_offset: 134 + ARISTA134T0: + vlans: + - 135 + vm_offset: 135 + ARISTA135T0: + vlans: + - 
136 + vm_offset: 136 + ARISTA136T0: + vlans: + - 137 + vm_offset: 137 + ARISTA137T0: + vlans: + - 138 + vm_offset: 138 + ARISTA138T0: + vlans: + - 139 + vm_offset: 139 + ARISTA139T0: + vlans: + - 140 + vm_offset: 140 + ARISTA140T0: + vlans: + - 141 + vm_offset: 141 + ARISTA141T0: + vlans: + - 142 + vm_offset: 142 + ARISTA142T0: + vlans: + - 143 + vm_offset: 143 + ARISTA143T0: + vlans: + - 144 + vm_offset: 144 + ARISTA144T0: + vlans: + - 145 + vm_offset: 145 + ARISTA145T0: + vlans: + - 146 + vm_offset: 146 + ARISTA146T0: + vlans: + - 147 + vm_offset: 147 + ARISTA147T0: + vlans: + - 148 + vm_offset: 148 + ARISTA148T0: + vlans: + - 149 + vm_offset: 149 + ARISTA149T0: + vlans: + - 150 + vm_offset: 150 + ARISTA150T0: + vlans: + - 151 + vm_offset: 151 + ARISTA151T0: + vlans: + - 152 + vm_offset: 152 + ARISTA152T0: + vlans: + - 153 + vm_offset: 153 + ARISTA153T0: + vlans: + - 154 + vm_offset: 154 + ARISTA154T0: + vlans: + - 155 + vm_offset: 155 + ARISTA155T0: + vlans: + - 156 + vm_offset: 156 + ARISTA156T0: + vlans: + - 157 + vm_offset: 157 + ARISTA157T0: + vlans: + - 158 + vm_offset: 158 + ARISTA158T0: + vlans: + - 159 + vm_offset: 159 + ARISTA159T0: + vlans: + - 160 + vm_offset: 160 + ARISTA160T0: + vlans: + - 161 + vm_offset: 161 + ARISTA161T0: + vlans: + - 162 + vm_offset: 162 + ARISTA162T0: + vlans: + - 163 + vm_offset: 163 + ARISTA163T0: + vlans: + - 164 + vm_offset: 164 + ARISTA164T0: + vlans: + - 165 + vm_offset: 165 + ARISTA165T0: + vlans: + - 166 + vm_offset: 166 + ARISTA166T0: + vlans: + - 167 + vm_offset: 167 + ARISTA167T0: + vlans: + - 168 + vm_offset: 168 + ARISTA168T0: + vlans: + - 169 + vm_offset: 169 + ARISTA169T0: + vlans: + - 170 + vm_offset: 170 + ARISTA170T0: + vlans: + - 171 + vm_offset: 171 + ARISTA171T0: + vlans: + - 172 + vm_offset: 172 + ARISTA172T0: + vlans: + - 173 + vm_offset: 173 + ARISTA173T0: + vlans: + - 174 + vm_offset: 174 + ARISTA174T0: + vlans: + - 175 + vm_offset: 175 + ARISTA175T0: + vlans: + - 176 + vm_offset: 176 + ARISTA176T0: + 
vlans: + - 177 + vm_offset: 177 + ARISTA177T0: + vlans: + - 178 + vm_offset: 178 + ARISTA178T0: + vlans: + - 179 + vm_offset: 179 + ARISTA179T0: + vlans: + - 180 + vm_offset: 180 + ARISTA180T0: + vlans: + - 181 + vm_offset: 181 + ARISTA181T0: + vlans: + - 182 + vm_offset: 182 + ARISTA182T0: + vlans: + - 183 + vm_offset: 183 + ARISTA183T0: + vlans: + - 184 + vm_offset: 184 + ARISTA184T0: + vlans: + - 185 + vm_offset: 185 + ARISTA185T0: + vlans: + - 186 + vm_offset: 186 + ARISTA186T0: + vlans: + - 187 + vm_offset: 187 + ARISTA187T0: + vlans: + - 188 + vm_offset: 188 + ARISTA188T0: + vlans: + - 189 + vm_offset: 189 + ARISTA189T0: + vlans: + - 190 + vm_offset: 190 + ARISTA190T0: + vlans: + - 191 + vm_offset: 191 + ARISTA191T0: + vlans: + - 192 + vm_offset: 192 + ARISTA192T0: + vlans: + - 193 + vm_offset: 193 + ARISTA193T0: + vlans: + - 194 + vm_offset: 194 + ARISTA194T0: + vlans: + - 195 + vm_offset: 195 + ARISTA195T0: + vlans: + - 196 + vm_offset: 196 + ARISTA196T0: + vlans: + - 197 + vm_offset: 197 + ARISTA197T0: + vlans: + - 198 + vm_offset: 198 + ARISTA198T0: + vlans: + - 199 + vm_offset: 199 + ARISTA199T0: + vlans: + - 200 + vm_offset: 200 + ARISTA200T0: + vlans: + - 201 + vm_offset: 201 + ARISTA201T0: + vlans: + - 202 + vm_offset: 202 + ARISTA202T0: + vlans: + - 203 + vm_offset: 203 + ARISTA203T0: + vlans: + - 204 + vm_offset: 204 + ARISTA204T0: + vlans: + - 205 + vm_offset: 205 + ARISTA205T0: + vlans: + - 206 + vm_offset: 206 + ARISTA206T0: + vlans: + - 207 + vm_offset: 207 + ARISTA207T0: + vlans: + - 208 + vm_offset: 208 + ARISTA208T0: + vlans: + - 209 + vm_offset: 209 + ARISTA209T0: + vlans: + - 210 + vm_offset: 210 + ARISTA210T0: + vlans: + - 211 + vm_offset: 211 + ARISTA211T0: + vlans: + - 212 + vm_offset: 212 + ARISTA212T0: + vlans: + - 213 + vm_offset: 213 + ARISTA213T0: + vlans: + - 214 + vm_offset: 214 + ARISTA214T0: + vlans: + - 215 + vm_offset: 215 + ARISTA215T0: + vlans: + - 216 + vm_offset: 216 + ARISTA216T0: + vlans: + - 217 + vm_offset: 217 + 
ARISTA217T0: + vlans: + - 218 + vm_offset: 218 + ARISTA218T0: + vlans: + - 219 + vm_offset: 219 + ARISTA219T0: + vlans: + - 220 + vm_offset: 220 + ARISTA220T0: + vlans: + - 221 + vm_offset: 221 + ARISTA221T0: + vlans: + - 222 + vm_offset: 222 + ARISTA222T0: + vlans: + - 223 + vm_offset: 223 + ARISTA223T0: + vlans: + - 224 + vm_offset: 224 + ARISTA224T0: + vlans: + - 225 + vm_offset: 225 + ARISTA225T0: + vlans: + - 226 + vm_offset: 226 + ARISTA226T0: + vlans: + - 227 + vm_offset: 227 + ARISTA227T0: + vlans: + - 228 + vm_offset: 228 + ARISTA228T0: + vlans: + - 229 + vm_offset: 229 + ARISTA229T0: + vlans: + - 230 + vm_offset: 230 + ARISTA230T0: + vlans: + - 231 + vm_offset: 231 + ARISTA231T0: + vlans: + - 232 + vm_offset: 232 + ARISTA232T0: + vlans: + - 233 + vm_offset: 233 + ARISTA233T0: + vlans: + - 234 + vm_offset: 234 + ARISTA234T0: + vlans: + - 235 + vm_offset: 235 + ARISTA235T0: + vlans: + - 236 + vm_offset: 236 + ARISTA236T0: + vlans: + - 237 + vm_offset: 237 + ARISTA237T0: + vlans: + - 238 + vm_offset: 238 + ARISTA238T0: + vlans: + - 239 + vm_offset: 239 + ARISTA239T0: + vlans: + - 240 + vm_offset: 240 + ARISTA240T0: + vlans: + - 241 + vm_offset: 241 + ARISTA241T0: + vlans: + - 242 + vm_offset: 242 + ARISTA242T0: + vlans: + - 243 + vm_offset: 243 + ARISTA243T0: + vlans: + - 244 + vm_offset: 244 + ARISTA244T0: + vlans: + - 245 + vm_offset: 245 + ARISTA245T0: + vlans: + - 246 + vm_offset: 246 + ARISTA246T0: + vlans: + - 247 + vm_offset: 247 + ARISTA247T0: + vlans: + - 248 + vm_offset: 248 + ARISTA248T0: + vlans: + - 249 + vm_offset: 249 + ARISTA249T0: + vlans: + - 250 + vm_offset: 250 + ARISTA250T0: + vlans: + - 251 + vm_offset: 251 + ARISTA251T0: + vlans: + - 252 + vm_offset: 252 + ARISTA252T0: + vlans: + - 253 + vm_offset: 253 + ARISTA253T0: + vlans: + - 254 + vm_offset: 254 + ARISTA254T0: + vlans: + - 255 + vm_offset: 255 + +configuration_properties: + common: + dut_asn: 4200100000 + dut_type: LeafRouter + podset_number: 200 + tor_number: 16 + 
tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + nhipv6: FC0A::FF + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T2: + properties: + - common + - spine + bgp: + router-id: 0.12.0.1 + asn: 4200200000 + peers: + 4200100000: + - fc00:a::1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1::1/128 + Ethernet1: + ipv6: fc00:a::2/126 + bp_interface: + ipv6: fc00:b::1/64 + + ARISTA02T2: + properties: + - common + - spine + bgp: + router-id: 0.12.0.2 + asn: 4200200000 + peers: + 4200100000: + - fc00:a::5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2::1/128 + Ethernet1: + ipv6: fc00:a::6/126 + bp_interface: + ipv6: fc00:b::2/64 + + ARISTA01T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.3 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3::1/128 + Ethernet1: + ipv6: fc00:a::a/126 + bp_interface: + ipv6: fc00:b::3/64 + + ARISTA02T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.4 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d + interfaces: + Loopback0: + ipv6: fc00:c:c:4::1/128 + Ethernet1: + ipv6: fc00:a::e/126 + bp_interface: + ipv6: fc00:b::4/64 + + ARISTA03T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.5 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::11 + interfaces: + Loopback0: + ipv6: fc00:c:c:5::1/128 + Ethernet1: + ipv6: fc00:a::12/126 + bp_interface: + ipv6: fc00:b::5/64 + + ARISTA04T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.6 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::15 + interfaces: + Loopback0: + ipv6: fc00:c:c:6::1/128 + Ethernet1: + ipv6: fc00:a::16/126 + bp_interface: + ipv6: fc00:b::6/64 + + ARISTA05T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.7 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::19 + interfaces: + Loopback0: + ipv6: fc00:c:c:7::1/128 + Ethernet1: + ipv6: fc00:a::1a/126 + bp_interface: + ipv6: fc00:b::7/64 + + ARISTA06T0: + properties: 
+ - common + - tor + bgp: + router-id: 0.12.0.8 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d + interfaces: + Loopback0: + ipv6: fc00:c:c:8::1/128 + Ethernet1: + ipv6: fc00:a::1e/126 + bp_interface: + ipv6: fc00:b::8/64 + + ARISTA07T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.9 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::21 + interfaces: + Loopback0: + ipv6: fc00:c:c:9::1/128 + Ethernet1: + ipv6: fc00:a::22/126 + bp_interface: + ipv6: fc00:b::9/64 + + ARISTA08T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.10 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::25 + interfaces: + Loopback0: + ipv6: fc00:c:c:a::1/128 + Ethernet1: + ipv6: fc00:a::26/126 + bp_interface: + ipv6: fc00:b::a/64 + + ARISTA09T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.11 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::29 + interfaces: + Loopback0: + ipv6: fc00:c:c:b::1/128 + Ethernet1: + ipv6: fc00:a::2a/126 + bp_interface: + ipv6: fc00:b::b/64 + + ARISTA10T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.12 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d + interfaces: + Loopback0: + ipv6: fc00:c:c:c::1/128 + Ethernet1: + ipv6: fc00:a::2e/126 + bp_interface: + ipv6: fc00:b::c/64 + + ARISTA11T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.13 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::31 + interfaces: + Loopback0: + ipv6: fc00:c:c:d::1/128 + Ethernet1: + ipv6: fc00:a::32/126 + bp_interface: + ipv6: fc00:b::d/64 + + ARISTA12T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.14 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::35 + interfaces: + Loopback0: + ipv6: fc00:c:c:e::1/128 + Ethernet1: + ipv6: fc00:a::36/126 + bp_interface: + ipv6: fc00:b::e/64 + + ARISTA13T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.15 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::39 + interfaces: + Loopback0: + ipv6: fc00:c:c:f::1/128 + Ethernet1: + 
ipv6: fc00:a::3a/126 + bp_interface: + ipv6: fc00:b::f/64 + + ARISTA14T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.16 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d + interfaces: + Loopback0: + ipv6: fc00:c:c:10::1/128 + Ethernet1: + ipv6: fc00:a::3e/126 + bp_interface: + ipv6: fc00:b::10/64 + + ARISTA15T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.17 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::41 + interfaces: + Loopback0: + ipv6: fc00:c:c:11::1/128 + Ethernet1: + ipv6: fc00:a::42/126 + bp_interface: + ipv6: fc00:b::11/64 + + ARISTA16T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.18 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::45 + interfaces: + Loopback0: + ipv6: fc00:c:c:12::1/128 + Ethernet1: + ipv6: fc00:a::46/126 + bp_interface: + ipv6: fc00:b::12/64 + + ARISTA17T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.19 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::49 + interfaces: + Loopback0: + ipv6: fc00:c:c:13::1/128 + Ethernet1: + ipv6: fc00:a::4a/126 + bp_interface: + ipv6: fc00:b::13/64 + + ARISTA18T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.20 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4d + interfaces: + Loopback0: + ipv6: fc00:c:c:14::1/128 + Ethernet1: + ipv6: fc00:a::4e/126 + bp_interface: + ipv6: fc00:b::14/64 + + ARISTA19T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.21 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::51 + interfaces: + Loopback0: + ipv6: fc00:c:c:15::1/128 + Ethernet1: + ipv6: fc00:a::52/126 + bp_interface: + ipv6: fc00:b::15/64 + + ARISTA20T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.22 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::55 + interfaces: + Loopback0: + ipv6: fc00:c:c:16::1/128 + Ethernet1: + ipv6: fc00:a::56/126 + bp_interface: + ipv6: fc00:b::16/64 + + ARISTA21T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.23 + asn: 4200000000 + 
peers: + 4200100000: + - fc00:a::59 + interfaces: + Loopback0: + ipv6: fc00:c:c:17::1/128 + Ethernet1: + ipv6: fc00:a::5a/126 + bp_interface: + ipv6: fc00:b::17/64 + + ARISTA22T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.24 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5d + interfaces: + Loopback0: + ipv6: fc00:c:c:18::1/128 + Ethernet1: + ipv6: fc00:a::5e/126 + bp_interface: + ipv6: fc00:b::18/64 + + ARISTA23T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.25 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::61 + interfaces: + Loopback0: + ipv6: fc00:c:c:19::1/128 + Ethernet1: + ipv6: fc00:a::62/126 + bp_interface: + ipv6: fc00:b::19/64 + + ARISTA24T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.26 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::65 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a::1/128 + Ethernet1: + ipv6: fc00:a::66/126 + bp_interface: + ipv6: fc00:b::1a/64 + + ARISTA25T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.27 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::69 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b::1/128 + Ethernet1: + ipv6: fc00:a::6a/126 + bp_interface: + ipv6: fc00:b::1b/64 + + ARISTA26T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.28 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c::1/128 + Ethernet1: + ipv6: fc00:a::6e/126 + bp_interface: + ipv6: fc00:b::1c/64 + + ARISTA27T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.29 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::71 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d::1/128 + Ethernet1: + ipv6: fc00:a::72/126 + bp_interface: + ipv6: fc00:b::1d/64 + + ARISTA28T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.30 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::75 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e::1/128 + Ethernet1: + ipv6: fc00:a::76/126 + bp_interface: + ipv6: 
fc00:b::1e/64 + + ARISTA29T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.31 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::79 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f::1/128 + Ethernet1: + ipv6: fc00:a::7a/126 + bp_interface: + ipv6: fc00:b::1f/64 + + ARISTA30T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.32 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7d + interfaces: + Loopback0: + ipv6: fc00:c:c:20::1/128 + Ethernet1: + ipv6: fc00:a::7e/126 + bp_interface: + ipv6: fc00:b::20/64 + + ARISTA31T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.33 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::81 + interfaces: + Loopback0: + ipv6: fc00:c:c:21::1/128 + Ethernet1: + ipv6: fc00:a::82/126 + bp_interface: + ipv6: fc00:b::21/64 + + ARISTA32T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.34 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::85 + interfaces: + Loopback0: + ipv6: fc00:c:c:22::1/128 + Ethernet1: + ipv6: fc00:a::86/126 + bp_interface: + ipv6: fc00:b::22/64 + + ARISTA33T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.35 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::89 + interfaces: + Loopback0: + ipv6: fc00:c:c:23::1/128 + Ethernet1: + ipv6: fc00:a::8a/126 + bp_interface: + ipv6: fc00:b::23/64 + + ARISTA34T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.36 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::8d + interfaces: + Loopback0: + ipv6: fc00:c:c:24::1/128 + Ethernet1: + ipv6: fc00:a::8e/126 + bp_interface: + ipv6: fc00:b::24/64 + + ARISTA35T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.37 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::91 + interfaces: + Loopback0: + ipv6: fc00:c:c:25::1/128 + Ethernet1: + ipv6: fc00:a::92/126 + bp_interface: + ipv6: fc00:b::25/64 + + ARISTA36T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.38 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::95 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:26::1/128 + Ethernet1: + ipv6: fc00:a::96/126 + bp_interface: + ipv6: fc00:b::26/64 + + ARISTA37T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.39 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::99 + interfaces: + Loopback0: + ipv6: fc00:c:c:27::1/128 + Ethernet1: + ipv6: fc00:a::9a/126 + bp_interface: + ipv6: fc00:b::27/64 + + ARISTA38T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.40 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::9d + interfaces: + Loopback0: + ipv6: fc00:c:c:28::1/128 + Ethernet1: + ipv6: fc00:a::9e/126 + bp_interface: + ipv6: fc00:b::28/64 + + ARISTA39T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.41 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:29::1/128 + Ethernet1: + ipv6: fc00:a::a2/126 + bp_interface: + ipv6: fc00:b::29/64 + + ARISTA40T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.42 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2a::1/128 + Ethernet1: + ipv6: fc00:a::a6/126 + bp_interface: + ipv6: fc00:b::2a/64 + + ARISTA41T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.43 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2b::1/128 + Ethernet1: + ipv6: fc00:a::aa/126 + bp_interface: + ipv6: fc00:b::2b/64 + + ARISTA42T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.44 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::ad + interfaces: + Loopback0: + ipv6: fc00:c:c:2c::1/128 + Ethernet1: + ipv6: fc00:a::ae/126 + bp_interface: + ipv6: fc00:b::2c/64 + + ARISTA43T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.45 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:2d::1/128 + Ethernet1: + ipv6: fc00:a::b2/126 + bp_interface: + ipv6: fc00:b::2d/64 + + ARISTA44T0: + properties: + 
- common + - tor + bgp: + router-id: 0.12.0.46 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2e::1/128 + Ethernet1: + ipv6: fc00:a::b6/126 + bp_interface: + ipv6: fc00:b::2e/64 + + ARISTA45T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.47 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2f::1/128 + Ethernet1: + ipv6: fc00:a::ba/126 + bp_interface: + ipv6: fc00:b::2f/64 + + ARISTA46T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.48 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::bd + interfaces: + Loopback0: + ipv6: fc00:c:c:30::1/128 + Ethernet1: + ipv6: fc00:a::be/126 + bp_interface: + ipv6: fc00:b::30/64 + + ARISTA47T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.49 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:31::1/128 + Ethernet1: + ipv6: fc00:a::c2/126 + bp_interface: + ipv6: fc00:b::31/64 + + ARISTA48T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.50 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:32::1/128 + Ethernet1: + ipv6: fc00:a::c6/126 + bp_interface: + ipv6: fc00:b::32/64 + + ARISTA49T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.51 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:33::1/128 + Ethernet1: + ipv6: fc00:a::ca/126 + bp_interface: + ipv6: fc00:b::33/64 + + ARISTA50T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.52 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::cd + interfaces: + Loopback0: + ipv6: fc00:c:c:34::1/128 + Ethernet1: + ipv6: fc00:a::ce/126 + bp_interface: + ipv6: fc00:b::34/64 + + ARISTA51T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.53 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:35::1/128 + 
Ethernet1: + ipv6: fc00:a::d2/126 + bp_interface: + ipv6: fc00:b::35/64 + + ARISTA52T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.54 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:36::1/128 + Ethernet1: + ipv6: fc00:a::d6/126 + bp_interface: + ipv6: fc00:b::36/64 + + ARISTA53T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.55 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:37::1/128 + Ethernet1: + ipv6: fc00:a::da/126 + bp_interface: + ipv6: fc00:b::37/64 + + ARISTA54T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.56 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::dd + interfaces: + Loopback0: + ipv6: fc00:c:c:38::1/128 + Ethernet1: + ipv6: fc00:a::de/126 + bp_interface: + ipv6: fc00:b::38/64 + + ARISTA55T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.57 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:39::1/128 + Ethernet1: + ipv6: fc00:a::e2/126 + bp_interface: + ipv6: fc00:b::39/64 + + ARISTA56T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.58 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3a::1/128 + Ethernet1: + ipv6: fc00:a::e6/126 + bp_interface: + ipv6: fc00:b::3a/64 + + ARISTA57T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.59 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3b::1/128 + Ethernet1: + ipv6: fc00:a::ea/126 + bp_interface: + ipv6: fc00:b::3b/64 + + ARISTA58T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.60 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::ed + interfaces: + Loopback0: + ipv6: fc00:c:c:3c::1/128 + Ethernet1: + ipv6: fc00:a::ee/126 + bp_interface: + ipv6: fc00:b::3c/64 + + ARISTA59T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.61 + asn: 
4200000000 + peers: + 4200100000: + - fc00:a::f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:3d::1/128 + Ethernet1: + ipv6: fc00:a::f2/126 + bp_interface: + ipv6: fc00:b::3d/64 + + ARISTA60T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.62 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3e::1/128 + Ethernet1: + ipv6: fc00:a::f6/126 + bp_interface: + ipv6: fc00:b::3e/64 + + ARISTA61T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.63 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3f::1/128 + Ethernet1: + ipv6: fc00:a::fa/126 + bp_interface: + ipv6: fc00:b::3f/64 + + ARISTA62T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.64 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::fd + interfaces: + Loopback0: + ipv6: fc00:c:c:40::1/128 + Ethernet1: + ipv6: fc00:a::fe/126 + bp_interface: + ipv6: fc00:b::40/64 + + ARISTA63T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.65 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::101 + interfaces: + Loopback0: + ipv6: fc00:c:c:41::1/128 + Ethernet1: + ipv6: fc00:a::102/126 + bp_interface: + ipv6: fc00:b::41/64 + + ARISTA64T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.66 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::105 + interfaces: + Loopback0: + ipv6: fc00:c:c:42::1/128 + Ethernet1: + ipv6: fc00:a::106/126 + bp_interface: + ipv6: fc00:b::42/64 + + ARISTA65T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.67 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::109 + interfaces: + Loopback0: + ipv6: fc00:c:c:43::1/128 + Ethernet1: + ipv6: fc00:a::10a/126 + bp_interface: + ipv6: fc00:b::43/64 + + ARISTA66T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.68 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::10d + interfaces: + Loopback0: + ipv6: fc00:c:c:44::1/128 + Ethernet1: + ipv6: fc00:a::10e/126 + 
bp_interface: + ipv6: fc00:b::44/64 + + ARISTA67T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.69 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::111 + interfaces: + Loopback0: + ipv6: fc00:c:c:45::1/128 + Ethernet1: + ipv6: fc00:a::112/126 + bp_interface: + ipv6: fc00:b::45/64 + + ARISTA68T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.70 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::115 + interfaces: + Loopback0: + ipv6: fc00:c:c:46::1/128 + Ethernet1: + ipv6: fc00:a::116/126 + bp_interface: + ipv6: fc00:b::46/64 + + ARISTA69T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.71 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::119 + interfaces: + Loopback0: + ipv6: fc00:c:c:47::1/128 + Ethernet1: + ipv6: fc00:a::11a/126 + bp_interface: + ipv6: fc00:b::47/64 + + ARISTA70T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.72 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::11d + interfaces: + Loopback0: + ipv6: fc00:c:c:48::1/128 + Ethernet1: + ipv6: fc00:a::11e/126 + bp_interface: + ipv6: fc00:b::48/64 + + ARISTA71T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.73 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::121 + interfaces: + Loopback0: + ipv6: fc00:c:c:49::1/128 + Ethernet1: + ipv6: fc00:a::122/126 + bp_interface: + ipv6: fc00:b::49/64 + + ARISTA72T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.74 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::125 + interfaces: + Loopback0: + ipv6: fc00:c:c:4a::1/128 + Ethernet1: + ipv6: fc00:a::126/126 + bp_interface: + ipv6: fc00:b::4a/64 + + ARISTA73T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.75 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::129 + interfaces: + Loopback0: + ipv6: fc00:c:c:4b::1/128 + Ethernet1: + ipv6: fc00:a::12a/126 + bp_interface: + ipv6: fc00:b::4b/64 + + ARISTA74T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.76 + asn: 4200000000 + peers: + 
4200100000: + - fc00:a::12d + interfaces: + Loopback0: + ipv6: fc00:c:c:4c::1/128 + Ethernet1: + ipv6: fc00:a::12e/126 + bp_interface: + ipv6: fc00:b::4c/64 + + ARISTA75T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.77 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::131 + interfaces: + Loopback0: + ipv6: fc00:c:c:4d::1/128 + Ethernet1: + ipv6: fc00:a::132/126 + bp_interface: + ipv6: fc00:b::4d/64 + + ARISTA76T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.78 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::135 + interfaces: + Loopback0: + ipv6: fc00:c:c:4e::1/128 + Ethernet1: + ipv6: fc00:a::136/126 + bp_interface: + ipv6: fc00:b::4e/64 + + ARISTA77T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.79 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::139 + interfaces: + Loopback0: + ipv6: fc00:c:c:4f::1/128 + Ethernet1: + ipv6: fc00:a::13a/126 + bp_interface: + ipv6: fc00:b::4f/64 + + ARISTA78T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.80 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::13d + interfaces: + Loopback0: + ipv6: fc00:c:c:50::1/128 + Ethernet1: + ipv6: fc00:a::13e/126 + bp_interface: + ipv6: fc00:b::50/64 + + ARISTA79T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.81 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::141 + interfaces: + Loopback0: + ipv6: fc00:c:c:51::1/128 + Ethernet1: + ipv6: fc00:a::142/126 + bp_interface: + ipv6: fc00:b::51/64 + + ARISTA80T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.82 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::145 + interfaces: + Loopback0: + ipv6: fc00:c:c:52::1/128 + Ethernet1: + ipv6: fc00:a::146/126 + bp_interface: + ipv6: fc00:b::52/64 + + ARISTA81T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.83 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::149 + interfaces: + Loopback0: + ipv6: fc00:c:c:53::1/128 + Ethernet1: + ipv6: fc00:a::14a/126 + bp_interface: + ipv6: 
fc00:b::53/64 + + ARISTA82T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.84 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::14d + interfaces: + Loopback0: + ipv6: fc00:c:c:54::1/128 + Ethernet1: + ipv6: fc00:a::14e/126 + bp_interface: + ipv6: fc00:b::54/64 + + ARISTA83T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.85 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::151 + interfaces: + Loopback0: + ipv6: fc00:c:c:55::1/128 + Ethernet1: + ipv6: fc00:a::152/126 + bp_interface: + ipv6: fc00:b::55/64 + + ARISTA84T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.86 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::155 + interfaces: + Loopback0: + ipv6: fc00:c:c:56::1/128 + Ethernet1: + ipv6: fc00:a::156/126 + bp_interface: + ipv6: fc00:b::56/64 + + ARISTA85T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.87 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::159 + interfaces: + Loopback0: + ipv6: fc00:c:c:57::1/128 + Ethernet1: + ipv6: fc00:a::15a/126 + bp_interface: + ipv6: fc00:b::57/64 + + ARISTA86T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.88 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::15d + interfaces: + Loopback0: + ipv6: fc00:c:c:58::1/128 + Ethernet1: + ipv6: fc00:a::15e/126 + bp_interface: + ipv6: fc00:b::58/64 + + ARISTA87T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.89 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::161 + interfaces: + Loopback0: + ipv6: fc00:c:c:59::1/128 + Ethernet1: + ipv6: fc00:a::162/126 + bp_interface: + ipv6: fc00:b::59/64 + + ARISTA88T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.90 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::165 + interfaces: + Loopback0: + ipv6: fc00:c:c:5a::1/128 + Ethernet1: + ipv6: fc00:a::166/126 + bp_interface: + ipv6: fc00:b::5a/64 + + ARISTA89T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.91 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::169 + interfaces: + Loopback0: + ipv6: fc00:c:c:5b::1/128 + Ethernet1: + ipv6: fc00:a::16a/126 + bp_interface: + ipv6: fc00:b::5b/64 + + ARISTA90T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.92 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::16d + interfaces: + Loopback0: + ipv6: fc00:c:c:5c::1/128 + Ethernet1: + ipv6: fc00:a::16e/126 + bp_interface: + ipv6: fc00:b::5c/64 + + ARISTA91T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.93 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::171 + interfaces: + Loopback0: + ipv6: fc00:c:c:5d::1/128 + Ethernet1: + ipv6: fc00:a::172/126 + bp_interface: + ipv6: fc00:b::5d/64 + + ARISTA92T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.94 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::175 + interfaces: + Loopback0: + ipv6: fc00:c:c:5e::1/128 + Ethernet1: + ipv6: fc00:a::176/126 + bp_interface: + ipv6: fc00:b::5e/64 + + ARISTA93T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.95 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::179 + interfaces: + Loopback0: + ipv6: fc00:c:c:5f::1/128 + Ethernet1: + ipv6: fc00:a::17a/126 + bp_interface: + ipv6: fc00:b::5f/64 + + ARISTA94T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.96 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::17d + interfaces: + Loopback0: + ipv6: fc00:c:c:60::1/128 + Ethernet1: + ipv6: fc00:a::17e/126 + bp_interface: + ipv6: fc00:b::60/64 + + ARISTA95T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.97 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::181 + interfaces: + Loopback0: + ipv6: fc00:c:c:61::1/128 + Ethernet1: + ipv6: fc00:a::182/126 + bp_interface: + ipv6: fc00:b::61/64 + + ARISTA96T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.98 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::185 + interfaces: + Loopback0: + ipv6: fc00:c:c:62::1/128 + Ethernet1: + ipv6: fc00:a::186/126 + bp_interface: + ipv6: fc00:b::62/64 + + 
ARISTA97T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.99 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::189 + interfaces: + Loopback0: + ipv6: fc00:c:c:63::1/128 + Ethernet1: + ipv6: fc00:a::18a/126 + bp_interface: + ipv6: fc00:b::63/64 + + ARISTA98T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.100 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::18d + interfaces: + Loopback0: + ipv6: fc00:c:c:64::1/128 + Ethernet1: + ipv6: fc00:a::18e/126 + bp_interface: + ipv6: fc00:b::64/64 + + ARISTA99T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.101 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::191 + interfaces: + Loopback0: + ipv6: fc00:c:c:65::1/128 + Ethernet1: + ipv6: fc00:a::192/126 + bp_interface: + ipv6: fc00:b::65/64 + + ARISTA100T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.102 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::195 + interfaces: + Loopback0: + ipv6: fc00:c:c:66::1/128 + Ethernet1: + ipv6: fc00:a::196/126 + bp_interface: + ipv6: fc00:b::66/64 + + ARISTA101T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.103 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::199 + interfaces: + Loopback0: + ipv6: fc00:c:c:67::1/128 + Ethernet1: + ipv6: fc00:a::19a/126 + bp_interface: + ipv6: fc00:b::67/64 + + ARISTA102T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.104 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::19d + interfaces: + Loopback0: + ipv6: fc00:c:c:68::1/128 + Ethernet1: + ipv6: fc00:a::19e/126 + bp_interface: + ipv6: fc00:b::68/64 + + ARISTA103T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.105 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:69::1/128 + Ethernet1: + ipv6: fc00:a::1a2/126 + bp_interface: + ipv6: fc00:b::69/64 + + ARISTA104T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.106 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a5 
+ interfaces: + Loopback0: + ipv6: fc00:c:c:6a::1/128 + Ethernet1: + ipv6: fc00:a::1a6/126 + bp_interface: + ipv6: fc00:b::6a/64 + + ARISTA105T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.107 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6b::1/128 + Ethernet1: + ipv6: fc00:a::1aa/126 + bp_interface: + ipv6: fc00:b::6b/64 + + ARISTA106T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.108 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1ad + interfaces: + Loopback0: + ipv6: fc00:c:c:6c::1/128 + Ethernet1: + ipv6: fc00:a::1ae/126 + bp_interface: + ipv6: fc00:b::6c/64 + + ARISTA107T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.109 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:6d::1/128 + Ethernet1: + ipv6: fc00:a::1b2/126 + bp_interface: + ipv6: fc00:b::6d/64 + + ARISTA108T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.110 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6e::1/128 + Ethernet1: + ipv6: fc00:a::1b6/126 + bp_interface: + ipv6: fc00:b::6e/64 + + ARISTA109T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.111 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6f::1/128 + Ethernet1: + ipv6: fc00:a::1ba/126 + bp_interface: + ipv6: fc00:b::6f/64 + + ARISTA110T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.112 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1bd + interfaces: + Loopback0: + ipv6: fc00:c:c:70::1/128 + Ethernet1: + ipv6: fc00:a::1be/126 + bp_interface: + ipv6: fc00:b::70/64 + + ARISTA111T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.113 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:71::1/128 + Ethernet1: + ipv6: fc00:a::1c2/126 + bp_interface: + ipv6: fc00:b::71/64 + 
+ ARISTA112T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.114 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:72::1/128 + Ethernet1: + ipv6: fc00:a::1c6/126 + bp_interface: + ipv6: fc00:b::72/64 + + ARISTA113T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.115 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:73::1/128 + Ethernet1: + ipv6: fc00:a::1ca/126 + bp_interface: + ipv6: fc00:b::73/64 + + ARISTA114T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.116 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1cd + interfaces: + Loopback0: + ipv6: fc00:c:c:74::1/128 + Ethernet1: + ipv6: fc00:a::1ce/126 + bp_interface: + ipv6: fc00:b::74/64 + + ARISTA115T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.117 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:75::1/128 + Ethernet1: + ipv6: fc00:a::1d2/126 + bp_interface: + ipv6: fc00:b::75/64 + + ARISTA116T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.118 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:76::1/128 + Ethernet1: + ipv6: fc00:a::1d6/126 + bp_interface: + ipv6: fc00:b::76/64 + + ARISTA117T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.119 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:77::1/128 + Ethernet1: + ipv6: fc00:a::1da/126 + bp_interface: + ipv6: fc00:b::77/64 + + ARISTA118T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.120 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1dd + interfaces: + Loopback0: + ipv6: fc00:c:c:78::1/128 + Ethernet1: + ipv6: fc00:a::1de/126 + bp_interface: + ipv6: fc00:b::78/64 + + ARISTA119T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.121 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::1e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:79::1/128 + Ethernet1: + ipv6: fc00:a::1e2/126 + bp_interface: + ipv6: fc00:b::79/64 + + ARISTA120T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.122 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7a::1/128 + Ethernet1: + ipv6: fc00:a::1e6/126 + bp_interface: + ipv6: fc00:b::7a/64 + + ARISTA121T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.123 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7b::1/128 + Ethernet1: + ipv6: fc00:a::1ea/126 + bp_interface: + ipv6: fc00:b::7b/64 + + ARISTA122T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.124 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1ed + interfaces: + Loopback0: + ipv6: fc00:c:c:7c::1/128 + Ethernet1: + ipv6: fc00:a::1ee/126 + bp_interface: + ipv6: fc00:b::7c/64 + + ARISTA123T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.125 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:7d::1/128 + Ethernet1: + ipv6: fc00:a::1f2/126 + bp_interface: + ipv6: fc00:b::7d/64 + + ARISTA124T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.126 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7e::1/128 + Ethernet1: + ipv6: fc00:a::1f6/126 + bp_interface: + ipv6: fc00:b::7e/64 + + ARISTA125T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.127 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7f::1/128 + Ethernet1: + ipv6: fc00:a::1fa/126 + bp_interface: + ipv6: fc00:b::7f/64 + + ARISTA126T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.128 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1fd + interfaces: + Loopback0: + ipv6: fc00:c:c:80::1/128 + Ethernet1: + ipv6: fc00:a::1fe/126 + bp_interface: + ipv6: 
fc00:b::80/64 + + ARISTA127T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.129 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::201 + interfaces: + Loopback0: + ipv6: fc00:c:c:81::1/128 + Ethernet1: + ipv6: fc00:a::202/126 + bp_interface: + ipv6: fc00:b::81/64 + + ARISTA128T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.130 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::205 + interfaces: + Loopback0: + ipv6: fc00:c:c:82::1/128 + Ethernet1: + ipv6: fc00:a::206/126 + bp_interface: + ipv6: fc00:b::82/64 + + ARISTA129T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.131 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::209 + interfaces: + Loopback0: + ipv6: fc00:c:c:83::1/128 + Ethernet1: + ipv6: fc00:a::20a/126 + bp_interface: + ipv6: fc00:b::83/64 + + ARISTA130T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.132 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::20d + interfaces: + Loopback0: + ipv6: fc00:c:c:84::1/128 + Ethernet1: + ipv6: fc00:a::20e/126 + bp_interface: + ipv6: fc00:b::84/64 + + ARISTA131T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.133 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::211 + interfaces: + Loopback0: + ipv6: fc00:c:c:85::1/128 + Ethernet1: + ipv6: fc00:a::212/126 + bp_interface: + ipv6: fc00:b::85/64 + + ARISTA132T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.134 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::215 + interfaces: + Loopback0: + ipv6: fc00:c:c:86::1/128 + Ethernet1: + ipv6: fc00:a::216/126 + bp_interface: + ipv6: fc00:b::86/64 + + ARISTA133T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.135 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::219 + interfaces: + Loopback0: + ipv6: fc00:c:c:87::1/128 + Ethernet1: + ipv6: fc00:a::21a/126 + bp_interface: + ipv6: fc00:b::87/64 + + ARISTA134T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.136 + asn: 4200000000 + peers: + 
4200100000: + - fc00:a::21d + interfaces: + Loopback0: + ipv6: fc00:c:c:88::1/128 + Ethernet1: + ipv6: fc00:a::21e/126 + bp_interface: + ipv6: fc00:b::88/64 + + ARISTA135T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.137 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::221 + interfaces: + Loopback0: + ipv6: fc00:c:c:89::1/128 + Ethernet1: + ipv6: fc00:a::222/126 + bp_interface: + ipv6: fc00:b::89/64 + + ARISTA136T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.138 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::225 + interfaces: + Loopback0: + ipv6: fc00:c:c:8a::1/128 + Ethernet1: + ipv6: fc00:a::226/126 + bp_interface: + ipv6: fc00:b::8a/64 + + ARISTA137T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.139 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::229 + interfaces: + Loopback0: + ipv6: fc00:c:c:8b::1/128 + Ethernet1: + ipv6: fc00:a::22a/126 + bp_interface: + ipv6: fc00:b::8b/64 + + ARISTA138T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.140 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::22d + interfaces: + Loopback0: + ipv6: fc00:c:c:8c::1/128 + Ethernet1: + ipv6: fc00:a::22e/126 + bp_interface: + ipv6: fc00:b::8c/64 + + ARISTA139T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.141 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::231 + interfaces: + Loopback0: + ipv6: fc00:c:c:8d::1/128 + Ethernet1: + ipv6: fc00:a::232/126 + bp_interface: + ipv6: fc00:b::8d/64 + + ARISTA140T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.142 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::235 + interfaces: + Loopback0: + ipv6: fc00:c:c:8e::1/128 + Ethernet1: + ipv6: fc00:a::236/126 + bp_interface: + ipv6: fc00:b::8e/64 + + ARISTA141T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.143 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::239 + interfaces: + Loopback0: + ipv6: fc00:c:c:8f::1/128 + Ethernet1: + ipv6: fc00:a::23a/126 + 
bp_interface: + ipv6: fc00:b::8f/64 + + ARISTA142T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.144 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::23d + interfaces: + Loopback0: + ipv6: fc00:c:c:90::1/128 + Ethernet1: + ipv6: fc00:a::23e/126 + bp_interface: + ipv6: fc00:b::90/64 + + ARISTA143T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.145 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::241 + interfaces: + Loopback0: + ipv6: fc00:c:c:91::1/128 + Ethernet1: + ipv6: fc00:a::242/126 + bp_interface: + ipv6: fc00:b::91/64 + + ARISTA144T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.146 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::245 + interfaces: + Loopback0: + ipv6: fc00:c:c:92::1/128 + Ethernet1: + ipv6: fc00:a::246/126 + bp_interface: + ipv6: fc00:b::92/64 + + ARISTA145T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.147 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::249 + interfaces: + Loopback0: + ipv6: fc00:c:c:93::1/128 + Ethernet1: + ipv6: fc00:a::24a/126 + bp_interface: + ipv6: fc00:b::93/64 + + ARISTA146T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.148 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::24d + interfaces: + Loopback0: + ipv6: fc00:c:c:94::1/128 + Ethernet1: + ipv6: fc00:a::24e/126 + bp_interface: + ipv6: fc00:b::94/64 + + ARISTA147T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.149 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::251 + interfaces: + Loopback0: + ipv6: fc00:c:c:95::1/128 + Ethernet1: + ipv6: fc00:a::252/126 + bp_interface: + ipv6: fc00:b::95/64 + + ARISTA148T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.150 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::255 + interfaces: + Loopback0: + ipv6: fc00:c:c:96::1/128 + Ethernet1: + ipv6: fc00:a::256/126 + bp_interface: + ipv6: fc00:b::96/64 + + ARISTA149T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.151 + asn: 
4200000000 + peers: + 4200100000: + - fc00:a::259 + interfaces: + Loopback0: + ipv6: fc00:c:c:97::1/128 + Ethernet1: + ipv6: fc00:a::25a/126 + bp_interface: + ipv6: fc00:b::97/64 + + ARISTA150T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.152 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::25d + interfaces: + Loopback0: + ipv6: fc00:c:c:98::1/128 + Ethernet1: + ipv6: fc00:a::25e/126 + bp_interface: + ipv6: fc00:b::98/64 + + ARISTA151T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.153 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::261 + interfaces: + Loopback0: + ipv6: fc00:c:c:99::1/128 + Ethernet1: + ipv6: fc00:a::262/126 + bp_interface: + ipv6: fc00:b::99/64 + + ARISTA152T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.154 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::265 + interfaces: + Loopback0: + ipv6: fc00:c:c:9a::1/128 + Ethernet1: + ipv6: fc00:a::266/126 + bp_interface: + ipv6: fc00:b::9a/64 + + ARISTA153T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.155 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::269 + interfaces: + Loopback0: + ipv6: fc00:c:c:9b::1/128 + Ethernet1: + ipv6: fc00:a::26a/126 + bp_interface: + ipv6: fc00:b::9b/64 + + ARISTA154T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.156 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::26d + interfaces: + Loopback0: + ipv6: fc00:c:c:9c::1/128 + Ethernet1: + ipv6: fc00:a::26e/126 + bp_interface: + ipv6: fc00:b::9c/64 + + ARISTA155T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.157 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::271 + interfaces: + Loopback0: + ipv6: fc00:c:c:9d::1/128 + Ethernet1: + ipv6: fc00:a::272/126 + bp_interface: + ipv6: fc00:b::9d/64 + + ARISTA156T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.158 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::275 + interfaces: + Loopback0: + ipv6: fc00:c:c:9e::1/128 + Ethernet1: + ipv6: 
fc00:a::276/126 + bp_interface: + ipv6: fc00:b::9e/64 + + ARISTA157T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.159 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::279 + interfaces: + Loopback0: + ipv6: fc00:c:c:9f::1/128 + Ethernet1: + ipv6: fc00:a::27a/126 + bp_interface: + ipv6: fc00:b::9f/64 + + ARISTA158T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.160 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::27d + interfaces: + Loopback0: + ipv6: fc00:c:c:a0::1/128 + Ethernet1: + ipv6: fc00:a::27e/126 + bp_interface: + ipv6: fc00:b::a0/64 + + ARISTA159T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.161 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::281 + interfaces: + Loopback0: + ipv6: fc00:c:c:a1::1/128 + Ethernet1: + ipv6: fc00:a::282/126 + bp_interface: + ipv6: fc00:b::a1/64 + + ARISTA160T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.162 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::285 + interfaces: + Loopback0: + ipv6: fc00:c:c:a2::1/128 + Ethernet1: + ipv6: fc00:a::286/126 + bp_interface: + ipv6: fc00:b::a2/64 + + ARISTA161T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.163 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::289 + interfaces: + Loopback0: + ipv6: fc00:c:c:a3::1/128 + Ethernet1: + ipv6: fc00:a::28a/126 + bp_interface: + ipv6: fc00:b::a3/64 + + ARISTA162T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.164 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::28d + interfaces: + Loopback0: + ipv6: fc00:c:c:a4::1/128 + Ethernet1: + ipv6: fc00:a::28e/126 + bp_interface: + ipv6: fc00:b::a4/64 + + ARISTA163T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.165 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::291 + interfaces: + Loopback0: + ipv6: fc00:c:c:a5::1/128 + Ethernet1: + ipv6: fc00:a::292/126 + bp_interface: + ipv6: fc00:b::a5/64 + + ARISTA164T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.0.166 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::295 + interfaces: + Loopback0: + ipv6: fc00:c:c:a6::1/128 + Ethernet1: + ipv6: fc00:a::296/126 + bp_interface: + ipv6: fc00:b::a6/64 + + ARISTA165T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.167 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::299 + interfaces: + Loopback0: + ipv6: fc00:c:c:a7::1/128 + Ethernet1: + ipv6: fc00:a::29a/126 + bp_interface: + ipv6: fc00:b::a7/64 + + ARISTA166T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.168 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::29d + interfaces: + Loopback0: + ipv6: fc00:c:c:a8::1/128 + Ethernet1: + ipv6: fc00:a::29e/126 + bp_interface: + ipv6: fc00:b::a8/64 + + ARISTA167T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.169 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:a9::1/128 + Ethernet1: + ipv6: fc00:a::2a2/126 + bp_interface: + ipv6: fc00:b::a9/64 + + ARISTA168T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.170 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:aa::1/128 + Ethernet1: + ipv6: fc00:a::2a6/126 + bp_interface: + ipv6: fc00:b::aa/64 + + ARISTA169T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.171 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ab::1/128 + Ethernet1: + ipv6: fc00:a::2aa/126 + bp_interface: + ipv6: fc00:b::ab/64 + + ARISTA170T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.172 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ac::1/128 + Ethernet1: + ipv6: fc00:a::2ae/126 + bp_interface: + ipv6: fc00:b::ac/64 + + ARISTA171T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.173 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ad::1/128 + 
Ethernet1: + ipv6: fc00:a::2b2/126 + bp_interface: + ipv6: fc00:b::ad/64 + + ARISTA172T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.174 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ae::1/128 + Ethernet1: + ipv6: fc00:a::2b6/126 + bp_interface: + ipv6: fc00:b::ae/64 + + ARISTA173T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.175 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:af::1/128 + Ethernet1: + ipv6: fc00:a::2ba/126 + bp_interface: + ipv6: fc00:b::af/64 + + ARISTA174T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.176 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2bd + interfaces: + Loopback0: + ipv6: fc00:c:c:b0::1/128 + Ethernet1: + ipv6: fc00:a::2be/126 + bp_interface: + ipv6: fc00:b::b0/64 + + ARISTA175T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.177 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b1::1/128 + Ethernet1: + ipv6: fc00:a::2c2/126 + bp_interface: + ipv6: fc00:b::b1/64 + + ARISTA176T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.178 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b2::1/128 + Ethernet1: + ipv6: fc00:a::2c6/126 + bp_interface: + ipv6: fc00:b::b2/64 + + ARISTA177T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.179 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b3::1/128 + Ethernet1: + ipv6: fc00:a::2ca/126 + bp_interface: + ipv6: fc00:b::b3/64 + + ARISTA178T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.180 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2cd + interfaces: + Loopback0: + ipv6: fc00:c:c:b4::1/128 + Ethernet1: + ipv6: fc00:a::2ce/126 + bp_interface: + ipv6: fc00:b::b4/64 + + ARISTA179T0: + properties: + - common + - tor + bgp: 
+ router-id: 0.12.0.181 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b5::1/128 + Ethernet1: + ipv6: fc00:a::2d2/126 + bp_interface: + ipv6: fc00:b::b5/64 + + ARISTA180T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.182 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b6::1/128 + Ethernet1: + ipv6: fc00:a::2d6/126 + bp_interface: + ipv6: fc00:b::b6/64 + + ARISTA181T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.183 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b7::1/128 + Ethernet1: + ipv6: fc00:a::2da/126 + bp_interface: + ipv6: fc00:b::b7/64 + + ARISTA182T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.184 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2dd + interfaces: + Loopback0: + ipv6: fc00:c:c:b8::1/128 + Ethernet1: + ipv6: fc00:a::2de/126 + bp_interface: + ipv6: fc00:b::b8/64 + + ARISTA183T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.185 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b9::1/128 + Ethernet1: + ipv6: fc00:a::2e2/126 + bp_interface: + ipv6: fc00:b::b9/64 + + ARISTA184T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.186 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ba::1/128 + Ethernet1: + ipv6: fc00:a::2e6/126 + bp_interface: + ipv6: fc00:b::ba/64 + + ARISTA185T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.187 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bb::1/128 + Ethernet1: + ipv6: fc00:a::2ea/126 + bp_interface: + ipv6: fc00:b::bb/64 + + ARISTA186T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.188 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2ed + interfaces: + Loopback0: + ipv6: 
fc00:c:c:bc::1/128 + Ethernet1: + ipv6: fc00:a::2ee/126 + bp_interface: + ipv6: fc00:b::bc/64 + + ARISTA187T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.189 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:bd::1/128 + Ethernet1: + ipv6: fc00:a::2f2/126 + bp_interface: + ipv6: fc00:b::bd/64 + + ARISTA188T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.190 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:be::1/128 + Ethernet1: + ipv6: fc00:a::2f6/126 + bp_interface: + ipv6: fc00:b::be/64 + + ARISTA189T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.191 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bf::1/128 + Ethernet1: + ipv6: fc00:a::2fa/126 + bp_interface: + ipv6: fc00:b::bf/64 + + ARISTA190T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.192 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2fd + interfaces: + Loopback0: + ipv6: fc00:c:c:c0::1/128 + Ethernet1: + ipv6: fc00:a::2fe/126 + bp_interface: + ipv6: fc00:b::c0/64 + + ARISTA191T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.193 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::301 + interfaces: + Loopback0: + ipv6: fc00:c:c:c1::1/128 + Ethernet1: + ipv6: fc00:a::302/126 + bp_interface: + ipv6: fc00:b::c1/64 + + ARISTA192T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.194 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::305 + interfaces: + Loopback0: + ipv6: fc00:c:c:c2::1/128 + Ethernet1: + ipv6: fc00:a::306/126 + bp_interface: + ipv6: fc00:b::c2/64 + + ARISTA193T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.195 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::309 + interfaces: + Loopback0: + ipv6: fc00:c:c:c3::1/128 + Ethernet1: + ipv6: fc00:a::30a/126 + bp_interface: + ipv6: fc00:b::c3/64 + + ARISTA194T0: + properties: + - 
common + - tor + bgp: + router-id: 0.12.0.196 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::30d + interfaces: + Loopback0: + ipv6: fc00:c:c:c4::1/128 + Ethernet1: + ipv6: fc00:a::30e/126 + bp_interface: + ipv6: fc00:b::c4/64 + + ARISTA195T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.197 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::311 + interfaces: + Loopback0: + ipv6: fc00:c:c:c5::1/128 + Ethernet1: + ipv6: fc00:a::312/126 + bp_interface: + ipv6: fc00:b::c5/64 + + ARISTA196T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.198 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::315 + interfaces: + Loopback0: + ipv6: fc00:c:c:c6::1/128 + Ethernet1: + ipv6: fc00:a::316/126 + bp_interface: + ipv6: fc00:b::c6/64 + + ARISTA197T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.199 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::319 + interfaces: + Loopback0: + ipv6: fc00:c:c:c7::1/128 + Ethernet1: + ipv6: fc00:a::31a/126 + bp_interface: + ipv6: fc00:b::c7/64 + + ARISTA198T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.200 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::31d + interfaces: + Loopback0: + ipv6: fc00:c:c:c8::1/128 + Ethernet1: + ipv6: fc00:a::31e/126 + bp_interface: + ipv6: fc00:b::c8/64 + + ARISTA199T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.201 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::321 + interfaces: + Loopback0: + ipv6: fc00:c:c:c9::1/128 + Ethernet1: + ipv6: fc00:a::322/126 + bp_interface: + ipv6: fc00:b::c9/64 + + ARISTA200T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.202 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::325 + interfaces: + Loopback0: + ipv6: fc00:c:c:ca::1/128 + Ethernet1: + ipv6: fc00:a::326/126 + bp_interface: + ipv6: fc00:b::ca/64 + + ARISTA201T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.203 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::329 + interfaces: + Loopback0: 
+ ipv6: fc00:c:c:cb::1/128 + Ethernet1: + ipv6: fc00:a::32a/126 + bp_interface: + ipv6: fc00:b::cb/64 + + ARISTA202T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.204 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::32d + interfaces: + Loopback0: + ipv6: fc00:c:c:cc::1/128 + Ethernet1: + ipv6: fc00:a::32e/126 + bp_interface: + ipv6: fc00:b::cc/64 + + ARISTA203T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.205 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::331 + interfaces: + Loopback0: + ipv6: fc00:c:c:cd::1/128 + Ethernet1: + ipv6: fc00:a::332/126 + bp_interface: + ipv6: fc00:b::cd/64 + + ARISTA204T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.206 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::335 + interfaces: + Loopback0: + ipv6: fc00:c:c:ce::1/128 + Ethernet1: + ipv6: fc00:a::336/126 + bp_interface: + ipv6: fc00:b::ce/64 + + ARISTA205T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.207 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::339 + interfaces: + Loopback0: + ipv6: fc00:c:c:cf::1/128 + Ethernet1: + ipv6: fc00:a::33a/126 + bp_interface: + ipv6: fc00:b::cf/64 + + ARISTA206T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.208 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::33d + interfaces: + Loopback0: + ipv6: fc00:c:c:d0::1/128 + Ethernet1: + ipv6: fc00:a::33e/126 + bp_interface: + ipv6: fc00:b::d0/64 + + ARISTA207T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.209 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::341 + interfaces: + Loopback0: + ipv6: fc00:c:c:d1::1/128 + Ethernet1: + ipv6: fc00:a::342/126 + bp_interface: + ipv6: fc00:b::d1/64 + + ARISTA208T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.210 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::345 + interfaces: + Loopback0: + ipv6: fc00:c:c:d2::1/128 + Ethernet1: + ipv6: fc00:a::346/126 + bp_interface: + ipv6: fc00:b::d2/64 + + ARISTA209T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.0.211 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::349 + interfaces: + Loopback0: + ipv6: fc00:c:c:d3::1/128 + Ethernet1: + ipv6: fc00:a::34a/126 + bp_interface: + ipv6: fc00:b::d3/64 + + ARISTA210T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.212 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::34d + interfaces: + Loopback0: + ipv6: fc00:c:c:d4::1/128 + Ethernet1: + ipv6: fc00:a::34e/126 + bp_interface: + ipv6: fc00:b::d4/64 + + ARISTA211T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.213 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::351 + interfaces: + Loopback0: + ipv6: fc00:c:c:d5::1/128 + Ethernet1: + ipv6: fc00:a::352/126 + bp_interface: + ipv6: fc00:b::d5/64 + + ARISTA212T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.214 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::355 + interfaces: + Loopback0: + ipv6: fc00:c:c:d6::1/128 + Ethernet1: + ipv6: fc00:a::356/126 + bp_interface: + ipv6: fc00:b::d6/64 + + ARISTA213T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.215 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::359 + interfaces: + Loopback0: + ipv6: fc00:c:c:d7::1/128 + Ethernet1: + ipv6: fc00:a::35a/126 + bp_interface: + ipv6: fc00:b::d7/64 + + ARISTA214T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.216 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::35d + interfaces: + Loopback0: + ipv6: fc00:c:c:d8::1/128 + Ethernet1: + ipv6: fc00:a::35e/126 + bp_interface: + ipv6: fc00:b::d8/64 + + ARISTA215T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.217 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::361 + interfaces: + Loopback0: + ipv6: fc00:c:c:d9::1/128 + Ethernet1: + ipv6: fc00:a::362/126 + bp_interface: + ipv6: fc00:b::d9/64 + + ARISTA216T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.218 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::365 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:da::1/128 + Ethernet1: + ipv6: fc00:a::366/126 + bp_interface: + ipv6: fc00:b::da/64 + + ARISTA217T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.219 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::369 + interfaces: + Loopback0: + ipv6: fc00:c:c:db::1/128 + Ethernet1: + ipv6: fc00:a::36a/126 + bp_interface: + ipv6: fc00:b::db/64 + + ARISTA218T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.220 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::36d + interfaces: + Loopback0: + ipv6: fc00:c:c:dc::1/128 + Ethernet1: + ipv6: fc00:a::36e/126 + bp_interface: + ipv6: fc00:b::dc/64 + + ARISTA219T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.221 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::371 + interfaces: + Loopback0: + ipv6: fc00:c:c:dd::1/128 + Ethernet1: + ipv6: fc00:a::372/126 + bp_interface: + ipv6: fc00:b::dd/64 + + ARISTA220T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.222 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::375 + interfaces: + Loopback0: + ipv6: fc00:c:c:de::1/128 + Ethernet1: + ipv6: fc00:a::376/126 + bp_interface: + ipv6: fc00:b::de/64 + + ARISTA221T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.223 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::379 + interfaces: + Loopback0: + ipv6: fc00:c:c:df::1/128 + Ethernet1: + ipv6: fc00:a::37a/126 + bp_interface: + ipv6: fc00:b::df/64 + + ARISTA222T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.224 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::37d + interfaces: + Loopback0: + ipv6: fc00:c:c:e0::1/128 + Ethernet1: + ipv6: fc00:a::37e/126 + bp_interface: + ipv6: fc00:b::e0/64 + + ARISTA223T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.225 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::381 + interfaces: + Loopback0: + ipv6: fc00:c:c:e1::1/128 + Ethernet1: + ipv6: fc00:a::382/126 + bp_interface: + ipv6: fc00:b::e1/64 + + 
ARISTA224T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.226 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::385 + interfaces: + Loopback0: + ipv6: fc00:c:c:e2::1/128 + Ethernet1: + ipv6: fc00:a::386/126 + bp_interface: + ipv6: fc00:b::e2/64 + + ARISTA225T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.227 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::389 + interfaces: + Loopback0: + ipv6: fc00:c:c:e3::1/128 + Ethernet1: + ipv6: fc00:a::38a/126 + bp_interface: + ipv6: fc00:b::e3/64 + + ARISTA226T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.228 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::38d + interfaces: + Loopback0: + ipv6: fc00:c:c:e4::1/128 + Ethernet1: + ipv6: fc00:a::38e/126 + bp_interface: + ipv6: fc00:b::e4/64 + + ARISTA227T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.229 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::391 + interfaces: + Loopback0: + ipv6: fc00:c:c:e5::1/128 + Ethernet1: + ipv6: fc00:a::392/126 + bp_interface: + ipv6: fc00:b::e5/64 + + ARISTA228T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.230 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::395 + interfaces: + Loopback0: + ipv6: fc00:c:c:e6::1/128 + Ethernet1: + ipv6: fc00:a::396/126 + bp_interface: + ipv6: fc00:b::e6/64 + + ARISTA229T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.231 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::399 + interfaces: + Loopback0: + ipv6: fc00:c:c:e7::1/128 + Ethernet1: + ipv6: fc00:a::39a/126 + bp_interface: + ipv6: fc00:b::e7/64 + + ARISTA230T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.232 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::39d + interfaces: + Loopback0: + ipv6: fc00:c:c:e8::1/128 + Ethernet1: + ipv6: fc00:a::39e/126 + bp_interface: + ipv6: fc00:b::e8/64 + + ARISTA231T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.233 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::3a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:e9::1/128 + Ethernet1: + ipv6: fc00:a::3a2/126 + bp_interface: + ipv6: fc00:b::e9/64 + + ARISTA232T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.234 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ea::1/128 + Ethernet1: + ipv6: fc00:a::3a6/126 + bp_interface: + ipv6: fc00:b::ea/64 + + ARISTA233T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.235 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:eb::1/128 + Ethernet1: + ipv6: fc00:a::3aa/126 + bp_interface: + ipv6: fc00:b::eb/64 + + ARISTA234T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.236 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ec::1/128 + Ethernet1: + ipv6: fc00:a::3ae/126 + bp_interface: + ipv6: fc00:b::ec/64 + + ARISTA235T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.237 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ed::1/128 + Ethernet1: + ipv6: fc00:a::3b2/126 + bp_interface: + ipv6: fc00:b::ed/64 + + ARISTA236T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.238 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ee::1/128 + Ethernet1: + ipv6: fc00:a::3b6/126 + bp_interface: + ipv6: fc00:b::ee/64 + + ARISTA237T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.239 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ef::1/128 + Ethernet1: + ipv6: fc00:a::3ba/126 + bp_interface: + ipv6: fc00:b::ef/64 + + ARISTA238T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.240 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3bd + interfaces: + Loopback0: + ipv6: fc00:c:c:f0::1/128 + Ethernet1: + ipv6: fc00:a::3be/126 + bp_interface: + ipv6: 
fc00:b::f0/64 + + ARISTA239T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.241 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f1::1/128 + Ethernet1: + ipv6: fc00:a::3c2/126 + bp_interface: + ipv6: fc00:b::f1/64 + + ARISTA240T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.242 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f2::1/128 + Ethernet1: + ipv6: fc00:a::3c6/126 + bp_interface: + ipv6: fc00:b::f2/64 + + ARISTA241T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.243 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f3::1/128 + Ethernet1: + ipv6: fc00:a::3ca/126 + bp_interface: + ipv6: fc00:b::f3/64 + + ARISTA242T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.244 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3cd + interfaces: + Loopback0: + ipv6: fc00:c:c:f4::1/128 + Ethernet1: + ipv6: fc00:a::3ce/126 + bp_interface: + ipv6: fc00:b::f4/64 + + ARISTA243T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.245 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f5::1/128 + Ethernet1: + ipv6: fc00:a::3d2/126 + bp_interface: + ipv6: fc00:b::f5/64 + + ARISTA244T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.246 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f6::1/128 + Ethernet1: + ipv6: fc00:a::3d6/126 + bp_interface: + ipv6: fc00:b::f6/64 + + ARISTA245T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.247 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f7::1/128 + Ethernet1: + ipv6: fc00:a::3da/126 + bp_interface: + ipv6: fc00:b::f7/64 + + ARISTA246T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.248 + asn: 4200000000 + peers: + 
4200100000: + - fc00:a::3dd + interfaces: + Loopback0: + ipv6: fc00:c:c:f8::1/128 + Ethernet1: + ipv6: fc00:a::3de/126 + bp_interface: + ipv6: fc00:b::f8/64 + + ARISTA247T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.249 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f9::1/128 + Ethernet1: + ipv6: fc00:a::3e2/126 + bp_interface: + ipv6: fc00:b::f9/64 + + ARISTA248T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.250 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fa::1/128 + Ethernet1: + ipv6: fc00:a::3e6/126 + bp_interface: + ipv6: fc00:b::fa/64 + + ARISTA249T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.251 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:fb::1/128 + Ethernet1: + ipv6: fc00:a::3ea/126 + bp_interface: + ipv6: fc00:b::fb/64 + + ARISTA250T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.252 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3ed + interfaces: + Loopback0: + ipv6: fc00:c:c:fc::1/128 + Ethernet1: + ipv6: fc00:a::3ee/126 + bp_interface: + ipv6: fc00:b::fc/64 + + ARISTA251T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.253 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:fd::1/128 + Ethernet1: + ipv6: fc00:a::3f2/126 + bp_interface: + ipv6: fc00:b::fd/64 + + ARISTA252T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.254 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fe::1/128 + Ethernet1: + ipv6: fc00:a::3f6/126 + bp_interface: + ipv6: fc00:b::fe/64 + + ARISTA253T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.255 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ff::1/128 + Ethernet1: + ipv6: fc00:a::3fa/126 + 
bp_interface: + ipv6: fc00:b::ff/64 + + ARISTA254T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.0 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3fd + interfaces: + Loopback0: + ipv6: fc00:c:c:100::1/128 + Ethernet1: + ipv6: fc00:a::3fe/126 + bp_interface: + ipv6: fc00:b::100/64 diff --git a/ansible/vars/topo_t1-isolated-u2d510.yaml b/ansible/vars/topo_t1-isolated-u2d510.yaml new file mode 100644 index 00000000000..33ab6dea7d3 --- /dev/null +++ b/ansible/vars/topo_t1-isolated-u2d510.yaml @@ -0,0 +1,11282 @@ +topology: + VMs: + ARISTA01T2: + vlans: + - 0 + vm_offset: 0 + ARISTA02T2: + vlans: + - 1 + vm_offset: 1 + ARISTA01T0: + vlans: + - 2 + vm_offset: 2 + ARISTA02T0: + vlans: + - 3 + vm_offset: 3 + ARISTA03T0: + vlans: + - 4 + vm_offset: 4 + ARISTA04T0: + vlans: + - 5 + vm_offset: 5 + ARISTA05T0: + vlans: + - 6 + vm_offset: 6 + ARISTA06T0: + vlans: + - 7 + vm_offset: 7 + ARISTA07T0: + vlans: + - 8 + vm_offset: 8 + ARISTA08T0: + vlans: + - 9 + vm_offset: 9 + ARISTA09T0: + vlans: + - 10 + vm_offset: 10 + ARISTA10T0: + vlans: + - 11 + vm_offset: 11 + ARISTA11T0: + vlans: + - 12 + vm_offset: 12 + ARISTA12T0: + vlans: + - 13 + vm_offset: 13 + ARISTA13T0: + vlans: + - 14 + vm_offset: 14 + ARISTA14T0: + vlans: + - 15 + vm_offset: 15 + ARISTA15T0: + vlans: + - 16 + vm_offset: 16 + ARISTA16T0: + vlans: + - 17 + vm_offset: 17 + ARISTA17T0: + vlans: + - 18 + vm_offset: 18 + ARISTA18T0: + vlans: + - 19 + vm_offset: 19 + ARISTA19T0: + vlans: + - 20 + vm_offset: 20 + ARISTA20T0: + vlans: + - 21 + vm_offset: 21 + ARISTA21T0: + vlans: + - 22 + vm_offset: 22 + ARISTA22T0: + vlans: + - 23 + vm_offset: 23 + ARISTA23T0: + vlans: + - 24 + vm_offset: 24 + ARISTA24T0: + vlans: + - 25 + vm_offset: 25 + ARISTA25T0: + vlans: + - 26 + vm_offset: 26 + ARISTA26T0: + vlans: + - 27 + vm_offset: 27 + ARISTA27T0: + vlans: + - 28 + vm_offset: 28 + ARISTA28T0: + vlans: + - 29 + vm_offset: 29 + ARISTA29T0: + vlans: + - 30 + vm_offset: 30 + ARISTA30T0: + vlans: + - 31 + 
vm_offset: 31 + ARISTA31T0: + vlans: + - 32 + vm_offset: 32 + ARISTA32T0: + vlans: + - 33 + vm_offset: 33 + ARISTA33T0: + vlans: + - 34 + vm_offset: 34 + ARISTA34T0: + vlans: + - 35 + vm_offset: 35 + ARISTA35T0: + vlans: + - 36 + vm_offset: 36 + ARISTA36T0: + vlans: + - 37 + vm_offset: 37 + ARISTA37T0: + vlans: + - 38 + vm_offset: 38 + ARISTA38T0: + vlans: + - 39 + vm_offset: 39 + ARISTA39T0: + vlans: + - 40 + vm_offset: 40 + ARISTA40T0: + vlans: + - 41 + vm_offset: 41 + ARISTA41T0: + vlans: + - 42 + vm_offset: 42 + ARISTA42T0: + vlans: + - 43 + vm_offset: 43 + ARISTA43T0: + vlans: + - 44 + vm_offset: 44 + ARISTA44T0: + vlans: + - 45 + vm_offset: 45 + ARISTA45T0: + vlans: + - 46 + vm_offset: 46 + ARISTA46T0: + vlans: + - 47 + vm_offset: 47 + ARISTA47T0: + vlans: + - 48 + vm_offset: 48 + ARISTA48T0: + vlans: + - 49 + vm_offset: 49 + ARISTA49T0: + vlans: + - 50 + vm_offset: 50 + ARISTA50T0: + vlans: + - 51 + vm_offset: 51 + ARISTA51T0: + vlans: + - 52 + vm_offset: 52 + ARISTA52T0: + vlans: + - 53 + vm_offset: 53 + ARISTA53T0: + vlans: + - 54 + vm_offset: 54 + ARISTA54T0: + vlans: + - 55 + vm_offset: 55 + ARISTA55T0: + vlans: + - 56 + vm_offset: 56 + ARISTA56T0: + vlans: + - 57 + vm_offset: 57 + ARISTA57T0: + vlans: + - 58 + vm_offset: 58 + ARISTA58T0: + vlans: + - 59 + vm_offset: 59 + ARISTA59T0: + vlans: + - 60 + vm_offset: 60 + ARISTA60T0: + vlans: + - 61 + vm_offset: 61 + ARISTA61T0: + vlans: + - 62 + vm_offset: 62 + ARISTA62T0: + vlans: + - 63 + vm_offset: 63 + ARISTA63T0: + vlans: + - 64 + vm_offset: 64 + ARISTA64T0: + vlans: + - 65 + vm_offset: 65 + ARISTA65T0: + vlans: + - 66 + vm_offset: 66 + ARISTA66T0: + vlans: + - 67 + vm_offset: 67 + ARISTA67T0: + vlans: + - 68 + vm_offset: 68 + ARISTA68T0: + vlans: + - 69 + vm_offset: 69 + ARISTA69T0: + vlans: + - 70 + vm_offset: 70 + ARISTA70T0: + vlans: + - 71 + vm_offset: 71 + ARISTA71T0: + vlans: + - 72 + vm_offset: 72 + ARISTA72T0: + vlans: + - 73 + vm_offset: 73 + ARISTA73T0: + vlans: + - 74 + vm_offset: 74 + 
ARISTA74T0: + vlans: + - 75 + vm_offset: 75 + ARISTA75T0: + vlans: + - 76 + vm_offset: 76 + ARISTA76T0: + vlans: + - 77 + vm_offset: 77 + ARISTA77T0: + vlans: + - 78 + vm_offset: 78 + ARISTA78T0: + vlans: + - 79 + vm_offset: 79 + ARISTA79T0: + vlans: + - 80 + vm_offset: 80 + ARISTA80T0: + vlans: + - 81 + vm_offset: 81 + ARISTA81T0: + vlans: + - 82 + vm_offset: 82 + ARISTA82T0: + vlans: + - 83 + vm_offset: 83 + ARISTA83T0: + vlans: + - 84 + vm_offset: 84 + ARISTA84T0: + vlans: + - 85 + vm_offset: 85 + ARISTA85T0: + vlans: + - 86 + vm_offset: 86 + ARISTA86T0: + vlans: + - 87 + vm_offset: 87 + ARISTA87T0: + vlans: + - 88 + vm_offset: 88 + ARISTA88T0: + vlans: + - 89 + vm_offset: 89 + ARISTA89T0: + vlans: + - 90 + vm_offset: 90 + ARISTA90T0: + vlans: + - 91 + vm_offset: 91 + ARISTA91T0: + vlans: + - 92 + vm_offset: 92 + ARISTA92T0: + vlans: + - 93 + vm_offset: 93 + ARISTA93T0: + vlans: + - 94 + vm_offset: 94 + ARISTA94T0: + vlans: + - 95 + vm_offset: 95 + ARISTA95T0: + vlans: + - 96 + vm_offset: 96 + ARISTA96T0: + vlans: + - 97 + vm_offset: 97 + ARISTA97T0: + vlans: + - 98 + vm_offset: 98 + ARISTA98T0: + vlans: + - 99 + vm_offset: 99 + ARISTA99T0: + vlans: + - 100 + vm_offset: 100 + ARISTA100T0: + vlans: + - 101 + vm_offset: 101 + ARISTA101T0: + vlans: + - 102 + vm_offset: 102 + ARISTA102T0: + vlans: + - 103 + vm_offset: 103 + ARISTA103T0: + vlans: + - 104 + vm_offset: 104 + ARISTA104T0: + vlans: + - 105 + vm_offset: 105 + ARISTA105T0: + vlans: + - 106 + vm_offset: 106 + ARISTA106T0: + vlans: + - 107 + vm_offset: 107 + ARISTA107T0: + vlans: + - 108 + vm_offset: 108 + ARISTA108T0: + vlans: + - 109 + vm_offset: 109 + ARISTA109T0: + vlans: + - 110 + vm_offset: 110 + ARISTA110T0: + vlans: + - 111 + vm_offset: 111 + ARISTA111T0: + vlans: + - 112 + vm_offset: 112 + ARISTA112T0: + vlans: + - 113 + vm_offset: 113 + ARISTA113T0: + vlans: + - 114 + vm_offset: 114 + ARISTA114T0: + vlans: + - 115 + vm_offset: 115 + ARISTA115T0: + vlans: + - 116 + vm_offset: 116 + ARISTA116T0: + 
vlans: + - 117 + vm_offset: 117 + ARISTA117T0: + vlans: + - 118 + vm_offset: 118 + ARISTA118T0: + vlans: + - 119 + vm_offset: 119 + ARISTA119T0: + vlans: + - 120 + vm_offset: 120 + ARISTA120T0: + vlans: + - 121 + vm_offset: 121 + ARISTA121T0: + vlans: + - 122 + vm_offset: 122 + ARISTA122T0: + vlans: + - 123 + vm_offset: 123 + ARISTA123T0: + vlans: + - 124 + vm_offset: 124 + ARISTA124T0: + vlans: + - 125 + vm_offset: 125 + ARISTA125T0: + vlans: + - 126 + vm_offset: 126 + ARISTA126T0: + vlans: + - 127 + vm_offset: 127 + ARISTA127T0: + vlans: + - 128 + vm_offset: 128 + ARISTA128T0: + vlans: + - 129 + vm_offset: 129 + ARISTA129T0: + vlans: + - 130 + vm_offset: 130 + ARISTA130T0: + vlans: + - 131 + vm_offset: 131 + ARISTA131T0: + vlans: + - 132 + vm_offset: 132 + ARISTA132T0: + vlans: + - 133 + vm_offset: 133 + ARISTA133T0: + vlans: + - 134 + vm_offset: 134 + ARISTA134T0: + vlans: + - 135 + vm_offset: 135 + ARISTA135T0: + vlans: + - 136 + vm_offset: 136 + ARISTA136T0: + vlans: + - 137 + vm_offset: 137 + ARISTA137T0: + vlans: + - 138 + vm_offset: 138 + ARISTA138T0: + vlans: + - 139 + vm_offset: 139 + ARISTA139T0: + vlans: + - 140 + vm_offset: 140 + ARISTA140T0: + vlans: + - 141 + vm_offset: 141 + ARISTA141T0: + vlans: + - 142 + vm_offset: 142 + ARISTA142T0: + vlans: + - 143 + vm_offset: 143 + ARISTA143T0: + vlans: + - 144 + vm_offset: 144 + ARISTA144T0: + vlans: + - 145 + vm_offset: 145 + ARISTA145T0: + vlans: + - 146 + vm_offset: 146 + ARISTA146T0: + vlans: + - 147 + vm_offset: 147 + ARISTA147T0: + vlans: + - 148 + vm_offset: 148 + ARISTA148T0: + vlans: + - 149 + vm_offset: 149 + ARISTA149T0: + vlans: + - 150 + vm_offset: 150 + ARISTA150T0: + vlans: + - 151 + vm_offset: 151 + ARISTA151T0: + vlans: + - 152 + vm_offset: 152 + ARISTA152T0: + vlans: + - 153 + vm_offset: 153 + ARISTA153T0: + vlans: + - 154 + vm_offset: 154 + ARISTA154T0: + vlans: + - 155 + vm_offset: 155 + ARISTA155T0: + vlans: + - 156 + vm_offset: 156 + ARISTA156T0: + vlans: + - 157 + vm_offset: 157 + 
ARISTA157T0: + vlans: + - 158 + vm_offset: 158 + ARISTA158T0: + vlans: + - 159 + vm_offset: 159 + ARISTA159T0: + vlans: + - 160 + vm_offset: 160 + ARISTA160T0: + vlans: + - 161 + vm_offset: 161 + ARISTA161T0: + vlans: + - 162 + vm_offset: 162 + ARISTA162T0: + vlans: + - 163 + vm_offset: 163 + ARISTA163T0: + vlans: + - 164 + vm_offset: 164 + ARISTA164T0: + vlans: + - 165 + vm_offset: 165 + ARISTA165T0: + vlans: + - 166 + vm_offset: 166 + ARISTA166T0: + vlans: + - 167 + vm_offset: 167 + ARISTA167T0: + vlans: + - 168 + vm_offset: 168 + ARISTA168T0: + vlans: + - 169 + vm_offset: 169 + ARISTA169T0: + vlans: + - 170 + vm_offset: 170 + ARISTA170T0: + vlans: + - 171 + vm_offset: 171 + ARISTA171T0: + vlans: + - 172 + vm_offset: 172 + ARISTA172T0: + vlans: + - 173 + vm_offset: 173 + ARISTA173T0: + vlans: + - 174 + vm_offset: 174 + ARISTA174T0: + vlans: + - 175 + vm_offset: 175 + ARISTA175T0: + vlans: + - 176 + vm_offset: 176 + ARISTA176T0: + vlans: + - 177 + vm_offset: 177 + ARISTA177T0: + vlans: + - 178 + vm_offset: 178 + ARISTA178T0: + vlans: + - 179 + vm_offset: 179 + ARISTA179T0: + vlans: + - 180 + vm_offset: 180 + ARISTA180T0: + vlans: + - 181 + vm_offset: 181 + ARISTA181T0: + vlans: + - 182 + vm_offset: 182 + ARISTA182T0: + vlans: + - 183 + vm_offset: 183 + ARISTA183T0: + vlans: + - 184 + vm_offset: 184 + ARISTA184T0: + vlans: + - 185 + vm_offset: 185 + ARISTA185T0: + vlans: + - 186 + vm_offset: 186 + ARISTA186T0: + vlans: + - 187 + vm_offset: 187 + ARISTA187T0: + vlans: + - 188 + vm_offset: 188 + ARISTA188T0: + vlans: + - 189 + vm_offset: 189 + ARISTA189T0: + vlans: + - 190 + vm_offset: 190 + ARISTA190T0: + vlans: + - 191 + vm_offset: 191 + ARISTA191T0: + vlans: + - 192 + vm_offset: 192 + ARISTA192T0: + vlans: + - 193 + vm_offset: 193 + ARISTA193T0: + vlans: + - 194 + vm_offset: 194 + ARISTA194T0: + vlans: + - 195 + vm_offset: 195 + ARISTA195T0: + vlans: + - 196 + vm_offset: 196 + ARISTA196T0: + vlans: + - 197 + vm_offset: 197 + ARISTA197T0: + vlans: + - 198 + 
vm_offset: 198 + ARISTA198T0: + vlans: + - 199 + vm_offset: 199 + ARISTA199T0: + vlans: + - 200 + vm_offset: 200 + ARISTA200T0: + vlans: + - 201 + vm_offset: 201 + ARISTA201T0: + vlans: + - 202 + vm_offset: 202 + ARISTA202T0: + vlans: + - 203 + vm_offset: 203 + ARISTA203T0: + vlans: + - 204 + vm_offset: 204 + ARISTA204T0: + vlans: + - 205 + vm_offset: 205 + ARISTA205T0: + vlans: + - 206 + vm_offset: 206 + ARISTA206T0: + vlans: + - 207 + vm_offset: 207 + ARISTA207T0: + vlans: + - 208 + vm_offset: 208 + ARISTA208T0: + vlans: + - 209 + vm_offset: 209 + ARISTA209T0: + vlans: + - 210 + vm_offset: 210 + ARISTA210T0: + vlans: + - 211 + vm_offset: 211 + ARISTA211T0: + vlans: + - 212 + vm_offset: 212 + ARISTA212T0: + vlans: + - 213 + vm_offset: 213 + ARISTA213T0: + vlans: + - 214 + vm_offset: 214 + ARISTA214T0: + vlans: + - 215 + vm_offset: 215 + ARISTA215T0: + vlans: + - 216 + vm_offset: 216 + ARISTA216T0: + vlans: + - 217 + vm_offset: 217 + ARISTA217T0: + vlans: + - 218 + vm_offset: 218 + ARISTA218T0: + vlans: + - 219 + vm_offset: 219 + ARISTA219T0: + vlans: + - 220 + vm_offset: 220 + ARISTA220T0: + vlans: + - 221 + vm_offset: 221 + ARISTA221T0: + vlans: + - 222 + vm_offset: 222 + ARISTA222T0: + vlans: + - 223 + vm_offset: 223 + ARISTA223T0: + vlans: + - 224 + vm_offset: 224 + ARISTA224T0: + vlans: + - 225 + vm_offset: 225 + ARISTA225T0: + vlans: + - 226 + vm_offset: 226 + ARISTA226T0: + vlans: + - 227 + vm_offset: 227 + ARISTA227T0: + vlans: + - 228 + vm_offset: 228 + ARISTA228T0: + vlans: + - 229 + vm_offset: 229 + ARISTA229T0: + vlans: + - 230 + vm_offset: 230 + ARISTA230T0: + vlans: + - 231 + vm_offset: 231 + ARISTA231T0: + vlans: + - 232 + vm_offset: 232 + ARISTA232T0: + vlans: + - 233 + vm_offset: 233 + ARISTA233T0: + vlans: + - 234 + vm_offset: 234 + ARISTA234T0: + vlans: + - 235 + vm_offset: 235 + ARISTA235T0: + vlans: + - 236 + vm_offset: 236 + ARISTA236T0: + vlans: + - 237 + vm_offset: 237 + ARISTA237T0: + vlans: + - 238 + vm_offset: 238 + ARISTA238T0: + vlans: 
+ - 239 + vm_offset: 239 + ARISTA239T0: + vlans: + - 240 + vm_offset: 240 + ARISTA240T0: + vlans: + - 241 + vm_offset: 241 + ARISTA241T0: + vlans: + - 242 + vm_offset: 242 + ARISTA242T0: + vlans: + - 243 + vm_offset: 243 + ARISTA243T0: + vlans: + - 244 + vm_offset: 244 + ARISTA244T0: + vlans: + - 245 + vm_offset: 245 + ARISTA245T0: + vlans: + - 246 + vm_offset: 246 + ARISTA246T0: + vlans: + - 247 + vm_offset: 247 + ARISTA247T0: + vlans: + - 248 + vm_offset: 248 + ARISTA248T0: + vlans: + - 249 + vm_offset: 249 + ARISTA249T0: + vlans: + - 250 + vm_offset: 250 + ARISTA250T0: + vlans: + - 251 + vm_offset: 251 + ARISTA251T0: + vlans: + - 252 + vm_offset: 252 + ARISTA252T0: + vlans: + - 253 + vm_offset: 253 + ARISTA253T0: + vlans: + - 254 + vm_offset: 254 + ARISTA254T0: + vlans: + - 255 + vm_offset: 255 + ARISTA255T0: + vlans: + - 256 + vm_offset: 256 + ARISTA256T0: + vlans: + - 257 + vm_offset: 257 + ARISTA257T0: + vlans: + - 258 + vm_offset: 258 + ARISTA258T0: + vlans: + - 259 + vm_offset: 259 + ARISTA259T0: + vlans: + - 260 + vm_offset: 260 + ARISTA260T0: + vlans: + - 261 + vm_offset: 261 + ARISTA261T0: + vlans: + - 262 + vm_offset: 262 + ARISTA262T0: + vlans: + - 263 + vm_offset: 263 + ARISTA263T0: + vlans: + - 264 + vm_offset: 264 + ARISTA264T0: + vlans: + - 265 + vm_offset: 265 + ARISTA265T0: + vlans: + - 266 + vm_offset: 266 + ARISTA266T0: + vlans: + - 267 + vm_offset: 267 + ARISTA267T0: + vlans: + - 268 + vm_offset: 268 + ARISTA268T0: + vlans: + - 269 + vm_offset: 269 + ARISTA269T0: + vlans: + - 270 + vm_offset: 270 + ARISTA270T0: + vlans: + - 271 + vm_offset: 271 + ARISTA271T0: + vlans: + - 272 + vm_offset: 272 + ARISTA272T0: + vlans: + - 273 + vm_offset: 273 + ARISTA273T0: + vlans: + - 274 + vm_offset: 274 + ARISTA274T0: + vlans: + - 275 + vm_offset: 275 + ARISTA275T0: + vlans: + - 276 + vm_offset: 276 + ARISTA276T0: + vlans: + - 277 + vm_offset: 277 + ARISTA277T0: + vlans: + - 278 + vm_offset: 278 + ARISTA278T0: + vlans: + - 279 + vm_offset: 279 + ARISTA279T0: 
+ vlans: + - 280 + vm_offset: 280 + ARISTA280T0: + vlans: + - 281 + vm_offset: 281 + ARISTA281T0: + vlans: + - 282 + vm_offset: 282 + ARISTA282T0: + vlans: + - 283 + vm_offset: 283 + ARISTA283T0: + vlans: + - 284 + vm_offset: 284 + ARISTA284T0: + vlans: + - 285 + vm_offset: 285 + ARISTA285T0: + vlans: + - 286 + vm_offset: 286 + ARISTA286T0: + vlans: + - 287 + vm_offset: 287 + ARISTA287T0: + vlans: + - 288 + vm_offset: 288 + ARISTA288T0: + vlans: + - 289 + vm_offset: 289 + ARISTA289T0: + vlans: + - 290 + vm_offset: 290 + ARISTA290T0: + vlans: + - 291 + vm_offset: 291 + ARISTA291T0: + vlans: + - 292 + vm_offset: 292 + ARISTA292T0: + vlans: + - 293 + vm_offset: 293 + ARISTA293T0: + vlans: + - 294 + vm_offset: 294 + ARISTA294T0: + vlans: + - 295 + vm_offset: 295 + ARISTA295T0: + vlans: + - 296 + vm_offset: 296 + ARISTA296T0: + vlans: + - 297 + vm_offset: 297 + ARISTA297T0: + vlans: + - 298 + vm_offset: 298 + ARISTA298T0: + vlans: + - 299 + vm_offset: 299 + ARISTA299T0: + vlans: + - 300 + vm_offset: 300 + ARISTA300T0: + vlans: + - 301 + vm_offset: 301 + ARISTA301T0: + vlans: + - 302 + vm_offset: 302 + ARISTA302T0: + vlans: + - 303 + vm_offset: 303 + ARISTA303T0: + vlans: + - 304 + vm_offset: 304 + ARISTA304T0: + vlans: + - 305 + vm_offset: 305 + ARISTA305T0: + vlans: + - 306 + vm_offset: 306 + ARISTA306T0: + vlans: + - 307 + vm_offset: 307 + ARISTA307T0: + vlans: + - 308 + vm_offset: 308 + ARISTA308T0: + vlans: + - 309 + vm_offset: 309 + ARISTA309T0: + vlans: + - 310 + vm_offset: 310 + ARISTA310T0: + vlans: + - 311 + vm_offset: 311 + ARISTA311T0: + vlans: + - 312 + vm_offset: 312 + ARISTA312T0: + vlans: + - 313 + vm_offset: 313 + ARISTA313T0: + vlans: + - 314 + vm_offset: 314 + ARISTA314T0: + vlans: + - 315 + vm_offset: 315 + ARISTA315T0: + vlans: + - 316 + vm_offset: 316 + ARISTA316T0: + vlans: + - 317 + vm_offset: 317 + ARISTA317T0: + vlans: + - 318 + vm_offset: 318 + ARISTA318T0: + vlans: + - 319 + vm_offset: 319 + ARISTA319T0: + vlans: + - 320 + vm_offset: 320 + 
ARISTA320T0: + vlans: + - 321 + vm_offset: 321 + ARISTA321T0: + vlans: + - 322 + vm_offset: 322 + ARISTA322T0: + vlans: + - 323 + vm_offset: 323 + ARISTA323T0: + vlans: + - 324 + vm_offset: 324 + ARISTA324T0: + vlans: + - 325 + vm_offset: 325 + ARISTA325T0: + vlans: + - 326 + vm_offset: 326 + ARISTA326T0: + vlans: + - 327 + vm_offset: 327 + ARISTA327T0: + vlans: + - 328 + vm_offset: 328 + ARISTA328T0: + vlans: + - 329 + vm_offset: 329 + ARISTA329T0: + vlans: + - 330 + vm_offset: 330 + ARISTA330T0: + vlans: + - 331 + vm_offset: 331 + ARISTA331T0: + vlans: + - 332 + vm_offset: 332 + ARISTA332T0: + vlans: + - 333 + vm_offset: 333 + ARISTA333T0: + vlans: + - 334 + vm_offset: 334 + ARISTA334T0: + vlans: + - 335 + vm_offset: 335 + ARISTA335T0: + vlans: + - 336 + vm_offset: 336 + ARISTA336T0: + vlans: + - 337 + vm_offset: 337 + ARISTA337T0: + vlans: + - 338 + vm_offset: 338 + ARISTA338T0: + vlans: + - 339 + vm_offset: 339 + ARISTA339T0: + vlans: + - 340 + vm_offset: 340 + ARISTA340T0: + vlans: + - 341 + vm_offset: 341 + ARISTA341T0: + vlans: + - 342 + vm_offset: 342 + ARISTA342T0: + vlans: + - 343 + vm_offset: 343 + ARISTA343T0: + vlans: + - 344 + vm_offset: 344 + ARISTA344T0: + vlans: + - 345 + vm_offset: 345 + ARISTA345T0: + vlans: + - 346 + vm_offset: 346 + ARISTA346T0: + vlans: + - 347 + vm_offset: 347 + ARISTA347T0: + vlans: + - 348 + vm_offset: 348 + ARISTA348T0: + vlans: + - 349 + vm_offset: 349 + ARISTA349T0: + vlans: + - 350 + vm_offset: 350 + ARISTA350T0: + vlans: + - 351 + vm_offset: 351 + ARISTA351T0: + vlans: + - 352 + vm_offset: 352 + ARISTA352T0: + vlans: + - 353 + vm_offset: 353 + ARISTA353T0: + vlans: + - 354 + vm_offset: 354 + ARISTA354T0: + vlans: + - 355 + vm_offset: 355 + ARISTA355T0: + vlans: + - 356 + vm_offset: 356 + ARISTA356T0: + vlans: + - 357 + vm_offset: 357 + ARISTA357T0: + vlans: + - 358 + vm_offset: 358 + ARISTA358T0: + vlans: + - 359 + vm_offset: 359 + ARISTA359T0: + vlans: + - 360 + vm_offset: 360 + ARISTA360T0: + vlans: + - 361 + 
vm_offset: 361 + ARISTA361T0: + vlans: + - 362 + vm_offset: 362 + ARISTA362T0: + vlans: + - 363 + vm_offset: 363 + ARISTA363T0: + vlans: + - 364 + vm_offset: 364 + ARISTA364T0: + vlans: + - 365 + vm_offset: 365 + ARISTA365T0: + vlans: + - 366 + vm_offset: 366 + ARISTA366T0: + vlans: + - 367 + vm_offset: 367 + ARISTA367T0: + vlans: + - 368 + vm_offset: 368 + ARISTA368T0: + vlans: + - 369 + vm_offset: 369 + ARISTA369T0: + vlans: + - 370 + vm_offset: 370 + ARISTA370T0: + vlans: + - 371 + vm_offset: 371 + ARISTA371T0: + vlans: + - 372 + vm_offset: 372 + ARISTA372T0: + vlans: + - 373 + vm_offset: 373 + ARISTA373T0: + vlans: + - 374 + vm_offset: 374 + ARISTA374T0: + vlans: + - 375 + vm_offset: 375 + ARISTA375T0: + vlans: + - 376 + vm_offset: 376 + ARISTA376T0: + vlans: + - 377 + vm_offset: 377 + ARISTA377T0: + vlans: + - 378 + vm_offset: 378 + ARISTA378T0: + vlans: + - 379 + vm_offset: 379 + ARISTA379T0: + vlans: + - 380 + vm_offset: 380 + ARISTA380T0: + vlans: + - 381 + vm_offset: 381 + ARISTA381T0: + vlans: + - 382 + vm_offset: 382 + ARISTA382T0: + vlans: + - 383 + vm_offset: 383 + ARISTA383T0: + vlans: + - 384 + vm_offset: 384 + ARISTA384T0: + vlans: + - 385 + vm_offset: 385 + ARISTA385T0: + vlans: + - 386 + vm_offset: 386 + ARISTA386T0: + vlans: + - 387 + vm_offset: 387 + ARISTA387T0: + vlans: + - 388 + vm_offset: 388 + ARISTA388T0: + vlans: + - 389 + vm_offset: 389 + ARISTA389T0: + vlans: + - 390 + vm_offset: 390 + ARISTA390T0: + vlans: + - 391 + vm_offset: 391 + ARISTA391T0: + vlans: + - 392 + vm_offset: 392 + ARISTA392T0: + vlans: + - 393 + vm_offset: 393 + ARISTA393T0: + vlans: + - 394 + vm_offset: 394 + ARISTA394T0: + vlans: + - 395 + vm_offset: 395 + ARISTA395T0: + vlans: + - 396 + vm_offset: 396 + ARISTA396T0: + vlans: + - 397 + vm_offset: 397 + ARISTA397T0: + vlans: + - 398 + vm_offset: 398 + ARISTA398T0: + vlans: + - 399 + vm_offset: 399 + ARISTA399T0: + vlans: + - 400 + vm_offset: 400 + ARISTA400T0: + vlans: + - 401 + vm_offset: 401 + ARISTA401T0: + vlans: 
+ - 402 + vm_offset: 402 + ARISTA402T0: + vlans: + - 403 + vm_offset: 403 + ARISTA403T0: + vlans: + - 404 + vm_offset: 404 + ARISTA404T0: + vlans: + - 405 + vm_offset: 405 + ARISTA405T0: + vlans: + - 406 + vm_offset: 406 + ARISTA406T0: + vlans: + - 407 + vm_offset: 407 + ARISTA407T0: + vlans: + - 408 + vm_offset: 408 + ARISTA408T0: + vlans: + - 409 + vm_offset: 409 + ARISTA409T0: + vlans: + - 410 + vm_offset: 410 + ARISTA410T0: + vlans: + - 411 + vm_offset: 411 + ARISTA411T0: + vlans: + - 412 + vm_offset: 412 + ARISTA412T0: + vlans: + - 413 + vm_offset: 413 + ARISTA413T0: + vlans: + - 414 + vm_offset: 414 + ARISTA414T0: + vlans: + - 415 + vm_offset: 415 + ARISTA415T0: + vlans: + - 416 + vm_offset: 416 + ARISTA416T0: + vlans: + - 417 + vm_offset: 417 + ARISTA417T0: + vlans: + - 418 + vm_offset: 418 + ARISTA418T0: + vlans: + - 419 + vm_offset: 419 + ARISTA419T0: + vlans: + - 420 + vm_offset: 420 + ARISTA420T0: + vlans: + - 421 + vm_offset: 421 + ARISTA421T0: + vlans: + - 422 + vm_offset: 422 + ARISTA422T0: + vlans: + - 423 + vm_offset: 423 + ARISTA423T0: + vlans: + - 424 + vm_offset: 424 + ARISTA424T0: + vlans: + - 425 + vm_offset: 425 + ARISTA425T0: + vlans: + - 426 + vm_offset: 426 + ARISTA426T0: + vlans: + - 427 + vm_offset: 427 + ARISTA427T0: + vlans: + - 428 + vm_offset: 428 + ARISTA428T0: + vlans: + - 429 + vm_offset: 429 + ARISTA429T0: + vlans: + - 430 + vm_offset: 430 + ARISTA430T0: + vlans: + - 431 + vm_offset: 431 + ARISTA431T0: + vlans: + - 432 + vm_offset: 432 + ARISTA432T0: + vlans: + - 433 + vm_offset: 433 + ARISTA433T0: + vlans: + - 434 + vm_offset: 434 + ARISTA434T0: + vlans: + - 435 + vm_offset: 435 + ARISTA435T0: + vlans: + - 436 + vm_offset: 436 + ARISTA436T0: + vlans: + - 437 + vm_offset: 437 + ARISTA437T0: + vlans: + - 438 + vm_offset: 438 + ARISTA438T0: + vlans: + - 439 + vm_offset: 439 + ARISTA439T0: + vlans: + - 440 + vm_offset: 440 + ARISTA440T0: + vlans: + - 441 + vm_offset: 441 + ARISTA441T0: + vlans: + - 442 + vm_offset: 442 + ARISTA442T0: 
+ vlans: + - 443 + vm_offset: 443 + ARISTA443T0: + vlans: + - 444 + vm_offset: 444 + ARISTA444T0: + vlans: + - 445 + vm_offset: 445 + ARISTA445T0: + vlans: + - 446 + vm_offset: 446 + ARISTA446T0: + vlans: + - 447 + vm_offset: 447 + ARISTA447T0: + vlans: + - 448 + vm_offset: 448 + ARISTA448T0: + vlans: + - 449 + vm_offset: 449 + ARISTA449T0: + vlans: + - 450 + vm_offset: 450 + ARISTA450T0: + vlans: + - 451 + vm_offset: 451 + ARISTA451T0: + vlans: + - 452 + vm_offset: 452 + ARISTA452T0: + vlans: + - 453 + vm_offset: 453 + ARISTA453T0: + vlans: + - 454 + vm_offset: 454 + ARISTA454T0: + vlans: + - 455 + vm_offset: 455 + ARISTA455T0: + vlans: + - 456 + vm_offset: 456 + ARISTA456T0: + vlans: + - 457 + vm_offset: 457 + ARISTA457T0: + vlans: + - 458 + vm_offset: 458 + ARISTA458T0: + vlans: + - 459 + vm_offset: 459 + ARISTA459T0: + vlans: + - 460 + vm_offset: 460 + ARISTA460T0: + vlans: + - 461 + vm_offset: 461 + ARISTA461T0: + vlans: + - 462 + vm_offset: 462 + ARISTA462T0: + vlans: + - 463 + vm_offset: 463 + ARISTA463T0: + vlans: + - 464 + vm_offset: 464 + ARISTA464T0: + vlans: + - 465 + vm_offset: 465 + ARISTA465T0: + vlans: + - 466 + vm_offset: 466 + ARISTA466T0: + vlans: + - 467 + vm_offset: 467 + ARISTA467T0: + vlans: + - 468 + vm_offset: 468 + ARISTA468T0: + vlans: + - 469 + vm_offset: 469 + ARISTA469T0: + vlans: + - 470 + vm_offset: 470 + ARISTA470T0: + vlans: + - 471 + vm_offset: 471 + ARISTA471T0: + vlans: + - 472 + vm_offset: 472 + ARISTA472T0: + vlans: + - 473 + vm_offset: 473 + ARISTA473T0: + vlans: + - 474 + vm_offset: 474 + ARISTA474T0: + vlans: + - 475 + vm_offset: 475 + ARISTA475T0: + vlans: + - 476 + vm_offset: 476 + ARISTA476T0: + vlans: + - 477 + vm_offset: 477 + ARISTA477T0: + vlans: + - 478 + vm_offset: 478 + ARISTA478T0: + vlans: + - 479 + vm_offset: 479 + ARISTA479T0: + vlans: + - 480 + vm_offset: 480 + ARISTA480T0: + vlans: + - 481 + vm_offset: 481 + ARISTA481T0: + vlans: + - 482 + vm_offset: 482 + ARISTA482T0: + vlans: + - 483 + vm_offset: 483 + 
ARISTA483T0: + vlans: + - 484 + vm_offset: 484 + ARISTA484T0: + vlans: + - 485 + vm_offset: 485 + ARISTA485T0: + vlans: + - 486 + vm_offset: 486 + ARISTA486T0: + vlans: + - 487 + vm_offset: 487 + ARISTA487T0: + vlans: + - 488 + vm_offset: 488 + ARISTA488T0: + vlans: + - 489 + vm_offset: 489 + ARISTA489T0: + vlans: + - 490 + vm_offset: 490 + ARISTA490T0: + vlans: + - 491 + vm_offset: 491 + ARISTA491T0: + vlans: + - 492 + vm_offset: 492 + ARISTA492T0: + vlans: + - 493 + vm_offset: 493 + ARISTA493T0: + vlans: + - 494 + vm_offset: 494 + ARISTA494T0: + vlans: + - 495 + vm_offset: 495 + ARISTA495T0: + vlans: + - 496 + vm_offset: 496 + ARISTA496T0: + vlans: + - 497 + vm_offset: 497 + ARISTA497T0: + vlans: + - 498 + vm_offset: 498 + ARISTA498T0: + vlans: + - 499 + vm_offset: 499 + ARISTA499T0: + vlans: + - 500 + vm_offset: 500 + ARISTA500T0: + vlans: + - 501 + vm_offset: 501 + ARISTA501T0: + vlans: + - 502 + vm_offset: 502 + ARISTA502T0: + vlans: + - 503 + vm_offset: 503 + ARISTA503T0: + vlans: + - 504 + vm_offset: 504 + ARISTA504T0: + vlans: + - 505 + vm_offset: 505 + ARISTA505T0: + vlans: + - 506 + vm_offset: 506 + ARISTA506T0: + vlans: + - 507 + vm_offset: 507 + ARISTA507T0: + vlans: + - 508 + vm_offset: 508 + ARISTA508T0: + vlans: + - 509 + vm_offset: 509 + ARISTA509T0: + vlans: + - 510 + vm_offset: 510 + ARISTA510T0: + vlans: + - 511 + vm_offset: 511 + +configuration_properties: + common: + dut_asn: 4200100000 + dut_type: LeafRouter + podset_number: 200 + tor_number: 16 + tor_subnet_number: 2 + max_tor_subnet_number: 16 + tor_subnet_size: 128 + nhipv6: FC0A::FF + spine: + swrole: spine + tor: + swrole: tor + +configuration: + ARISTA01T2: + properties: + - common + - spine + bgp: + router-id: 0.12.0.1 + asn: 4200200000 + peers: + 4200100000: + - fc00:a::1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1::1/128 + Ethernet1: + ipv6: fc00:a::2/126 + bp_interface: + ipv6: fc00:b::1/64 + + ARISTA02T2: + properties: + - common + - spine + bgp: + router-id: 0.12.0.2 + asn: 
4200200000 + peers: + 4200100000: + - fc00:a::5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2::1/128 + Ethernet1: + ipv6: fc00:a::6/126 + bp_interface: + ipv6: fc00:b::2/64 + + ARISTA01T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.3 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3::1/128 + Ethernet1: + ipv6: fc00:a::a/126 + bp_interface: + ipv6: fc00:b::3/64 + + ARISTA02T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.4 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d + interfaces: + Loopback0: + ipv6: fc00:c:c:4::1/128 + Ethernet1: + ipv6: fc00:a::e/126 + bp_interface: + ipv6: fc00:b::4/64 + + ARISTA03T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.5 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::11 + interfaces: + Loopback0: + ipv6: fc00:c:c:5::1/128 + Ethernet1: + ipv6: fc00:a::12/126 + bp_interface: + ipv6: fc00:b::5/64 + + ARISTA04T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.6 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::15 + interfaces: + Loopback0: + ipv6: fc00:c:c:6::1/128 + Ethernet1: + ipv6: fc00:a::16/126 + bp_interface: + ipv6: fc00:b::6/64 + + ARISTA05T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.7 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::19 + interfaces: + Loopback0: + ipv6: fc00:c:c:7::1/128 + Ethernet1: + ipv6: fc00:a::1a/126 + bp_interface: + ipv6: fc00:b::7/64 + + ARISTA06T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.8 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d + interfaces: + Loopback0: + ipv6: fc00:c:c:8::1/128 + Ethernet1: + ipv6: fc00:a::1e/126 + bp_interface: + ipv6: fc00:b::8/64 + + ARISTA07T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.9 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::21 + interfaces: + Loopback0: + ipv6: fc00:c:c:9::1/128 + Ethernet1: + ipv6: fc00:a::22/126 + bp_interface: + ipv6: fc00:b::9/64 + + 
ARISTA08T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.10 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::25 + interfaces: + Loopback0: + ipv6: fc00:c:c:a::1/128 + Ethernet1: + ipv6: fc00:a::26/126 + bp_interface: + ipv6: fc00:b::a/64 + + ARISTA09T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.11 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::29 + interfaces: + Loopback0: + ipv6: fc00:c:c:b::1/128 + Ethernet1: + ipv6: fc00:a::2a/126 + bp_interface: + ipv6: fc00:b::b/64 + + ARISTA10T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.12 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d + interfaces: + Loopback0: + ipv6: fc00:c:c:c::1/128 + Ethernet1: + ipv6: fc00:a::2e/126 + bp_interface: + ipv6: fc00:b::c/64 + + ARISTA11T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.13 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::31 + interfaces: + Loopback0: + ipv6: fc00:c:c:d::1/128 + Ethernet1: + ipv6: fc00:a::32/126 + bp_interface: + ipv6: fc00:b::d/64 + + ARISTA12T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.14 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::35 + interfaces: + Loopback0: + ipv6: fc00:c:c:e::1/128 + Ethernet1: + ipv6: fc00:a::36/126 + bp_interface: + ipv6: fc00:b::e/64 + + ARISTA13T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.15 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::39 + interfaces: + Loopback0: + ipv6: fc00:c:c:f::1/128 + Ethernet1: + ipv6: fc00:a::3a/126 + bp_interface: + ipv6: fc00:b::f/64 + + ARISTA14T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.16 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d + interfaces: + Loopback0: + ipv6: fc00:c:c:10::1/128 + Ethernet1: + ipv6: fc00:a::3e/126 + bp_interface: + ipv6: fc00:b::10/64 + + ARISTA15T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.17 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::41 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:11::1/128 + Ethernet1: + ipv6: fc00:a::42/126 + bp_interface: + ipv6: fc00:b::11/64 + + ARISTA16T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.18 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::45 + interfaces: + Loopback0: + ipv6: fc00:c:c:12::1/128 + Ethernet1: + ipv6: fc00:a::46/126 + bp_interface: + ipv6: fc00:b::12/64 + + ARISTA17T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.19 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::49 + interfaces: + Loopback0: + ipv6: fc00:c:c:13::1/128 + Ethernet1: + ipv6: fc00:a::4a/126 + bp_interface: + ipv6: fc00:b::13/64 + + ARISTA18T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.20 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4d + interfaces: + Loopback0: + ipv6: fc00:c:c:14::1/128 + Ethernet1: + ipv6: fc00:a::4e/126 + bp_interface: + ipv6: fc00:b::14/64 + + ARISTA19T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.21 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::51 + interfaces: + Loopback0: + ipv6: fc00:c:c:15::1/128 + Ethernet1: + ipv6: fc00:a::52/126 + bp_interface: + ipv6: fc00:b::15/64 + + ARISTA20T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.22 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::55 + interfaces: + Loopback0: + ipv6: fc00:c:c:16::1/128 + Ethernet1: + ipv6: fc00:a::56/126 + bp_interface: + ipv6: fc00:b::16/64 + + ARISTA21T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.23 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::59 + interfaces: + Loopback0: + ipv6: fc00:c:c:17::1/128 + Ethernet1: + ipv6: fc00:a::5a/126 + bp_interface: + ipv6: fc00:b::17/64 + + ARISTA22T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.24 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5d + interfaces: + Loopback0: + ipv6: fc00:c:c:18::1/128 + Ethernet1: + ipv6: fc00:a::5e/126 + bp_interface: + ipv6: fc00:b::18/64 + + ARISTA23T0: + properties: + - common + - tor + bgp: + 
router-id: 0.12.0.25 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::61 + interfaces: + Loopback0: + ipv6: fc00:c:c:19::1/128 + Ethernet1: + ipv6: fc00:a::62/126 + bp_interface: + ipv6: fc00:b::19/64 + + ARISTA24T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.26 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::65 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a::1/128 + Ethernet1: + ipv6: fc00:a::66/126 + bp_interface: + ipv6: fc00:b::1a/64 + + ARISTA25T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.27 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::69 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b::1/128 + Ethernet1: + ipv6: fc00:a::6a/126 + bp_interface: + ipv6: fc00:b::1b/64 + + ARISTA26T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.28 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c::1/128 + Ethernet1: + ipv6: fc00:a::6e/126 + bp_interface: + ipv6: fc00:b::1c/64 + + ARISTA27T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.29 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::71 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d::1/128 + Ethernet1: + ipv6: fc00:a::72/126 + bp_interface: + ipv6: fc00:b::1d/64 + + ARISTA28T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.30 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::75 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e::1/128 + Ethernet1: + ipv6: fc00:a::76/126 + bp_interface: + ipv6: fc00:b::1e/64 + + ARISTA29T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.31 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::79 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f::1/128 + Ethernet1: + ipv6: fc00:a::7a/126 + bp_interface: + ipv6: fc00:b::1f/64 + + ARISTA30T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.32 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7d + interfaces: + Loopback0: + ipv6: fc00:c:c:20::1/128 + Ethernet1: + ipv6: 
fc00:a::7e/126 + bp_interface: + ipv6: fc00:b::20/64 + + ARISTA31T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.33 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::81 + interfaces: + Loopback0: + ipv6: fc00:c:c:21::1/128 + Ethernet1: + ipv6: fc00:a::82/126 + bp_interface: + ipv6: fc00:b::21/64 + + ARISTA32T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.34 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::85 + interfaces: + Loopback0: + ipv6: fc00:c:c:22::1/128 + Ethernet1: + ipv6: fc00:a::86/126 + bp_interface: + ipv6: fc00:b::22/64 + + ARISTA33T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.35 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::89 + interfaces: + Loopback0: + ipv6: fc00:c:c:23::1/128 + Ethernet1: + ipv6: fc00:a::8a/126 + bp_interface: + ipv6: fc00:b::23/64 + + ARISTA34T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.36 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::8d + interfaces: + Loopback0: + ipv6: fc00:c:c:24::1/128 + Ethernet1: + ipv6: fc00:a::8e/126 + bp_interface: + ipv6: fc00:b::24/64 + + ARISTA35T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.37 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::91 + interfaces: + Loopback0: + ipv6: fc00:c:c:25::1/128 + Ethernet1: + ipv6: fc00:a::92/126 + bp_interface: + ipv6: fc00:b::25/64 + + ARISTA36T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.38 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::95 + interfaces: + Loopback0: + ipv6: fc00:c:c:26::1/128 + Ethernet1: + ipv6: fc00:a::96/126 + bp_interface: + ipv6: fc00:b::26/64 + + ARISTA37T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.39 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::99 + interfaces: + Loopback0: + ipv6: fc00:c:c:27::1/128 + Ethernet1: + ipv6: fc00:a::9a/126 + bp_interface: + ipv6: fc00:b::27/64 + + ARISTA38T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.40 + asn: 4200000000 + peers: 
+ 4200100000: + - fc00:a::9d + interfaces: + Loopback0: + ipv6: fc00:c:c:28::1/128 + Ethernet1: + ipv6: fc00:a::9e/126 + bp_interface: + ipv6: fc00:b::28/64 + + ARISTA39T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.41 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:29::1/128 + Ethernet1: + ipv6: fc00:a::a2/126 + bp_interface: + ipv6: fc00:b::29/64 + + ARISTA40T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.42 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2a::1/128 + Ethernet1: + ipv6: fc00:a::a6/126 + bp_interface: + ipv6: fc00:b::2a/64 + + ARISTA41T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.43 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2b::1/128 + Ethernet1: + ipv6: fc00:a::aa/126 + bp_interface: + ipv6: fc00:b::2b/64 + + ARISTA42T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.44 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::ad + interfaces: + Loopback0: + ipv6: fc00:c:c:2c::1/128 + Ethernet1: + ipv6: fc00:a::ae/126 + bp_interface: + ipv6: fc00:b::2c/64 + + ARISTA43T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.45 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:2d::1/128 + Ethernet1: + ipv6: fc00:a::b2/126 + bp_interface: + ipv6: fc00:b::2d/64 + + ARISTA44T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.46 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:2e::1/128 + Ethernet1: + ipv6: fc00:a::b6/126 + bp_interface: + ipv6: fc00:b::2e/64 + + ARISTA45T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.47 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:2f::1/128 + Ethernet1: + ipv6: fc00:a::ba/126 + bp_interface: + ipv6: fc00:b::2f/64 + 
+ ARISTA46T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.48 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::bd + interfaces: + Loopback0: + ipv6: fc00:c:c:30::1/128 + Ethernet1: + ipv6: fc00:a::be/126 + bp_interface: + ipv6: fc00:b::30/64 + + ARISTA47T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.49 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:31::1/128 + Ethernet1: + ipv6: fc00:a::c2/126 + bp_interface: + ipv6: fc00:b::31/64 + + ARISTA48T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.50 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:32::1/128 + Ethernet1: + ipv6: fc00:a::c6/126 + bp_interface: + ipv6: fc00:b::32/64 + + ARISTA49T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.51 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:33::1/128 + Ethernet1: + ipv6: fc00:a::ca/126 + bp_interface: + ipv6: fc00:b::33/64 + + ARISTA50T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.52 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::cd + interfaces: + Loopback0: + ipv6: fc00:c:c:34::1/128 + Ethernet1: + ipv6: fc00:a::ce/126 + bp_interface: + ipv6: fc00:b::34/64 + + ARISTA51T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.53 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:35::1/128 + Ethernet1: + ipv6: fc00:a::d2/126 + bp_interface: + ipv6: fc00:b::35/64 + + ARISTA52T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.54 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:36::1/128 + Ethernet1: + ipv6: fc00:a::d6/126 + bp_interface: + ipv6: fc00:b::36/64 + + ARISTA53T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.55 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::d9 + interfaces: + 
Loopback0: + ipv6: fc00:c:c:37::1/128 + Ethernet1: + ipv6: fc00:a::da/126 + bp_interface: + ipv6: fc00:b::37/64 + + ARISTA54T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.56 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::dd + interfaces: + Loopback0: + ipv6: fc00:c:c:38::1/128 + Ethernet1: + ipv6: fc00:a::de/126 + bp_interface: + ipv6: fc00:b::38/64 + + ARISTA55T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.57 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:39::1/128 + Ethernet1: + ipv6: fc00:a::e2/126 + bp_interface: + ipv6: fc00:b::39/64 + + ARISTA56T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.58 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3a::1/128 + Ethernet1: + ipv6: fc00:a::e6/126 + bp_interface: + ipv6: fc00:b::3a/64 + + ARISTA57T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.59 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3b::1/128 + Ethernet1: + ipv6: fc00:a::ea/126 + bp_interface: + ipv6: fc00:b::3b/64 + + ARISTA58T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.60 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::ed + interfaces: + Loopback0: + ipv6: fc00:c:c:3c::1/128 + Ethernet1: + ipv6: fc00:a::ee/126 + bp_interface: + ipv6: fc00:b::3c/64 + + ARISTA59T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.61 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:3d::1/128 + Ethernet1: + ipv6: fc00:a::f2/126 + bp_interface: + ipv6: fc00:b::3d/64 + + ARISTA60T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.62 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:3e::1/128 + Ethernet1: + ipv6: fc00:a::f6/126 + bp_interface: + ipv6: fc00:b::3e/64 + + ARISTA61T0: + properties: + - common + - 
tor + bgp: + router-id: 0.12.0.63 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:3f::1/128 + Ethernet1: + ipv6: fc00:a::fa/126 + bp_interface: + ipv6: fc00:b::3f/64 + + ARISTA62T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.64 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::fd + interfaces: + Loopback0: + ipv6: fc00:c:c:40::1/128 + Ethernet1: + ipv6: fc00:a::fe/126 + bp_interface: + ipv6: fc00:b::40/64 + + ARISTA63T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.65 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::101 + interfaces: + Loopback0: + ipv6: fc00:c:c:41::1/128 + Ethernet1: + ipv6: fc00:a::102/126 + bp_interface: + ipv6: fc00:b::41/64 + + ARISTA64T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.66 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::105 + interfaces: + Loopback0: + ipv6: fc00:c:c:42::1/128 + Ethernet1: + ipv6: fc00:a::106/126 + bp_interface: + ipv6: fc00:b::42/64 + + ARISTA65T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.67 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::109 + interfaces: + Loopback0: + ipv6: fc00:c:c:43::1/128 + Ethernet1: + ipv6: fc00:a::10a/126 + bp_interface: + ipv6: fc00:b::43/64 + + ARISTA66T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.68 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::10d + interfaces: + Loopback0: + ipv6: fc00:c:c:44::1/128 + Ethernet1: + ipv6: fc00:a::10e/126 + bp_interface: + ipv6: fc00:b::44/64 + + ARISTA67T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.69 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::111 + interfaces: + Loopback0: + ipv6: fc00:c:c:45::1/128 + Ethernet1: + ipv6: fc00:a::112/126 + bp_interface: + ipv6: fc00:b::45/64 + + ARISTA68T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.70 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::115 + interfaces: + Loopback0: + ipv6: fc00:c:c:46::1/128 + 
Ethernet1: + ipv6: fc00:a::116/126 + bp_interface: + ipv6: fc00:b::46/64 + + ARISTA69T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.71 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::119 + interfaces: + Loopback0: + ipv6: fc00:c:c:47::1/128 + Ethernet1: + ipv6: fc00:a::11a/126 + bp_interface: + ipv6: fc00:b::47/64 + + ARISTA70T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.72 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::11d + interfaces: + Loopback0: + ipv6: fc00:c:c:48::1/128 + Ethernet1: + ipv6: fc00:a::11e/126 + bp_interface: + ipv6: fc00:b::48/64 + + ARISTA71T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.73 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::121 + interfaces: + Loopback0: + ipv6: fc00:c:c:49::1/128 + Ethernet1: + ipv6: fc00:a::122/126 + bp_interface: + ipv6: fc00:b::49/64 + + ARISTA72T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.74 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::125 + interfaces: + Loopback0: + ipv6: fc00:c:c:4a::1/128 + Ethernet1: + ipv6: fc00:a::126/126 + bp_interface: + ipv6: fc00:b::4a/64 + + ARISTA73T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.75 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::129 + interfaces: + Loopback0: + ipv6: fc00:c:c:4b::1/128 + Ethernet1: + ipv6: fc00:a::12a/126 + bp_interface: + ipv6: fc00:b::4b/64 + + ARISTA74T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.76 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::12d + interfaces: + Loopback0: + ipv6: fc00:c:c:4c::1/128 + Ethernet1: + ipv6: fc00:a::12e/126 + bp_interface: + ipv6: fc00:b::4c/64 + + ARISTA75T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.77 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::131 + interfaces: + Loopback0: + ipv6: fc00:c:c:4d::1/128 + Ethernet1: + ipv6: fc00:a::132/126 + bp_interface: + ipv6: fc00:b::4d/64 + + ARISTA76T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.0.78 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::135 + interfaces: + Loopback0: + ipv6: fc00:c:c:4e::1/128 + Ethernet1: + ipv6: fc00:a::136/126 + bp_interface: + ipv6: fc00:b::4e/64 + + ARISTA77T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.79 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::139 + interfaces: + Loopback0: + ipv6: fc00:c:c:4f::1/128 + Ethernet1: + ipv6: fc00:a::13a/126 + bp_interface: + ipv6: fc00:b::4f/64 + + ARISTA78T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.80 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::13d + interfaces: + Loopback0: + ipv6: fc00:c:c:50::1/128 + Ethernet1: + ipv6: fc00:a::13e/126 + bp_interface: + ipv6: fc00:b::50/64 + + ARISTA79T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.81 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::141 + interfaces: + Loopback0: + ipv6: fc00:c:c:51::1/128 + Ethernet1: + ipv6: fc00:a::142/126 + bp_interface: + ipv6: fc00:b::51/64 + + ARISTA80T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.82 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::145 + interfaces: + Loopback0: + ipv6: fc00:c:c:52::1/128 + Ethernet1: + ipv6: fc00:a::146/126 + bp_interface: + ipv6: fc00:b::52/64 + + ARISTA81T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.83 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::149 + interfaces: + Loopback0: + ipv6: fc00:c:c:53::1/128 + Ethernet1: + ipv6: fc00:a::14a/126 + bp_interface: + ipv6: fc00:b::53/64 + + ARISTA82T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.84 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::14d + interfaces: + Loopback0: + ipv6: fc00:c:c:54::1/128 + Ethernet1: + ipv6: fc00:a::14e/126 + bp_interface: + ipv6: fc00:b::54/64 + + ARISTA83T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.85 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::151 + interfaces: + Loopback0: + ipv6: fc00:c:c:55::1/128 + Ethernet1: + ipv6: 
fc00:a::152/126 + bp_interface: + ipv6: fc00:b::55/64 + + ARISTA84T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.86 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::155 + interfaces: + Loopback0: + ipv6: fc00:c:c:56::1/128 + Ethernet1: + ipv6: fc00:a::156/126 + bp_interface: + ipv6: fc00:b::56/64 + + ARISTA85T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.87 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::159 + interfaces: + Loopback0: + ipv6: fc00:c:c:57::1/128 + Ethernet1: + ipv6: fc00:a::15a/126 + bp_interface: + ipv6: fc00:b::57/64 + + ARISTA86T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.88 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::15d + interfaces: + Loopback0: + ipv6: fc00:c:c:58::1/128 + Ethernet1: + ipv6: fc00:a::15e/126 + bp_interface: + ipv6: fc00:b::58/64 + + ARISTA87T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.89 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::161 + interfaces: + Loopback0: + ipv6: fc00:c:c:59::1/128 + Ethernet1: + ipv6: fc00:a::162/126 + bp_interface: + ipv6: fc00:b::59/64 + + ARISTA88T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.90 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::165 + interfaces: + Loopback0: + ipv6: fc00:c:c:5a::1/128 + Ethernet1: + ipv6: fc00:a::166/126 + bp_interface: + ipv6: fc00:b::5a/64 + + ARISTA89T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.91 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::169 + interfaces: + Loopback0: + ipv6: fc00:c:c:5b::1/128 + Ethernet1: + ipv6: fc00:a::16a/126 + bp_interface: + ipv6: fc00:b::5b/64 + + ARISTA90T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.92 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::16d + interfaces: + Loopback0: + ipv6: fc00:c:c:5c::1/128 + Ethernet1: + ipv6: fc00:a::16e/126 + bp_interface: + ipv6: fc00:b::5c/64 + + ARISTA91T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.93 + asn: 
4200000000 + peers: + 4200100000: + - fc00:a::171 + interfaces: + Loopback0: + ipv6: fc00:c:c:5d::1/128 + Ethernet1: + ipv6: fc00:a::172/126 + bp_interface: + ipv6: fc00:b::5d/64 + + ARISTA92T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.94 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::175 + interfaces: + Loopback0: + ipv6: fc00:c:c:5e::1/128 + Ethernet1: + ipv6: fc00:a::176/126 + bp_interface: + ipv6: fc00:b::5e/64 + + ARISTA93T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.95 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::179 + interfaces: + Loopback0: + ipv6: fc00:c:c:5f::1/128 + Ethernet1: + ipv6: fc00:a::17a/126 + bp_interface: + ipv6: fc00:b::5f/64 + + ARISTA94T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.96 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::17d + interfaces: + Loopback0: + ipv6: fc00:c:c:60::1/128 + Ethernet1: + ipv6: fc00:a::17e/126 + bp_interface: + ipv6: fc00:b::60/64 + + ARISTA95T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.97 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::181 + interfaces: + Loopback0: + ipv6: fc00:c:c:61::1/128 + Ethernet1: + ipv6: fc00:a::182/126 + bp_interface: + ipv6: fc00:b::61/64 + + ARISTA96T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.98 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::185 + interfaces: + Loopback0: + ipv6: fc00:c:c:62::1/128 + Ethernet1: + ipv6: fc00:a::186/126 + bp_interface: + ipv6: fc00:b::62/64 + + ARISTA97T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.99 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::189 + interfaces: + Loopback0: + ipv6: fc00:c:c:63::1/128 + Ethernet1: + ipv6: fc00:a::18a/126 + bp_interface: + ipv6: fc00:b::63/64 + + ARISTA98T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.100 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::18d + interfaces: + Loopback0: + ipv6: fc00:c:c:64::1/128 + Ethernet1: + ipv6: fc00:a::18e/126 + 
bp_interface: + ipv6: fc00:b::64/64 + + ARISTA99T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.101 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::191 + interfaces: + Loopback0: + ipv6: fc00:c:c:65::1/128 + Ethernet1: + ipv6: fc00:a::192/126 + bp_interface: + ipv6: fc00:b::65/64 + + ARISTA100T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.102 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::195 + interfaces: + Loopback0: + ipv6: fc00:c:c:66::1/128 + Ethernet1: + ipv6: fc00:a::196/126 + bp_interface: + ipv6: fc00:b::66/64 + + ARISTA101T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.103 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::199 + interfaces: + Loopback0: + ipv6: fc00:c:c:67::1/128 + Ethernet1: + ipv6: fc00:a::19a/126 + bp_interface: + ipv6: fc00:b::67/64 + + ARISTA102T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.104 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::19d + interfaces: + Loopback0: + ipv6: fc00:c:c:68::1/128 + Ethernet1: + ipv6: fc00:a::19e/126 + bp_interface: + ipv6: fc00:b::68/64 + + ARISTA103T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.105 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:69::1/128 + Ethernet1: + ipv6: fc00:a::1a2/126 + bp_interface: + ipv6: fc00:b::69/64 + + ARISTA104T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.106 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6a::1/128 + Ethernet1: + ipv6: fc00:a::1a6/126 + bp_interface: + ipv6: fc00:b::6a/64 + + ARISTA105T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.107 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6b::1/128 + Ethernet1: + ipv6: fc00:a::1aa/126 + bp_interface: + ipv6: fc00:b::6b/64 + + ARISTA106T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.108 + asn: 
4200000000 + peers: + 4200100000: + - fc00:a::1ad + interfaces: + Loopback0: + ipv6: fc00:c:c:6c::1/128 + Ethernet1: + ipv6: fc00:a::1ae/126 + bp_interface: + ipv6: fc00:b::6c/64 + + ARISTA107T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.109 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:6d::1/128 + Ethernet1: + ipv6: fc00:a::1b2/126 + bp_interface: + ipv6: fc00:b::6d/64 + + ARISTA108T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.110 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:6e::1/128 + Ethernet1: + ipv6: fc00:a::1b6/126 + bp_interface: + ipv6: fc00:b::6e/64 + + ARISTA109T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.111 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:6f::1/128 + Ethernet1: + ipv6: fc00:a::1ba/126 + bp_interface: + ipv6: fc00:b::6f/64 + + ARISTA110T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.112 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1bd + interfaces: + Loopback0: + ipv6: fc00:c:c:70::1/128 + Ethernet1: + ipv6: fc00:a::1be/126 + bp_interface: + ipv6: fc00:b::70/64 + + ARISTA111T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.113 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:71::1/128 + Ethernet1: + ipv6: fc00:a::1c2/126 + bp_interface: + ipv6: fc00:b::71/64 + + ARISTA112T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.114 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:72::1/128 + Ethernet1: + ipv6: fc00:a::1c6/126 + bp_interface: + ipv6: fc00:b::72/64 + + ARISTA113T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.115 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:73::1/128 + Ethernet1: + ipv6: 
fc00:a::1ca/126 + bp_interface: + ipv6: fc00:b::73/64 + + ARISTA114T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.116 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1cd + interfaces: + Loopback0: + ipv6: fc00:c:c:74::1/128 + Ethernet1: + ipv6: fc00:a::1ce/126 + bp_interface: + ipv6: fc00:b::74/64 + + ARISTA115T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.117 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:75::1/128 + Ethernet1: + ipv6: fc00:a::1d2/126 + bp_interface: + ipv6: fc00:b::75/64 + + ARISTA116T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.118 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:76::1/128 + Ethernet1: + ipv6: fc00:a::1d6/126 + bp_interface: + ipv6: fc00:b::76/64 + + ARISTA117T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.119 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:77::1/128 + Ethernet1: + ipv6: fc00:a::1da/126 + bp_interface: + ipv6: fc00:b::77/64 + + ARISTA118T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.120 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1dd + interfaces: + Loopback0: + ipv6: fc00:c:c:78::1/128 + Ethernet1: + ipv6: fc00:a::1de/126 + bp_interface: + ipv6: fc00:b::78/64 + + ARISTA119T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.121 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:79::1/128 + Ethernet1: + ipv6: fc00:a::1e2/126 + bp_interface: + ipv6: fc00:b::79/64 + + ARISTA120T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.122 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7a::1/128 + Ethernet1: + ipv6: fc00:a::1e6/126 + bp_interface: + ipv6: fc00:b::7a/64 + + ARISTA121T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.0.123 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7b::1/128 + Ethernet1: + ipv6: fc00:a::1ea/126 + bp_interface: + ipv6: fc00:b::7b/64 + + ARISTA122T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.124 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1ed + interfaces: + Loopback0: + ipv6: fc00:c:c:7c::1/128 + Ethernet1: + ipv6: fc00:a::1ee/126 + bp_interface: + ipv6: fc00:b::7c/64 + + ARISTA123T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.125 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:7d::1/128 + Ethernet1: + ipv6: fc00:a::1f2/126 + bp_interface: + ipv6: fc00:b::7d/64 + + ARISTA124T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.126 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:7e::1/128 + Ethernet1: + ipv6: fc00:a::1f6/126 + bp_interface: + ipv6: fc00:b::7e/64 + + ARISTA125T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.127 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:7f::1/128 + Ethernet1: + ipv6: fc00:a::1fa/126 + bp_interface: + ipv6: fc00:b::7f/64 + + ARISTA126T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.128 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::1fd + interfaces: + Loopback0: + ipv6: fc00:c:c:80::1/128 + Ethernet1: + ipv6: fc00:a::1fe/126 + bp_interface: + ipv6: fc00:b::80/64 + + ARISTA127T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.129 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::201 + interfaces: + Loopback0: + ipv6: fc00:c:c:81::1/128 + Ethernet1: + ipv6: fc00:a::202/126 + bp_interface: + ipv6: fc00:b::81/64 + + ARISTA128T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.130 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::205 + interfaces: + Loopback0: + ipv6: fc00:c:c:82::1/128 + 
Ethernet1: + ipv6: fc00:a::206/126 + bp_interface: + ipv6: fc00:b::82/64 + + ARISTA129T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.131 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::209 + interfaces: + Loopback0: + ipv6: fc00:c:c:83::1/128 + Ethernet1: + ipv6: fc00:a::20a/126 + bp_interface: + ipv6: fc00:b::83/64 + + ARISTA130T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.132 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::20d + interfaces: + Loopback0: + ipv6: fc00:c:c:84::1/128 + Ethernet1: + ipv6: fc00:a::20e/126 + bp_interface: + ipv6: fc00:b::84/64 + + ARISTA131T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.133 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::211 + interfaces: + Loopback0: + ipv6: fc00:c:c:85::1/128 + Ethernet1: + ipv6: fc00:a::212/126 + bp_interface: + ipv6: fc00:b::85/64 + + ARISTA132T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.134 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::215 + interfaces: + Loopback0: + ipv6: fc00:c:c:86::1/128 + Ethernet1: + ipv6: fc00:a::216/126 + bp_interface: + ipv6: fc00:b::86/64 + + ARISTA133T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.135 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::219 + interfaces: + Loopback0: + ipv6: fc00:c:c:87::1/128 + Ethernet1: + ipv6: fc00:a::21a/126 + bp_interface: + ipv6: fc00:b::87/64 + + ARISTA134T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.136 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::21d + interfaces: + Loopback0: + ipv6: fc00:c:c:88::1/128 + Ethernet1: + ipv6: fc00:a::21e/126 + bp_interface: + ipv6: fc00:b::88/64 + + ARISTA135T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.137 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::221 + interfaces: + Loopback0: + ipv6: fc00:c:c:89::1/128 + Ethernet1: + ipv6: fc00:a::222/126 + bp_interface: + ipv6: fc00:b::89/64 + + ARISTA136T0: + properties: + - common + - tor + bgp: 
+ router-id: 0.12.0.138 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::225 + interfaces: + Loopback0: + ipv6: fc00:c:c:8a::1/128 + Ethernet1: + ipv6: fc00:a::226/126 + bp_interface: + ipv6: fc00:b::8a/64 + + ARISTA137T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.139 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::229 + interfaces: + Loopback0: + ipv6: fc00:c:c:8b::1/128 + Ethernet1: + ipv6: fc00:a::22a/126 + bp_interface: + ipv6: fc00:b::8b/64 + + ARISTA138T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.140 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::22d + interfaces: + Loopback0: + ipv6: fc00:c:c:8c::1/128 + Ethernet1: + ipv6: fc00:a::22e/126 + bp_interface: + ipv6: fc00:b::8c/64 + + ARISTA139T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.141 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::231 + interfaces: + Loopback0: + ipv6: fc00:c:c:8d::1/128 + Ethernet1: + ipv6: fc00:a::232/126 + bp_interface: + ipv6: fc00:b::8d/64 + + ARISTA140T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.142 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::235 + interfaces: + Loopback0: + ipv6: fc00:c:c:8e::1/128 + Ethernet1: + ipv6: fc00:a::236/126 + bp_interface: + ipv6: fc00:b::8e/64 + + ARISTA141T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.143 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::239 + interfaces: + Loopback0: + ipv6: fc00:c:c:8f::1/128 + Ethernet1: + ipv6: fc00:a::23a/126 + bp_interface: + ipv6: fc00:b::8f/64 + + ARISTA142T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.144 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::23d + interfaces: + Loopback0: + ipv6: fc00:c:c:90::1/128 + Ethernet1: + ipv6: fc00:a::23e/126 + bp_interface: + ipv6: fc00:b::90/64 + + ARISTA143T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.145 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::241 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:91::1/128 + Ethernet1: + ipv6: fc00:a::242/126 + bp_interface: + ipv6: fc00:b::91/64 + + ARISTA144T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.146 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::245 + interfaces: + Loopback0: + ipv6: fc00:c:c:92::1/128 + Ethernet1: + ipv6: fc00:a::246/126 + bp_interface: + ipv6: fc00:b::92/64 + + ARISTA145T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.147 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::249 + interfaces: + Loopback0: + ipv6: fc00:c:c:93::1/128 + Ethernet1: + ipv6: fc00:a::24a/126 + bp_interface: + ipv6: fc00:b::93/64 + + ARISTA146T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.148 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::24d + interfaces: + Loopback0: + ipv6: fc00:c:c:94::1/128 + Ethernet1: + ipv6: fc00:a::24e/126 + bp_interface: + ipv6: fc00:b::94/64 + + ARISTA147T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.149 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::251 + interfaces: + Loopback0: + ipv6: fc00:c:c:95::1/128 + Ethernet1: + ipv6: fc00:a::252/126 + bp_interface: + ipv6: fc00:b::95/64 + + ARISTA148T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.150 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::255 + interfaces: + Loopback0: + ipv6: fc00:c:c:96::1/128 + Ethernet1: + ipv6: fc00:a::256/126 + bp_interface: + ipv6: fc00:b::96/64 + + ARISTA149T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.151 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::259 + interfaces: + Loopback0: + ipv6: fc00:c:c:97::1/128 + Ethernet1: + ipv6: fc00:a::25a/126 + bp_interface: + ipv6: fc00:b::97/64 + + ARISTA150T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.152 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::25d + interfaces: + Loopback0: + ipv6: fc00:c:c:98::1/128 + Ethernet1: + ipv6: fc00:a::25e/126 + bp_interface: + ipv6: fc00:b::98/64 + + ARISTA151T0: + properties: + - 
common + - tor + bgp: + router-id: 0.12.0.153 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::261 + interfaces: + Loopback0: + ipv6: fc00:c:c:99::1/128 + Ethernet1: + ipv6: fc00:a::262/126 + bp_interface: + ipv6: fc00:b::99/64 + + ARISTA152T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.154 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::265 + interfaces: + Loopback0: + ipv6: fc00:c:c:9a::1/128 + Ethernet1: + ipv6: fc00:a::266/126 + bp_interface: + ipv6: fc00:b::9a/64 + + ARISTA153T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.155 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::269 + interfaces: + Loopback0: + ipv6: fc00:c:c:9b::1/128 + Ethernet1: + ipv6: fc00:a::26a/126 + bp_interface: + ipv6: fc00:b::9b/64 + + ARISTA154T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.156 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::26d + interfaces: + Loopback0: + ipv6: fc00:c:c:9c::1/128 + Ethernet1: + ipv6: fc00:a::26e/126 + bp_interface: + ipv6: fc00:b::9c/64 + + ARISTA155T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.157 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::271 + interfaces: + Loopback0: + ipv6: fc00:c:c:9d::1/128 + Ethernet1: + ipv6: fc00:a::272/126 + bp_interface: + ipv6: fc00:b::9d/64 + + ARISTA156T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.158 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::275 + interfaces: + Loopback0: + ipv6: fc00:c:c:9e::1/128 + Ethernet1: + ipv6: fc00:a::276/126 + bp_interface: + ipv6: fc00:b::9e/64 + + ARISTA157T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.159 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::279 + interfaces: + Loopback0: + ipv6: fc00:c:c:9f::1/128 + Ethernet1: + ipv6: fc00:a::27a/126 + bp_interface: + ipv6: fc00:b::9f/64 + + ARISTA158T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.160 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::27d + interfaces: + Loopback0: 
+ ipv6: fc00:c:c:a0::1/128 + Ethernet1: + ipv6: fc00:a::27e/126 + bp_interface: + ipv6: fc00:b::a0/64 + + ARISTA159T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.161 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::281 + interfaces: + Loopback0: + ipv6: fc00:c:c:a1::1/128 + Ethernet1: + ipv6: fc00:a::282/126 + bp_interface: + ipv6: fc00:b::a1/64 + + ARISTA160T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.162 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::285 + interfaces: + Loopback0: + ipv6: fc00:c:c:a2::1/128 + Ethernet1: + ipv6: fc00:a::286/126 + bp_interface: + ipv6: fc00:b::a2/64 + + ARISTA161T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.163 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::289 + interfaces: + Loopback0: + ipv6: fc00:c:c:a3::1/128 + Ethernet1: + ipv6: fc00:a::28a/126 + bp_interface: + ipv6: fc00:b::a3/64 + + ARISTA162T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.164 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::28d + interfaces: + Loopback0: + ipv6: fc00:c:c:a4::1/128 + Ethernet1: + ipv6: fc00:a::28e/126 + bp_interface: + ipv6: fc00:b::a4/64 + + ARISTA163T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.165 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::291 + interfaces: + Loopback0: + ipv6: fc00:c:c:a5::1/128 + Ethernet1: + ipv6: fc00:a::292/126 + bp_interface: + ipv6: fc00:b::a5/64 + + ARISTA164T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.166 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::295 + interfaces: + Loopback0: + ipv6: fc00:c:c:a6::1/128 + Ethernet1: + ipv6: fc00:a::296/126 + bp_interface: + ipv6: fc00:b::a6/64 + + ARISTA165T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.167 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::299 + interfaces: + Loopback0: + ipv6: fc00:c:c:a7::1/128 + Ethernet1: + ipv6: fc00:a::29a/126 + bp_interface: + ipv6: fc00:b::a7/64 + + ARISTA166T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.0.168 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::29d + interfaces: + Loopback0: + ipv6: fc00:c:c:a8::1/128 + Ethernet1: + ipv6: fc00:a::29e/126 + bp_interface: + ipv6: fc00:b::a8/64 + + ARISTA167T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.169 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:a9::1/128 + Ethernet1: + ipv6: fc00:a::2a2/126 + bp_interface: + ipv6: fc00:b::a9/64 + + ARISTA168T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.170 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:aa::1/128 + Ethernet1: + ipv6: fc00:a::2a6/126 + bp_interface: + ipv6: fc00:b::aa/64 + + ARISTA169T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.171 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ab::1/128 + Ethernet1: + ipv6: fc00:a::2aa/126 + bp_interface: + ipv6: fc00:b::ab/64 + + ARISTA170T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.172 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ac::1/128 + Ethernet1: + ipv6: fc00:a::2ae/126 + bp_interface: + ipv6: fc00:b::ac/64 + + ARISTA171T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.173 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ad::1/128 + Ethernet1: + ipv6: fc00:a::2b2/126 + bp_interface: + ipv6: fc00:b::ad/64 + + ARISTA172T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.174 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ae::1/128 + Ethernet1: + ipv6: fc00:a::2b6/126 + bp_interface: + ipv6: fc00:b::ae/64 + + ARISTA173T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.175 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2b9 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:af::1/128 + Ethernet1: + ipv6: fc00:a::2ba/126 + bp_interface: + ipv6: fc00:b::af/64 + + ARISTA174T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.176 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2bd + interfaces: + Loopback0: + ipv6: fc00:c:c:b0::1/128 + Ethernet1: + ipv6: fc00:a::2be/126 + bp_interface: + ipv6: fc00:b::b0/64 + + ARISTA175T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.177 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b1::1/128 + Ethernet1: + ipv6: fc00:a::2c2/126 + bp_interface: + ipv6: fc00:b::b1/64 + + ARISTA176T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.178 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b2::1/128 + Ethernet1: + ipv6: fc00:a::2c6/126 + bp_interface: + ipv6: fc00:b::b2/64 + + ARISTA177T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.179 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b3::1/128 + Ethernet1: + ipv6: fc00:a::2ca/126 + bp_interface: + ipv6: fc00:b::b3/64 + + ARISTA178T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.180 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2cd + interfaces: + Loopback0: + ipv6: fc00:c:c:b4::1/128 + Ethernet1: + ipv6: fc00:a::2ce/126 + bp_interface: + ipv6: fc00:b::b4/64 + + ARISTA179T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.181 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b5::1/128 + Ethernet1: + ipv6: fc00:a::2d2/126 + bp_interface: + ipv6: fc00:b::b5/64 + + ARISTA180T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.182 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:b6::1/128 + Ethernet1: + ipv6: fc00:a::2d6/126 + bp_interface: + ipv6: fc00:b::b6/64 + + 
ARISTA181T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.183 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:b7::1/128 + Ethernet1: + ipv6: fc00:a::2da/126 + bp_interface: + ipv6: fc00:b::b7/64 + + ARISTA182T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.184 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2dd + interfaces: + Loopback0: + ipv6: fc00:c:c:b8::1/128 + Ethernet1: + ipv6: fc00:a::2de/126 + bp_interface: + ipv6: fc00:b::b8/64 + + ARISTA183T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.185 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:b9::1/128 + Ethernet1: + ipv6: fc00:a::2e2/126 + bp_interface: + ipv6: fc00:b::b9/64 + + ARISTA184T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.186 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ba::1/128 + Ethernet1: + ipv6: fc00:a::2e6/126 + bp_interface: + ipv6: fc00:b::ba/64 + + ARISTA185T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.187 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bb::1/128 + Ethernet1: + ipv6: fc00:a::2ea/126 + bp_interface: + ipv6: fc00:b::bb/64 + + ARISTA186T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.188 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2ed + interfaces: + Loopback0: + ipv6: fc00:c:c:bc::1/128 + Ethernet1: + ipv6: fc00:a::2ee/126 + bp_interface: + ipv6: fc00:b::bc/64 + + ARISTA187T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.189 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:bd::1/128 + Ethernet1: + ipv6: fc00:a::2f2/126 + bp_interface: + ipv6: fc00:b::bd/64 + + ARISTA188T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.190 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::2f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:be::1/128 + Ethernet1: + ipv6: fc00:a::2f6/126 + bp_interface: + ipv6: fc00:b::be/64 + + ARISTA189T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.191 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:bf::1/128 + Ethernet1: + ipv6: fc00:a::2fa/126 + bp_interface: + ipv6: fc00:b::bf/64 + + ARISTA190T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.192 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::2fd + interfaces: + Loopback0: + ipv6: fc00:c:c:c0::1/128 + Ethernet1: + ipv6: fc00:a::2fe/126 + bp_interface: + ipv6: fc00:b::c0/64 + + ARISTA191T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.193 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::301 + interfaces: + Loopback0: + ipv6: fc00:c:c:c1::1/128 + Ethernet1: + ipv6: fc00:a::302/126 + bp_interface: + ipv6: fc00:b::c1/64 + + ARISTA192T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.194 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::305 + interfaces: + Loopback0: + ipv6: fc00:c:c:c2::1/128 + Ethernet1: + ipv6: fc00:a::306/126 + bp_interface: + ipv6: fc00:b::c2/64 + + ARISTA193T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.195 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::309 + interfaces: + Loopback0: + ipv6: fc00:c:c:c3::1/128 + Ethernet1: + ipv6: fc00:a::30a/126 + bp_interface: + ipv6: fc00:b::c3/64 + + ARISTA194T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.196 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::30d + interfaces: + Loopback0: + ipv6: fc00:c:c:c4::1/128 + Ethernet1: + ipv6: fc00:a::30e/126 + bp_interface: + ipv6: fc00:b::c4/64 + + ARISTA195T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.197 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::311 + interfaces: + Loopback0: + ipv6: fc00:c:c:c5::1/128 + Ethernet1: + ipv6: fc00:a::312/126 + bp_interface: + ipv6: 
fc00:b::c5/64 + + ARISTA196T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.198 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::315 + interfaces: + Loopback0: + ipv6: fc00:c:c:c6::1/128 + Ethernet1: + ipv6: fc00:a::316/126 + bp_interface: + ipv6: fc00:b::c6/64 + + ARISTA197T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.199 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::319 + interfaces: + Loopback0: + ipv6: fc00:c:c:c7::1/128 + Ethernet1: + ipv6: fc00:a::31a/126 + bp_interface: + ipv6: fc00:b::c7/64 + + ARISTA198T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.200 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::31d + interfaces: + Loopback0: + ipv6: fc00:c:c:c8::1/128 + Ethernet1: + ipv6: fc00:a::31e/126 + bp_interface: + ipv6: fc00:b::c8/64 + + ARISTA199T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.201 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::321 + interfaces: + Loopback0: + ipv6: fc00:c:c:c9::1/128 + Ethernet1: + ipv6: fc00:a::322/126 + bp_interface: + ipv6: fc00:b::c9/64 + + ARISTA200T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.202 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::325 + interfaces: + Loopback0: + ipv6: fc00:c:c:ca::1/128 + Ethernet1: + ipv6: fc00:a::326/126 + bp_interface: + ipv6: fc00:b::ca/64 + + ARISTA201T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.203 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::329 + interfaces: + Loopback0: + ipv6: fc00:c:c:cb::1/128 + Ethernet1: + ipv6: fc00:a::32a/126 + bp_interface: + ipv6: fc00:b::cb/64 + + ARISTA202T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.204 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::32d + interfaces: + Loopback0: + ipv6: fc00:c:c:cc::1/128 + Ethernet1: + ipv6: fc00:a::32e/126 + bp_interface: + ipv6: fc00:b::cc/64 + + ARISTA203T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.205 + asn: 4200000000 + peers: + 
4200100000: + - fc00:a::331 + interfaces: + Loopback0: + ipv6: fc00:c:c:cd::1/128 + Ethernet1: + ipv6: fc00:a::332/126 + bp_interface: + ipv6: fc00:b::cd/64 + + ARISTA204T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.206 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::335 + interfaces: + Loopback0: + ipv6: fc00:c:c:ce::1/128 + Ethernet1: + ipv6: fc00:a::336/126 + bp_interface: + ipv6: fc00:b::ce/64 + + ARISTA205T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.207 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::339 + interfaces: + Loopback0: + ipv6: fc00:c:c:cf::1/128 + Ethernet1: + ipv6: fc00:a::33a/126 + bp_interface: + ipv6: fc00:b::cf/64 + + ARISTA206T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.208 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::33d + interfaces: + Loopback0: + ipv6: fc00:c:c:d0::1/128 + Ethernet1: + ipv6: fc00:a::33e/126 + bp_interface: + ipv6: fc00:b::d0/64 + + ARISTA207T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.209 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::341 + interfaces: + Loopback0: + ipv6: fc00:c:c:d1::1/128 + Ethernet1: + ipv6: fc00:a::342/126 + bp_interface: + ipv6: fc00:b::d1/64 + + ARISTA208T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.210 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::345 + interfaces: + Loopback0: + ipv6: fc00:c:c:d2::1/128 + Ethernet1: + ipv6: fc00:a::346/126 + bp_interface: + ipv6: fc00:b::d2/64 + + ARISTA209T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.211 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::349 + interfaces: + Loopback0: + ipv6: fc00:c:c:d3::1/128 + Ethernet1: + ipv6: fc00:a::34a/126 + bp_interface: + ipv6: fc00:b::d3/64 + + ARISTA210T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.212 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::34d + interfaces: + Loopback0: + ipv6: fc00:c:c:d4::1/128 + Ethernet1: + ipv6: fc00:a::34e/126 + 
bp_interface: + ipv6: fc00:b::d4/64 + + ARISTA211T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.213 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::351 + interfaces: + Loopback0: + ipv6: fc00:c:c:d5::1/128 + Ethernet1: + ipv6: fc00:a::352/126 + bp_interface: + ipv6: fc00:b::d5/64 + + ARISTA212T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.214 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::355 + interfaces: + Loopback0: + ipv6: fc00:c:c:d6::1/128 + Ethernet1: + ipv6: fc00:a::356/126 + bp_interface: + ipv6: fc00:b::d6/64 + + ARISTA213T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.215 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::359 + interfaces: + Loopback0: + ipv6: fc00:c:c:d7::1/128 + Ethernet1: + ipv6: fc00:a::35a/126 + bp_interface: + ipv6: fc00:b::d7/64 + + ARISTA214T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.216 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::35d + interfaces: + Loopback0: + ipv6: fc00:c:c:d8::1/128 + Ethernet1: + ipv6: fc00:a::35e/126 + bp_interface: + ipv6: fc00:b::d8/64 + + ARISTA215T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.217 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::361 + interfaces: + Loopback0: + ipv6: fc00:c:c:d9::1/128 + Ethernet1: + ipv6: fc00:a::362/126 + bp_interface: + ipv6: fc00:b::d9/64 + + ARISTA216T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.218 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::365 + interfaces: + Loopback0: + ipv6: fc00:c:c:da::1/128 + Ethernet1: + ipv6: fc00:a::366/126 + bp_interface: + ipv6: fc00:b::da/64 + + ARISTA217T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.219 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::369 + interfaces: + Loopback0: + ipv6: fc00:c:c:db::1/128 + Ethernet1: + ipv6: fc00:a::36a/126 + bp_interface: + ipv6: fc00:b::db/64 + + ARISTA218T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.220 + asn: 
4200000000 + peers: + 4200100000: + - fc00:a::36d + interfaces: + Loopback0: + ipv6: fc00:c:c:dc::1/128 + Ethernet1: + ipv6: fc00:a::36e/126 + bp_interface: + ipv6: fc00:b::dc/64 + + ARISTA219T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.221 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::371 + interfaces: + Loopback0: + ipv6: fc00:c:c:dd::1/128 + Ethernet1: + ipv6: fc00:a::372/126 + bp_interface: + ipv6: fc00:b::dd/64 + + ARISTA220T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.222 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::375 + interfaces: + Loopback0: + ipv6: fc00:c:c:de::1/128 + Ethernet1: + ipv6: fc00:a::376/126 + bp_interface: + ipv6: fc00:b::de/64 + + ARISTA221T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.223 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::379 + interfaces: + Loopback0: + ipv6: fc00:c:c:df::1/128 + Ethernet1: + ipv6: fc00:a::37a/126 + bp_interface: + ipv6: fc00:b::df/64 + + ARISTA222T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.224 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::37d + interfaces: + Loopback0: + ipv6: fc00:c:c:e0::1/128 + Ethernet1: + ipv6: fc00:a::37e/126 + bp_interface: + ipv6: fc00:b::e0/64 + + ARISTA223T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.225 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::381 + interfaces: + Loopback0: + ipv6: fc00:c:c:e1::1/128 + Ethernet1: + ipv6: fc00:a::382/126 + bp_interface: + ipv6: fc00:b::e1/64 + + ARISTA224T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.226 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::385 + interfaces: + Loopback0: + ipv6: fc00:c:c:e2::1/128 + Ethernet1: + ipv6: fc00:a::386/126 + bp_interface: + ipv6: fc00:b::e2/64 + + ARISTA225T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.227 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::389 + interfaces: + Loopback0: + ipv6: fc00:c:c:e3::1/128 + Ethernet1: + ipv6: 
fc00:a::38a/126 + bp_interface: + ipv6: fc00:b::e3/64 + + ARISTA226T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.228 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::38d + interfaces: + Loopback0: + ipv6: fc00:c:c:e4::1/128 + Ethernet1: + ipv6: fc00:a::38e/126 + bp_interface: + ipv6: fc00:b::e4/64 + + ARISTA227T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.229 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::391 + interfaces: + Loopback0: + ipv6: fc00:c:c:e5::1/128 + Ethernet1: + ipv6: fc00:a::392/126 + bp_interface: + ipv6: fc00:b::e5/64 + + ARISTA228T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.230 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::395 + interfaces: + Loopback0: + ipv6: fc00:c:c:e6::1/128 + Ethernet1: + ipv6: fc00:a::396/126 + bp_interface: + ipv6: fc00:b::e6/64 + + ARISTA229T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.231 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::399 + interfaces: + Loopback0: + ipv6: fc00:c:c:e7::1/128 + Ethernet1: + ipv6: fc00:a::39a/126 + bp_interface: + ipv6: fc00:b::e7/64 + + ARISTA230T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.232 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::39d + interfaces: + Loopback0: + ipv6: fc00:c:c:e8::1/128 + Ethernet1: + ipv6: fc00:a::39e/126 + bp_interface: + ipv6: fc00:b::e8/64 + + ARISTA231T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.233 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:e9::1/128 + Ethernet1: + ipv6: fc00:a::3a2/126 + bp_interface: + ipv6: fc00:b::e9/64 + + ARISTA232T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.234 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ea::1/128 + Ethernet1: + ipv6: fc00:a::3a6/126 + bp_interface: + ipv6: fc00:b::ea/64 + + ARISTA233T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.0.235 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:eb::1/128 + Ethernet1: + ipv6: fc00:a::3aa/126 + bp_interface: + ipv6: fc00:b::eb/64 + + ARISTA234T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.236 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3ad + interfaces: + Loopback0: + ipv6: fc00:c:c:ec::1/128 + Ethernet1: + ipv6: fc00:a::3ae/126 + bp_interface: + ipv6: fc00:b::ec/64 + + ARISTA235T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.237 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:ed::1/128 + Ethernet1: + ipv6: fc00:a::3b2/126 + bp_interface: + ipv6: fc00:b::ed/64 + + ARISTA236T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.238 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:ee::1/128 + Ethernet1: + ipv6: fc00:a::3b6/126 + bp_interface: + ipv6: fc00:b::ee/64 + + ARISTA237T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.239 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ef::1/128 + Ethernet1: + ipv6: fc00:a::3ba/126 + bp_interface: + ipv6: fc00:b::ef/64 + + ARISTA238T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.240 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3bd + interfaces: + Loopback0: + ipv6: fc00:c:c:f0::1/128 + Ethernet1: + ipv6: fc00:a::3be/126 + bp_interface: + ipv6: fc00:b::f0/64 + + ARISTA239T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.241 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f1::1/128 + Ethernet1: + ipv6: fc00:a::3c2/126 + bp_interface: + ipv6: fc00:b::f1/64 + + ARISTA240T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.242 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f2::1/128 + 
Ethernet1: + ipv6: fc00:a::3c6/126 + bp_interface: + ipv6: fc00:b::f2/64 + + ARISTA241T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.243 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f3::1/128 + Ethernet1: + ipv6: fc00:a::3ca/126 + bp_interface: + ipv6: fc00:b::f3/64 + + ARISTA242T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.244 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3cd + interfaces: + Loopback0: + ipv6: fc00:c:c:f4::1/128 + Ethernet1: + ipv6: fc00:a::3ce/126 + bp_interface: + ipv6: fc00:b::f4/64 + + ARISTA243T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.245 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f5::1/128 + Ethernet1: + ipv6: fc00:a::3d2/126 + bp_interface: + ipv6: fc00:b::f5/64 + + ARISTA244T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.246 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:f6::1/128 + Ethernet1: + ipv6: fc00:a::3d6/126 + bp_interface: + ipv6: fc00:b::f6/64 + + ARISTA245T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.247 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:f7::1/128 + Ethernet1: + ipv6: fc00:a::3da/126 + bp_interface: + ipv6: fc00:b::f7/64 + + ARISTA246T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.248 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3dd + interfaces: + Loopback0: + ipv6: fc00:c:c:f8::1/128 + Ethernet1: + ipv6: fc00:a::3de/126 + bp_interface: + ipv6: fc00:b::f8/64 + + ARISTA247T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.249 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:f9::1/128 + Ethernet1: + ipv6: fc00:a::3e2/126 + bp_interface: + ipv6: fc00:b::f9/64 + + ARISTA248T0: + properties: + - common + - tor + bgp: 
+ router-id: 0.12.0.250 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fa::1/128 + Ethernet1: + ipv6: fc00:a::3e6/126 + bp_interface: + ipv6: fc00:b::fa/64 + + ARISTA249T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.251 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:fb::1/128 + Ethernet1: + ipv6: fc00:a::3ea/126 + bp_interface: + ipv6: fc00:b::fb/64 + + ARISTA250T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.252 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3ed + interfaces: + Loopback0: + ipv6: fc00:c:c:fc::1/128 + Ethernet1: + ipv6: fc00:a::3ee/126 + bp_interface: + ipv6: fc00:b::fc/64 + + ARISTA251T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.253 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:fd::1/128 + Ethernet1: + ipv6: fc00:a::3f2/126 + bp_interface: + ipv6: fc00:b::fd/64 + + ARISTA252T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.254 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:fe::1/128 + Ethernet1: + ipv6: fc00:a::3f6/126 + bp_interface: + ipv6: fc00:b::fe/64 + + ARISTA253T0: + properties: + - common + - tor + bgp: + router-id: 0.12.0.255 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:ff::1/128 + Ethernet1: + ipv6: fc00:a::3fa/126 + bp_interface: + ipv6: fc00:b::ff/64 + + ARISTA254T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.0 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::3fd + interfaces: + Loopback0: + ipv6: fc00:c:c:100::1/128 + Ethernet1: + ipv6: fc00:a::3fe/126 + bp_interface: + ipv6: fc00:b::100/64 + + ARISTA255T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.1 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::401 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:101::1/128 + Ethernet1: + ipv6: fc00:a::402/126 + bp_interface: + ipv6: fc00:b::101/64 + + ARISTA256T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.2 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::405 + interfaces: + Loopback0: + ipv6: fc00:c:c:102::1/128 + Ethernet1: + ipv6: fc00:a::406/126 + bp_interface: + ipv6: fc00:b::102/64 + + ARISTA257T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.3 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::409 + interfaces: + Loopback0: + ipv6: fc00:c:c:103::1/128 + Ethernet1: + ipv6: fc00:a::40a/126 + bp_interface: + ipv6: fc00:b::103/64 + + ARISTA258T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.4 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::40d + interfaces: + Loopback0: + ipv6: fc00:c:c:104::1/128 + Ethernet1: + ipv6: fc00:a::40e/126 + bp_interface: + ipv6: fc00:b::104/64 + + ARISTA259T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.5 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::411 + interfaces: + Loopback0: + ipv6: fc00:c:c:105::1/128 + Ethernet1: + ipv6: fc00:a::412/126 + bp_interface: + ipv6: fc00:b::105/64 + + ARISTA260T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.6 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::415 + interfaces: + Loopback0: + ipv6: fc00:c:c:106::1/128 + Ethernet1: + ipv6: fc00:a::416/126 + bp_interface: + ipv6: fc00:b::106/64 + + ARISTA261T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.7 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::419 + interfaces: + Loopback0: + ipv6: fc00:c:c:107::1/128 + Ethernet1: + ipv6: fc00:a::41a/126 + bp_interface: + ipv6: fc00:b::107/64 + + ARISTA262T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.8 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::41d + interfaces: + Loopback0: + ipv6: fc00:c:c:108::1/128 + Ethernet1: + ipv6: fc00:a::41e/126 + bp_interface: + ipv6: fc00:b::108/64 + + ARISTA263T0: + properties: + - 
common + - tor + bgp: + router-id: 0.12.1.9 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::421 + interfaces: + Loopback0: + ipv6: fc00:c:c:109::1/128 + Ethernet1: + ipv6: fc00:a::422/126 + bp_interface: + ipv6: fc00:b::109/64 + + ARISTA264T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.10 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::425 + interfaces: + Loopback0: + ipv6: fc00:c:c:10a::1/128 + Ethernet1: + ipv6: fc00:a::426/126 + bp_interface: + ipv6: fc00:b::10a/64 + + ARISTA265T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.11 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::429 + interfaces: + Loopback0: + ipv6: fc00:c:c:10b::1/128 + Ethernet1: + ipv6: fc00:a::42a/126 + bp_interface: + ipv6: fc00:b::10b/64 + + ARISTA266T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.12 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::42d + interfaces: + Loopback0: + ipv6: fc00:c:c:10c::1/128 + Ethernet1: + ipv6: fc00:a::42e/126 + bp_interface: + ipv6: fc00:b::10c/64 + + ARISTA267T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.13 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::431 + interfaces: + Loopback0: + ipv6: fc00:c:c:10d::1/128 + Ethernet1: + ipv6: fc00:a::432/126 + bp_interface: + ipv6: fc00:b::10d/64 + + ARISTA268T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.14 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::435 + interfaces: + Loopback0: + ipv6: fc00:c:c:10e::1/128 + Ethernet1: + ipv6: fc00:a::436/126 + bp_interface: + ipv6: fc00:b::10e/64 + + ARISTA269T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.15 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::439 + interfaces: + Loopback0: + ipv6: fc00:c:c:10f::1/128 + Ethernet1: + ipv6: fc00:a::43a/126 + bp_interface: + ipv6: fc00:b::10f/64 + + ARISTA270T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.16 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::43d + interfaces: + 
Loopback0: + ipv6: fc00:c:c:110::1/128 + Ethernet1: + ipv6: fc00:a::43e/126 + bp_interface: + ipv6: fc00:b::110/64 + + ARISTA271T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.17 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::441 + interfaces: + Loopback0: + ipv6: fc00:c:c:111::1/128 + Ethernet1: + ipv6: fc00:a::442/126 + bp_interface: + ipv6: fc00:b::111/64 + + ARISTA272T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.18 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::445 + interfaces: + Loopback0: + ipv6: fc00:c:c:112::1/128 + Ethernet1: + ipv6: fc00:a::446/126 + bp_interface: + ipv6: fc00:b::112/64 + + ARISTA273T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.19 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::449 + interfaces: + Loopback0: + ipv6: fc00:c:c:113::1/128 + Ethernet1: + ipv6: fc00:a::44a/126 + bp_interface: + ipv6: fc00:b::113/64 + + ARISTA274T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.20 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::44d + interfaces: + Loopback0: + ipv6: fc00:c:c:114::1/128 + Ethernet1: + ipv6: fc00:a::44e/126 + bp_interface: + ipv6: fc00:b::114/64 + + ARISTA275T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.21 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::451 + interfaces: + Loopback0: + ipv6: fc00:c:c:115::1/128 + Ethernet1: + ipv6: fc00:a::452/126 + bp_interface: + ipv6: fc00:b::115/64 + + ARISTA276T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.22 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::455 + interfaces: + Loopback0: + ipv6: fc00:c:c:116::1/128 + Ethernet1: + ipv6: fc00:a::456/126 + bp_interface: + ipv6: fc00:b::116/64 + + ARISTA277T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.23 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::459 + interfaces: + Loopback0: + ipv6: fc00:c:c:117::1/128 + Ethernet1: + ipv6: fc00:a::45a/126 + bp_interface: + ipv6: fc00:b::117/64 + + 
ARISTA278T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.24 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::45d + interfaces: + Loopback0: + ipv6: fc00:c:c:118::1/128 + Ethernet1: + ipv6: fc00:a::45e/126 + bp_interface: + ipv6: fc00:b::118/64 + + ARISTA279T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.25 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::461 + interfaces: + Loopback0: + ipv6: fc00:c:c:119::1/128 + Ethernet1: + ipv6: fc00:a::462/126 + bp_interface: + ipv6: fc00:b::119/64 + + ARISTA280T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.26 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::465 + interfaces: + Loopback0: + ipv6: fc00:c:c:11a::1/128 + Ethernet1: + ipv6: fc00:a::466/126 + bp_interface: + ipv6: fc00:b::11a/64 + + ARISTA281T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.27 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::469 + interfaces: + Loopback0: + ipv6: fc00:c:c:11b::1/128 + Ethernet1: + ipv6: fc00:a::46a/126 + bp_interface: + ipv6: fc00:b::11b/64 + + ARISTA282T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.28 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::46d + interfaces: + Loopback0: + ipv6: fc00:c:c:11c::1/128 + Ethernet1: + ipv6: fc00:a::46e/126 + bp_interface: + ipv6: fc00:b::11c/64 + + ARISTA283T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.29 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::471 + interfaces: + Loopback0: + ipv6: fc00:c:c:11d::1/128 + Ethernet1: + ipv6: fc00:a::472/126 + bp_interface: + ipv6: fc00:b::11d/64 + + ARISTA284T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.30 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::475 + interfaces: + Loopback0: + ipv6: fc00:c:c:11e::1/128 + Ethernet1: + ipv6: fc00:a::476/126 + bp_interface: + ipv6: fc00:b::11e/64 + + ARISTA285T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.31 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::479 + interfaces: + Loopback0: + ipv6: fc00:c:c:11f::1/128 + Ethernet1: + ipv6: fc00:a::47a/126 + bp_interface: + ipv6: fc00:b::11f/64 + + ARISTA286T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.32 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::47d + interfaces: + Loopback0: + ipv6: fc00:c:c:120::1/128 + Ethernet1: + ipv6: fc00:a::47e/126 + bp_interface: + ipv6: fc00:b::120/64 + + ARISTA287T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.33 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::481 + interfaces: + Loopback0: + ipv6: fc00:c:c:121::1/128 + Ethernet1: + ipv6: fc00:a::482/126 + bp_interface: + ipv6: fc00:b::121/64 + + ARISTA288T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.34 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::485 + interfaces: + Loopback0: + ipv6: fc00:c:c:122::1/128 + Ethernet1: + ipv6: fc00:a::486/126 + bp_interface: + ipv6: fc00:b::122/64 + + ARISTA289T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.35 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::489 + interfaces: + Loopback0: + ipv6: fc00:c:c:123::1/128 + Ethernet1: + ipv6: fc00:a::48a/126 + bp_interface: + ipv6: fc00:b::123/64 + + ARISTA290T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.36 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::48d + interfaces: + Loopback0: + ipv6: fc00:c:c:124::1/128 + Ethernet1: + ipv6: fc00:a::48e/126 + bp_interface: + ipv6: fc00:b::124/64 + + ARISTA291T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.37 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::491 + interfaces: + Loopback0: + ipv6: fc00:c:c:125::1/128 + Ethernet1: + ipv6: fc00:a::492/126 + bp_interface: + ipv6: fc00:b::125/64 + + ARISTA292T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.38 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::495 + interfaces: + Loopback0: + ipv6: fc00:c:c:126::1/128 + Ethernet1: + ipv6: fc00:a::496/126 + bp_interface: + 
ipv6: fc00:b::126/64 + + ARISTA293T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.39 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::499 + interfaces: + Loopback0: + ipv6: fc00:c:c:127::1/128 + Ethernet1: + ipv6: fc00:a::49a/126 + bp_interface: + ipv6: fc00:b::127/64 + + ARISTA294T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.40 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::49d + interfaces: + Loopback0: + ipv6: fc00:c:c:128::1/128 + Ethernet1: + ipv6: fc00:a::49e/126 + bp_interface: + ipv6: fc00:b::128/64 + + ARISTA295T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.41 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:129::1/128 + Ethernet1: + ipv6: fc00:a::4a2/126 + bp_interface: + ipv6: fc00:b::129/64 + + ARISTA296T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.42 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:12a::1/128 + Ethernet1: + ipv6: fc00:a::4a6/126 + bp_interface: + ipv6: fc00:b::12a/64 + + ARISTA297T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.43 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:12b::1/128 + Ethernet1: + ipv6: fc00:a::4aa/126 + bp_interface: + ipv6: fc00:b::12b/64 + + ARISTA298T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.44 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4ad + interfaces: + Loopback0: + ipv6: fc00:c:c:12c::1/128 + Ethernet1: + ipv6: fc00:a::4ae/126 + bp_interface: + ipv6: fc00:b::12c/64 + + ARISTA299T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.45 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:12d::1/128 + Ethernet1: + ipv6: fc00:a::4b2/126 + bp_interface: + ipv6: fc00:b::12d/64 + + ARISTA300T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.46 + asn: 4200000000 + 
peers: + 4200100000: + - fc00:a::4b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:12e::1/128 + Ethernet1: + ipv6: fc00:a::4b6/126 + bp_interface: + ipv6: fc00:b::12e/64 + + ARISTA301T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.47 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:12f::1/128 + Ethernet1: + ipv6: fc00:a::4ba/126 + bp_interface: + ipv6: fc00:b::12f/64 + + ARISTA302T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.48 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4bd + interfaces: + Loopback0: + ipv6: fc00:c:c:130::1/128 + Ethernet1: + ipv6: fc00:a::4be/126 + bp_interface: + ipv6: fc00:b::130/64 + + ARISTA303T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.49 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:131::1/128 + Ethernet1: + ipv6: fc00:a::4c2/126 + bp_interface: + ipv6: fc00:b::131/64 + + ARISTA304T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.50 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:132::1/128 + Ethernet1: + ipv6: fc00:a::4c6/126 + bp_interface: + ipv6: fc00:b::132/64 + + ARISTA305T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.51 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:133::1/128 + Ethernet1: + ipv6: fc00:a::4ca/126 + bp_interface: + ipv6: fc00:b::133/64 + + ARISTA306T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.52 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4cd + interfaces: + Loopback0: + ipv6: fc00:c:c:134::1/128 + Ethernet1: + ipv6: fc00:a::4ce/126 + bp_interface: + ipv6: fc00:b::134/64 + + ARISTA307T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.53 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:135::1/128 + Ethernet1: + ipv6: 
fc00:a::4d2/126 + bp_interface: + ipv6: fc00:b::135/64 + + ARISTA308T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.54 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:136::1/128 + Ethernet1: + ipv6: fc00:a::4d6/126 + bp_interface: + ipv6: fc00:b::136/64 + + ARISTA309T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.55 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:137::1/128 + Ethernet1: + ipv6: fc00:a::4da/126 + bp_interface: + ipv6: fc00:b::137/64 + + ARISTA310T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.56 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4dd + interfaces: + Loopback0: + ipv6: fc00:c:c:138::1/128 + Ethernet1: + ipv6: fc00:a::4de/126 + bp_interface: + ipv6: fc00:b::138/64 + + ARISTA311T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.57 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:139::1/128 + Ethernet1: + ipv6: fc00:a::4e2/126 + bp_interface: + ipv6: fc00:b::139/64 + + ARISTA312T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.58 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:13a::1/128 + Ethernet1: + ipv6: fc00:a::4e6/126 + bp_interface: + ipv6: fc00:b::13a/64 + + ARISTA313T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.59 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:13b::1/128 + Ethernet1: + ipv6: fc00:a::4ea/126 + bp_interface: + ipv6: fc00:b::13b/64 + + ARISTA314T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.60 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4ed + interfaces: + Loopback0: + ipv6: fc00:c:c:13c::1/128 + Ethernet1: + ipv6: fc00:a::4ee/126 + bp_interface: + ipv6: fc00:b::13c/64 + + ARISTA315T0: + properties: + - common + - tor + bgp: + 
router-id: 0.12.1.61 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:13d::1/128 + Ethernet1: + ipv6: fc00:a::4f2/126 + bp_interface: + ipv6: fc00:b::13d/64 + + ARISTA316T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.62 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:13e::1/128 + Ethernet1: + ipv6: fc00:a::4f6/126 + bp_interface: + ipv6: fc00:b::13e/64 + + ARISTA317T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.63 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:13f::1/128 + Ethernet1: + ipv6: fc00:a::4fa/126 + bp_interface: + ipv6: fc00:b::13f/64 + + ARISTA318T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.64 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::4fd + interfaces: + Loopback0: + ipv6: fc00:c:c:140::1/128 + Ethernet1: + ipv6: fc00:a::4fe/126 + bp_interface: + ipv6: fc00:b::140/64 + + ARISTA319T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.65 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::501 + interfaces: + Loopback0: + ipv6: fc00:c:c:141::1/128 + Ethernet1: + ipv6: fc00:a::502/126 + bp_interface: + ipv6: fc00:b::141/64 + + ARISTA320T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.66 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::505 + interfaces: + Loopback0: + ipv6: fc00:c:c:142::1/128 + Ethernet1: + ipv6: fc00:a::506/126 + bp_interface: + ipv6: fc00:b::142/64 + + ARISTA321T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.67 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::509 + interfaces: + Loopback0: + ipv6: fc00:c:c:143::1/128 + Ethernet1: + ipv6: fc00:a::50a/126 + bp_interface: + ipv6: fc00:b::143/64 + + ARISTA322T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.68 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::50d + interfaces: + Loopback0: + ipv6: 
fc00:c:c:144::1/128 + Ethernet1: + ipv6: fc00:a::50e/126 + bp_interface: + ipv6: fc00:b::144/64 + + ARISTA323T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.69 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::511 + interfaces: + Loopback0: + ipv6: fc00:c:c:145::1/128 + Ethernet1: + ipv6: fc00:a::512/126 + bp_interface: + ipv6: fc00:b::145/64 + + ARISTA324T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.70 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::515 + interfaces: + Loopback0: + ipv6: fc00:c:c:146::1/128 + Ethernet1: + ipv6: fc00:a::516/126 + bp_interface: + ipv6: fc00:b::146/64 + + ARISTA325T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.71 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::519 + interfaces: + Loopback0: + ipv6: fc00:c:c:147::1/128 + Ethernet1: + ipv6: fc00:a::51a/126 + bp_interface: + ipv6: fc00:b::147/64 + + ARISTA326T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.72 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::51d + interfaces: + Loopback0: + ipv6: fc00:c:c:148::1/128 + Ethernet1: + ipv6: fc00:a::51e/126 + bp_interface: + ipv6: fc00:b::148/64 + + ARISTA327T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.73 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::521 + interfaces: + Loopback0: + ipv6: fc00:c:c:149::1/128 + Ethernet1: + ipv6: fc00:a::522/126 + bp_interface: + ipv6: fc00:b::149/64 + + ARISTA328T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.74 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::525 + interfaces: + Loopback0: + ipv6: fc00:c:c:14a::1/128 + Ethernet1: + ipv6: fc00:a::526/126 + bp_interface: + ipv6: fc00:b::14a/64 + + ARISTA329T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.75 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::529 + interfaces: + Loopback0: + ipv6: fc00:c:c:14b::1/128 + Ethernet1: + ipv6: fc00:a::52a/126 + bp_interface: + ipv6: fc00:b::14b/64 + + ARISTA330T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.1.76 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::52d + interfaces: + Loopback0: + ipv6: fc00:c:c:14c::1/128 + Ethernet1: + ipv6: fc00:a::52e/126 + bp_interface: + ipv6: fc00:b::14c/64 + + ARISTA331T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.77 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::531 + interfaces: + Loopback0: + ipv6: fc00:c:c:14d::1/128 + Ethernet1: + ipv6: fc00:a::532/126 + bp_interface: + ipv6: fc00:b::14d/64 + + ARISTA332T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.78 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::535 + interfaces: + Loopback0: + ipv6: fc00:c:c:14e::1/128 + Ethernet1: + ipv6: fc00:a::536/126 + bp_interface: + ipv6: fc00:b::14e/64 + + ARISTA333T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.79 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::539 + interfaces: + Loopback0: + ipv6: fc00:c:c:14f::1/128 + Ethernet1: + ipv6: fc00:a::53a/126 + bp_interface: + ipv6: fc00:b::14f/64 + + ARISTA334T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.80 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::53d + interfaces: + Loopback0: + ipv6: fc00:c:c:150::1/128 + Ethernet1: + ipv6: fc00:a::53e/126 + bp_interface: + ipv6: fc00:b::150/64 + + ARISTA335T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.81 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::541 + interfaces: + Loopback0: + ipv6: fc00:c:c:151::1/128 + Ethernet1: + ipv6: fc00:a::542/126 + bp_interface: + ipv6: fc00:b::151/64 + + ARISTA336T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.82 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::545 + interfaces: + Loopback0: + ipv6: fc00:c:c:152::1/128 + Ethernet1: + ipv6: fc00:a::546/126 + bp_interface: + ipv6: fc00:b::152/64 + + ARISTA337T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.83 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::549 + 
interfaces: + Loopback0: + ipv6: fc00:c:c:153::1/128 + Ethernet1: + ipv6: fc00:a::54a/126 + bp_interface: + ipv6: fc00:b::153/64 + + ARISTA338T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.84 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::54d + interfaces: + Loopback0: + ipv6: fc00:c:c:154::1/128 + Ethernet1: + ipv6: fc00:a::54e/126 + bp_interface: + ipv6: fc00:b::154/64 + + ARISTA339T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.85 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::551 + interfaces: + Loopback0: + ipv6: fc00:c:c:155::1/128 + Ethernet1: + ipv6: fc00:a::552/126 + bp_interface: + ipv6: fc00:b::155/64 + + ARISTA340T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.86 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::555 + interfaces: + Loopback0: + ipv6: fc00:c:c:156::1/128 + Ethernet1: + ipv6: fc00:a::556/126 + bp_interface: + ipv6: fc00:b::156/64 + + ARISTA341T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.87 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::559 + interfaces: + Loopback0: + ipv6: fc00:c:c:157::1/128 + Ethernet1: + ipv6: fc00:a::55a/126 + bp_interface: + ipv6: fc00:b::157/64 + + ARISTA342T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.88 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::55d + interfaces: + Loopback0: + ipv6: fc00:c:c:158::1/128 + Ethernet1: + ipv6: fc00:a::55e/126 + bp_interface: + ipv6: fc00:b::158/64 + + ARISTA343T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.89 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::561 + interfaces: + Loopback0: + ipv6: fc00:c:c:159::1/128 + Ethernet1: + ipv6: fc00:a::562/126 + bp_interface: + ipv6: fc00:b::159/64 + + ARISTA344T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.90 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::565 + interfaces: + Loopback0: + ipv6: fc00:c:c:15a::1/128 + Ethernet1: + ipv6: fc00:a::566/126 + bp_interface: + ipv6: 
fc00:b::15a/64 + + ARISTA345T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.91 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::569 + interfaces: + Loopback0: + ipv6: fc00:c:c:15b::1/128 + Ethernet1: + ipv6: fc00:a::56a/126 + bp_interface: + ipv6: fc00:b::15b/64 + + ARISTA346T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.92 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::56d + interfaces: + Loopback0: + ipv6: fc00:c:c:15c::1/128 + Ethernet1: + ipv6: fc00:a::56e/126 + bp_interface: + ipv6: fc00:b::15c/64 + + ARISTA347T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.93 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::571 + interfaces: + Loopback0: + ipv6: fc00:c:c:15d::1/128 + Ethernet1: + ipv6: fc00:a::572/126 + bp_interface: + ipv6: fc00:b::15d/64 + + ARISTA348T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.94 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::575 + interfaces: + Loopback0: + ipv6: fc00:c:c:15e::1/128 + Ethernet1: + ipv6: fc00:a::576/126 + bp_interface: + ipv6: fc00:b::15e/64 + + ARISTA349T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.95 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::579 + interfaces: + Loopback0: + ipv6: fc00:c:c:15f::1/128 + Ethernet1: + ipv6: fc00:a::57a/126 + bp_interface: + ipv6: fc00:b::15f/64 + + ARISTA350T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.96 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::57d + interfaces: + Loopback0: + ipv6: fc00:c:c:160::1/128 + Ethernet1: + ipv6: fc00:a::57e/126 + bp_interface: + ipv6: fc00:b::160/64 + + ARISTA351T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.97 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::581 + interfaces: + Loopback0: + ipv6: fc00:c:c:161::1/128 + Ethernet1: + ipv6: fc00:a::582/126 + bp_interface: + ipv6: fc00:b::161/64 + + ARISTA352T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.98 + asn: 4200000000 + peers: + 
4200100000: + - fc00:a::585 + interfaces: + Loopback0: + ipv6: fc00:c:c:162::1/128 + Ethernet1: + ipv6: fc00:a::586/126 + bp_interface: + ipv6: fc00:b::162/64 + + ARISTA353T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.99 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::589 + interfaces: + Loopback0: + ipv6: fc00:c:c:163::1/128 + Ethernet1: + ipv6: fc00:a::58a/126 + bp_interface: + ipv6: fc00:b::163/64 + + ARISTA354T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.100 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::58d + interfaces: + Loopback0: + ipv6: fc00:c:c:164::1/128 + Ethernet1: + ipv6: fc00:a::58e/126 + bp_interface: + ipv6: fc00:b::164/64 + + ARISTA355T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.101 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::591 + interfaces: + Loopback0: + ipv6: fc00:c:c:165::1/128 + Ethernet1: + ipv6: fc00:a::592/126 + bp_interface: + ipv6: fc00:b::165/64 + + ARISTA356T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.102 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::595 + interfaces: + Loopback0: + ipv6: fc00:c:c:166::1/128 + Ethernet1: + ipv6: fc00:a::596/126 + bp_interface: + ipv6: fc00:b::166/64 + + ARISTA357T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.103 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::599 + interfaces: + Loopback0: + ipv6: fc00:c:c:167::1/128 + Ethernet1: + ipv6: fc00:a::59a/126 + bp_interface: + ipv6: fc00:b::167/64 + + ARISTA358T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.104 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::59d + interfaces: + Loopback0: + ipv6: fc00:c:c:168::1/128 + Ethernet1: + ipv6: fc00:a::59e/126 + bp_interface: + ipv6: fc00:b::168/64 + + ARISTA359T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.105 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:169::1/128 + Ethernet1: + ipv6: 
fc00:a::5a2/126 + bp_interface: + ipv6: fc00:b::169/64 + + ARISTA360T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.106 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:16a::1/128 + Ethernet1: + ipv6: fc00:a::5a6/126 + bp_interface: + ipv6: fc00:b::16a/64 + + ARISTA361T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.107 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:16b::1/128 + Ethernet1: + ipv6: fc00:a::5aa/126 + bp_interface: + ipv6: fc00:b::16b/64 + + ARISTA362T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.108 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5ad + interfaces: + Loopback0: + ipv6: fc00:c:c:16c::1/128 + Ethernet1: + ipv6: fc00:a::5ae/126 + bp_interface: + ipv6: fc00:b::16c/64 + + ARISTA363T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.109 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:16d::1/128 + Ethernet1: + ipv6: fc00:a::5b2/126 + bp_interface: + ipv6: fc00:b::16d/64 + + ARISTA364T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.110 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:16e::1/128 + Ethernet1: + ipv6: fc00:a::5b6/126 + bp_interface: + ipv6: fc00:b::16e/64 + + ARISTA365T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.111 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:16f::1/128 + Ethernet1: + ipv6: fc00:a::5ba/126 + bp_interface: + ipv6: fc00:b::16f/64 + + ARISTA366T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.112 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5bd + interfaces: + Loopback0: + ipv6: fc00:c:c:170::1/128 + Ethernet1: + ipv6: fc00:a::5be/126 + bp_interface: + ipv6: fc00:b::170/64 + + ARISTA367T0: + properties: + - common + - tor + bgp: + 
router-id: 0.12.1.113 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:171::1/128 + Ethernet1: + ipv6: fc00:a::5c2/126 + bp_interface: + ipv6: fc00:b::171/64 + + ARISTA368T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.114 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:172::1/128 + Ethernet1: + ipv6: fc00:a::5c6/126 + bp_interface: + ipv6: fc00:b::172/64 + + ARISTA369T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.115 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:173::1/128 + Ethernet1: + ipv6: fc00:a::5ca/126 + bp_interface: + ipv6: fc00:b::173/64 + + ARISTA370T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.116 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5cd + interfaces: + Loopback0: + ipv6: fc00:c:c:174::1/128 + Ethernet1: + ipv6: fc00:a::5ce/126 + bp_interface: + ipv6: fc00:b::174/64 + + ARISTA371T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.117 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:175::1/128 + Ethernet1: + ipv6: fc00:a::5d2/126 + bp_interface: + ipv6: fc00:b::175/64 + + ARISTA372T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.118 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:176::1/128 + Ethernet1: + ipv6: fc00:a::5d6/126 + bp_interface: + ipv6: fc00:b::176/64 + + ARISTA373T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.119 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:177::1/128 + Ethernet1: + ipv6: fc00:a::5da/126 + bp_interface: + ipv6: fc00:b::177/64 + + ARISTA374T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.120 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5dd + interfaces: + Loopback0: + ipv6: 
fc00:c:c:178::1/128 + Ethernet1: + ipv6: fc00:a::5de/126 + bp_interface: + ipv6: fc00:b::178/64 + + ARISTA375T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.121 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:179::1/128 + Ethernet1: + ipv6: fc00:a::5e2/126 + bp_interface: + ipv6: fc00:b::179/64 + + ARISTA376T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.122 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:17a::1/128 + Ethernet1: + ipv6: fc00:a::5e6/126 + bp_interface: + ipv6: fc00:b::17a/64 + + ARISTA377T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.123 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:17b::1/128 + Ethernet1: + ipv6: fc00:a::5ea/126 + bp_interface: + ipv6: fc00:b::17b/64 + + ARISTA378T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.124 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5ed + interfaces: + Loopback0: + ipv6: fc00:c:c:17c::1/128 + Ethernet1: + ipv6: fc00:a::5ee/126 + bp_interface: + ipv6: fc00:b::17c/64 + + ARISTA379T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.125 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:17d::1/128 + Ethernet1: + ipv6: fc00:a::5f2/126 + bp_interface: + ipv6: fc00:b::17d/64 + + ARISTA380T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.126 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:17e::1/128 + Ethernet1: + ipv6: fc00:a::5f6/126 + bp_interface: + ipv6: fc00:b::17e/64 + + ARISTA381T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.127 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:17f::1/128 + Ethernet1: + ipv6: fc00:a::5fa/126 + bp_interface: + ipv6: fc00:b::17f/64 + + ARISTA382T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.1.128 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::5fd + interfaces: + Loopback0: + ipv6: fc00:c:c:180::1/128 + Ethernet1: + ipv6: fc00:a::5fe/126 + bp_interface: + ipv6: fc00:b::180/64 + + ARISTA383T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.129 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::601 + interfaces: + Loopback0: + ipv6: fc00:c:c:181::1/128 + Ethernet1: + ipv6: fc00:a::602/126 + bp_interface: + ipv6: fc00:b::181/64 + + ARISTA384T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.130 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::605 + interfaces: + Loopback0: + ipv6: fc00:c:c:182::1/128 + Ethernet1: + ipv6: fc00:a::606/126 + bp_interface: + ipv6: fc00:b::182/64 + + ARISTA385T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.131 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::609 + interfaces: + Loopback0: + ipv6: fc00:c:c:183::1/128 + Ethernet1: + ipv6: fc00:a::60a/126 + bp_interface: + ipv6: fc00:b::183/64 + + ARISTA386T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.132 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::60d + interfaces: + Loopback0: + ipv6: fc00:c:c:184::1/128 + Ethernet1: + ipv6: fc00:a::60e/126 + bp_interface: + ipv6: fc00:b::184/64 + + ARISTA387T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.133 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::611 + interfaces: + Loopback0: + ipv6: fc00:c:c:185::1/128 + Ethernet1: + ipv6: fc00:a::612/126 + bp_interface: + ipv6: fc00:b::185/64 + + ARISTA388T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.134 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::615 + interfaces: + Loopback0: + ipv6: fc00:c:c:186::1/128 + Ethernet1: + ipv6: fc00:a::616/126 + bp_interface: + ipv6: fc00:b::186/64 + + ARISTA389T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.135 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::619 + interfaces: + Loopback0: + ipv6: fc00:c:c:187::1/128 + Ethernet1: + ipv6: fc00:a::61a/126 + bp_interface: + ipv6: fc00:b::187/64 + + ARISTA390T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.136 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::61d + interfaces: + Loopback0: + ipv6: fc00:c:c:188::1/128 + Ethernet1: + ipv6: fc00:a::61e/126 + bp_interface: + ipv6: fc00:b::188/64 + + ARISTA391T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.137 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::621 + interfaces: + Loopback0: + ipv6: fc00:c:c:189::1/128 + Ethernet1: + ipv6: fc00:a::622/126 + bp_interface: + ipv6: fc00:b::189/64 + + ARISTA392T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.138 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::625 + interfaces: + Loopback0: + ipv6: fc00:c:c:18a::1/128 + Ethernet1: + ipv6: fc00:a::626/126 + bp_interface: + ipv6: fc00:b::18a/64 + + ARISTA393T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.139 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::629 + interfaces: + Loopback0: + ipv6: fc00:c:c:18b::1/128 + Ethernet1: + ipv6: fc00:a::62a/126 + bp_interface: + ipv6: fc00:b::18b/64 + + ARISTA394T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.140 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::62d + interfaces: + Loopback0: + ipv6: fc00:c:c:18c::1/128 + Ethernet1: + ipv6: fc00:a::62e/126 + bp_interface: + ipv6: fc00:b::18c/64 + + ARISTA395T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.141 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::631 + interfaces: + Loopback0: + ipv6: fc00:c:c:18d::1/128 + Ethernet1: + ipv6: fc00:a::632/126 + bp_interface: + ipv6: fc00:b::18d/64 + + ARISTA396T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.142 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::635 + interfaces: + Loopback0: + ipv6: fc00:c:c:18e::1/128 + Ethernet1: + ipv6: fc00:a::636/126 + 
bp_interface: + ipv6: fc00:b::18e/64 + + ARISTA397T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.143 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::639 + interfaces: + Loopback0: + ipv6: fc00:c:c:18f::1/128 + Ethernet1: + ipv6: fc00:a::63a/126 + bp_interface: + ipv6: fc00:b::18f/64 + + ARISTA398T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.144 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::63d + interfaces: + Loopback0: + ipv6: fc00:c:c:190::1/128 + Ethernet1: + ipv6: fc00:a::63e/126 + bp_interface: + ipv6: fc00:b::190/64 + + ARISTA399T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.145 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::641 + interfaces: + Loopback0: + ipv6: fc00:c:c:191::1/128 + Ethernet1: + ipv6: fc00:a::642/126 + bp_interface: + ipv6: fc00:b::191/64 + + ARISTA400T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.146 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::645 + interfaces: + Loopback0: + ipv6: fc00:c:c:192::1/128 + Ethernet1: + ipv6: fc00:a::646/126 + bp_interface: + ipv6: fc00:b::192/64 + + ARISTA401T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.147 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::649 + interfaces: + Loopback0: + ipv6: fc00:c:c:193::1/128 + Ethernet1: + ipv6: fc00:a::64a/126 + bp_interface: + ipv6: fc00:b::193/64 + + ARISTA402T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.148 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::64d + interfaces: + Loopback0: + ipv6: fc00:c:c:194::1/128 + Ethernet1: + ipv6: fc00:a::64e/126 + bp_interface: + ipv6: fc00:b::194/64 + + ARISTA403T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.149 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::651 + interfaces: + Loopback0: + ipv6: fc00:c:c:195::1/128 + Ethernet1: + ipv6: fc00:a::652/126 + bp_interface: + ipv6: fc00:b::195/64 + + ARISTA404T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.1.150 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::655 + interfaces: + Loopback0: + ipv6: fc00:c:c:196::1/128 + Ethernet1: + ipv6: fc00:a::656/126 + bp_interface: + ipv6: fc00:b::196/64 + + ARISTA405T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.151 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::659 + interfaces: + Loopback0: + ipv6: fc00:c:c:197::1/128 + Ethernet1: + ipv6: fc00:a::65a/126 + bp_interface: + ipv6: fc00:b::197/64 + + ARISTA406T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.152 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::65d + interfaces: + Loopback0: + ipv6: fc00:c:c:198::1/128 + Ethernet1: + ipv6: fc00:a::65e/126 + bp_interface: + ipv6: fc00:b::198/64 + + ARISTA407T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.153 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::661 + interfaces: + Loopback0: + ipv6: fc00:c:c:199::1/128 + Ethernet1: + ipv6: fc00:a::662/126 + bp_interface: + ipv6: fc00:b::199/64 + + ARISTA408T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.154 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::665 + interfaces: + Loopback0: + ipv6: fc00:c:c:19a::1/128 + Ethernet1: + ipv6: fc00:a::666/126 + bp_interface: + ipv6: fc00:b::19a/64 + + ARISTA409T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.155 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::669 + interfaces: + Loopback0: + ipv6: fc00:c:c:19b::1/128 + Ethernet1: + ipv6: fc00:a::66a/126 + bp_interface: + ipv6: fc00:b::19b/64 + + ARISTA410T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.156 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::66d + interfaces: + Loopback0: + ipv6: fc00:c:c:19c::1/128 + Ethernet1: + ipv6: fc00:a::66e/126 + bp_interface: + ipv6: fc00:b::19c/64 + + ARISTA411T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.157 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::671 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:19d::1/128 + Ethernet1: + ipv6: fc00:a::672/126 + bp_interface: + ipv6: fc00:b::19d/64 + + ARISTA412T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.158 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::675 + interfaces: + Loopback0: + ipv6: fc00:c:c:19e::1/128 + Ethernet1: + ipv6: fc00:a::676/126 + bp_interface: + ipv6: fc00:b::19e/64 + + ARISTA413T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.159 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::679 + interfaces: + Loopback0: + ipv6: fc00:c:c:19f::1/128 + Ethernet1: + ipv6: fc00:a::67a/126 + bp_interface: + ipv6: fc00:b::19f/64 + + ARISTA414T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.160 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::67d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a0::1/128 + Ethernet1: + ipv6: fc00:a::67e/126 + bp_interface: + ipv6: fc00:b::1a0/64 + + ARISTA415T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.161 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::681 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a1::1/128 + Ethernet1: + ipv6: fc00:a::682/126 + bp_interface: + ipv6: fc00:b::1a1/64 + + ARISTA416T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.162 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::685 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a2::1/128 + Ethernet1: + ipv6: fc00:a::686/126 + bp_interface: + ipv6: fc00:b::1a2/64 + + ARISTA417T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.163 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::689 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a3::1/128 + Ethernet1: + ipv6: fc00:a::68a/126 + bp_interface: + ipv6: fc00:b::1a3/64 + + ARISTA418T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.164 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::68d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a4::1/128 + Ethernet1: + ipv6: fc00:a::68e/126 + bp_interface: + ipv6: fc00:b::1a4/64 + + ARISTA419T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.1.165 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::691 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a5::1/128 + Ethernet1: + ipv6: fc00:a::692/126 + bp_interface: + ipv6: fc00:b::1a5/64 + + ARISTA420T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.166 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::695 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a6::1/128 + Ethernet1: + ipv6: fc00:a::696/126 + bp_interface: + ipv6: fc00:b::1a6/64 + + ARISTA421T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.167 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::699 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a7::1/128 + Ethernet1: + ipv6: fc00:a::69a/126 + bp_interface: + ipv6: fc00:b::1a7/64 + + ARISTA422T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.168 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::69d + interfaces: + Loopback0: + ipv6: fc00:c:c:1a8::1/128 + Ethernet1: + ipv6: fc00:a::69e/126 + bp_interface: + ipv6: fc00:b::1a8/64 + + ARISTA423T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.169 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1a9::1/128 + Ethernet1: + ipv6: fc00:a::6a2/126 + bp_interface: + ipv6: fc00:b::1a9/64 + + ARISTA424T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.170 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1aa::1/128 + Ethernet1: + ipv6: fc00:a::6a6/126 + bp_interface: + ipv6: fc00:b::1aa/64 + + ARISTA425T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.171 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ab::1/128 + Ethernet1: + ipv6: fc00:a::6aa/126 + bp_interface: + ipv6: fc00:b::1ab/64 + + ARISTA426T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.172 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::6ad + interfaces: + Loopback0: + ipv6: fc00:c:c:1ac::1/128 + Ethernet1: + ipv6: fc00:a::6ae/126 + bp_interface: + ipv6: fc00:b::1ac/64 + + ARISTA427T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.173 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ad::1/128 + Ethernet1: + ipv6: fc00:a::6b2/126 + bp_interface: + ipv6: fc00:b::1ad/64 + + ARISTA428T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.174 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ae::1/128 + Ethernet1: + ipv6: fc00:a::6b6/126 + bp_interface: + ipv6: fc00:b::1ae/64 + + ARISTA429T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.175 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1af::1/128 + Ethernet1: + ipv6: fc00:a::6ba/126 + bp_interface: + ipv6: fc00:b::1af/64 + + ARISTA430T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.176 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6bd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b0::1/128 + Ethernet1: + ipv6: fc00:a::6be/126 + bp_interface: + ipv6: fc00:b::1b0/64 + + ARISTA431T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.177 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b1::1/128 + Ethernet1: + ipv6: fc00:a::6c2/126 + bp_interface: + ipv6: fc00:b::1b1/64 + + ARISTA432T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.178 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b2::1/128 + Ethernet1: + ipv6: fc00:a::6c6/126 + bp_interface: + ipv6: fc00:b::1b2/64 + + ARISTA433T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.179 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b3::1/128 + Ethernet1: + ipv6: fc00:a::6ca/126 + 
bp_interface: + ipv6: fc00:b::1b3/64 + + ARISTA434T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.180 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6cd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b4::1/128 + Ethernet1: + ipv6: fc00:a::6ce/126 + bp_interface: + ipv6: fc00:b::1b4/64 + + ARISTA435T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.181 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b5::1/128 + Ethernet1: + ipv6: fc00:a::6d2/126 + bp_interface: + ipv6: fc00:b::1b5/64 + + ARISTA436T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.182 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b6::1/128 + Ethernet1: + ipv6: fc00:a::6d6/126 + bp_interface: + ipv6: fc00:b::1b6/64 + + ARISTA437T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.183 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b7::1/128 + Ethernet1: + ipv6: fc00:a::6da/126 + bp_interface: + ipv6: fc00:b::1b7/64 + + ARISTA438T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.184 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6dd + interfaces: + Loopback0: + ipv6: fc00:c:c:1b8::1/128 + Ethernet1: + ipv6: fc00:a::6de/126 + bp_interface: + ipv6: fc00:b::1b8/64 + + ARISTA439T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.185 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1b9::1/128 + Ethernet1: + ipv6: fc00:a::6e2/126 + bp_interface: + ipv6: fc00:b::1b9/64 + + ARISTA440T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.186 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ba::1/128 + Ethernet1: + ipv6: fc00:a::6e6/126 + bp_interface: + ipv6: fc00:b::1ba/64 + + ARISTA441T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.1.187 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bb::1/128 + Ethernet1: + ipv6: fc00:a::6ea/126 + bp_interface: + ipv6: fc00:b::1bb/64 + + ARISTA442T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.188 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6ed + interfaces: + Loopback0: + ipv6: fc00:c:c:1bc::1/128 + Ethernet1: + ipv6: fc00:a::6ee/126 + bp_interface: + ipv6: fc00:b::1bc/64 + + ARISTA443T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.189 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bd::1/128 + Ethernet1: + ipv6: fc00:a::6f2/126 + bp_interface: + ipv6: fc00:b::1bd/64 + + ARISTA444T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.190 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1be::1/128 + Ethernet1: + ipv6: fc00:a::6f6/126 + bp_interface: + ipv6: fc00:b::1be/64 + + ARISTA445T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.191 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1bf::1/128 + Ethernet1: + ipv6: fc00:a::6fa/126 + bp_interface: + ipv6: fc00:b::1bf/64 + + ARISTA446T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.192 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::6fd + interfaces: + Loopback0: + ipv6: fc00:c:c:1c0::1/128 + Ethernet1: + ipv6: fc00:a::6fe/126 + bp_interface: + ipv6: fc00:b::1c0/64 + + ARISTA447T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.193 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::701 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c1::1/128 + Ethernet1: + ipv6: fc00:a::702/126 + bp_interface: + ipv6: fc00:b::1c1/64 + + ARISTA448T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.194 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::705 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:1c2::1/128 + Ethernet1: + ipv6: fc00:a::706/126 + bp_interface: + ipv6: fc00:b::1c2/64 + + ARISTA449T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.195 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::709 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c3::1/128 + Ethernet1: + ipv6: fc00:a::70a/126 + bp_interface: + ipv6: fc00:b::1c3/64 + + ARISTA450T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.196 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::70d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c4::1/128 + Ethernet1: + ipv6: fc00:a::70e/126 + bp_interface: + ipv6: fc00:b::1c4/64 + + ARISTA451T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.197 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::711 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c5::1/128 + Ethernet1: + ipv6: fc00:a::712/126 + bp_interface: + ipv6: fc00:b::1c5/64 + + ARISTA452T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.198 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::715 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c6::1/128 + Ethernet1: + ipv6: fc00:a::716/126 + bp_interface: + ipv6: fc00:b::1c6/64 + + ARISTA453T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.199 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::719 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c7::1/128 + Ethernet1: + ipv6: fc00:a::71a/126 + bp_interface: + ipv6: fc00:b::1c7/64 + + ARISTA454T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.200 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::71d + interfaces: + Loopback0: + ipv6: fc00:c:c:1c8::1/128 + Ethernet1: + ipv6: fc00:a::71e/126 + bp_interface: + ipv6: fc00:b::1c8/64 + + ARISTA455T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.201 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::721 + interfaces: + Loopback0: + ipv6: fc00:c:c:1c9::1/128 + Ethernet1: + ipv6: fc00:a::722/126 + bp_interface: + ipv6: fc00:b::1c9/64 + + ARISTA456T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.1.202 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::725 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ca::1/128 + Ethernet1: + ipv6: fc00:a::726/126 + bp_interface: + ipv6: fc00:b::1ca/64 + + ARISTA457T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.203 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::729 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cb::1/128 + Ethernet1: + ipv6: fc00:a::72a/126 + bp_interface: + ipv6: fc00:b::1cb/64 + + ARISTA458T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.204 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::72d + interfaces: + Loopback0: + ipv6: fc00:c:c:1cc::1/128 + Ethernet1: + ipv6: fc00:a::72e/126 + bp_interface: + ipv6: fc00:b::1cc/64 + + ARISTA459T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.205 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::731 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cd::1/128 + Ethernet1: + ipv6: fc00:a::732/126 + bp_interface: + ipv6: fc00:b::1cd/64 + + ARISTA460T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.206 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::735 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ce::1/128 + Ethernet1: + ipv6: fc00:a::736/126 + bp_interface: + ipv6: fc00:b::1ce/64 + + ARISTA461T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.207 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::739 + interfaces: + Loopback0: + ipv6: fc00:c:c:1cf::1/128 + Ethernet1: + ipv6: fc00:a::73a/126 + bp_interface: + ipv6: fc00:b::1cf/64 + + ARISTA462T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.208 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::73d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d0::1/128 + Ethernet1: + ipv6: fc00:a::73e/126 + bp_interface: + ipv6: fc00:b::1d0/64 + + ARISTA463T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.209 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::741 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d1::1/128 + Ethernet1: + ipv6: fc00:a::742/126 + bp_interface: + ipv6: fc00:b::1d1/64 + + ARISTA464T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.210 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::745 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d2::1/128 + Ethernet1: + ipv6: fc00:a::746/126 + bp_interface: + ipv6: fc00:b::1d2/64 + + ARISTA465T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.211 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::749 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d3::1/128 + Ethernet1: + ipv6: fc00:a::74a/126 + bp_interface: + ipv6: fc00:b::1d3/64 + + ARISTA466T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.212 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::74d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d4::1/128 + Ethernet1: + ipv6: fc00:a::74e/126 + bp_interface: + ipv6: fc00:b::1d4/64 + + ARISTA467T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.213 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::751 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d5::1/128 + Ethernet1: + ipv6: fc00:a::752/126 + bp_interface: + ipv6: fc00:b::1d5/64 + + ARISTA468T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.214 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::755 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d6::1/128 + Ethernet1: + ipv6: fc00:a::756/126 + bp_interface: + ipv6: fc00:b::1d6/64 + + ARISTA469T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.215 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::759 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d7::1/128 + Ethernet1: + ipv6: fc00:a::75a/126 + bp_interface: + ipv6: fc00:b::1d7/64 + + ARISTA470T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.216 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::75d + interfaces: + Loopback0: + ipv6: fc00:c:c:1d8::1/128 + Ethernet1: + ipv6: fc00:a::75e/126 + 
bp_interface: + ipv6: fc00:b::1d8/64 + + ARISTA471T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.217 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::761 + interfaces: + Loopback0: + ipv6: fc00:c:c:1d9::1/128 + Ethernet1: + ipv6: fc00:a::762/126 + bp_interface: + ipv6: fc00:b::1d9/64 + + ARISTA472T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.218 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::765 + interfaces: + Loopback0: + ipv6: fc00:c:c:1da::1/128 + Ethernet1: + ipv6: fc00:a::766/126 + bp_interface: + ipv6: fc00:b::1da/64 + + ARISTA473T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.219 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::769 + interfaces: + Loopback0: + ipv6: fc00:c:c:1db::1/128 + Ethernet1: + ipv6: fc00:a::76a/126 + bp_interface: + ipv6: fc00:b::1db/64 + + ARISTA474T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.220 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::76d + interfaces: + Loopback0: + ipv6: fc00:c:c:1dc::1/128 + Ethernet1: + ipv6: fc00:a::76e/126 + bp_interface: + ipv6: fc00:b::1dc/64 + + ARISTA475T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.221 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::771 + interfaces: + Loopback0: + ipv6: fc00:c:c:1dd::1/128 + Ethernet1: + ipv6: fc00:a::772/126 + bp_interface: + ipv6: fc00:b::1dd/64 + + ARISTA476T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.222 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::775 + interfaces: + Loopback0: + ipv6: fc00:c:c:1de::1/128 + Ethernet1: + ipv6: fc00:a::776/126 + bp_interface: + ipv6: fc00:b::1de/64 + + ARISTA477T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.223 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::779 + interfaces: + Loopback0: + ipv6: fc00:c:c:1df::1/128 + Ethernet1: + ipv6: fc00:a::77a/126 + bp_interface: + ipv6: fc00:b::1df/64 + + ARISTA478T0: + properties: + - common + - tor + bgp: + router-id: 
0.12.1.224 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::77d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e0::1/128 + Ethernet1: + ipv6: fc00:a::77e/126 + bp_interface: + ipv6: fc00:b::1e0/64 + + ARISTA479T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.225 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::781 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e1::1/128 + Ethernet1: + ipv6: fc00:a::782/126 + bp_interface: + ipv6: fc00:b::1e1/64 + + ARISTA480T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.226 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::785 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e2::1/128 + Ethernet1: + ipv6: fc00:a::786/126 + bp_interface: + ipv6: fc00:b::1e2/64 + + ARISTA481T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.227 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::789 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e3::1/128 + Ethernet1: + ipv6: fc00:a::78a/126 + bp_interface: + ipv6: fc00:b::1e3/64 + + ARISTA482T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.228 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::78d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e4::1/128 + Ethernet1: + ipv6: fc00:a::78e/126 + bp_interface: + ipv6: fc00:b::1e4/64 + + ARISTA483T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.229 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::791 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e5::1/128 + Ethernet1: + ipv6: fc00:a::792/126 + bp_interface: + ipv6: fc00:b::1e5/64 + + ARISTA484T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.230 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::795 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e6::1/128 + Ethernet1: + ipv6: fc00:a::796/126 + bp_interface: + ipv6: fc00:b::1e6/64 + + ARISTA485T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.231 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::799 + interfaces: + Loopback0: + ipv6: 
fc00:c:c:1e7::1/128 + Ethernet1: + ipv6: fc00:a::79a/126 + bp_interface: + ipv6: fc00:b::1e7/64 + + ARISTA486T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.232 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::79d + interfaces: + Loopback0: + ipv6: fc00:c:c:1e8::1/128 + Ethernet1: + ipv6: fc00:a::79e/126 + bp_interface: + ipv6: fc00:b::1e8/64 + + ARISTA487T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.233 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7a1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1e9::1/128 + Ethernet1: + ipv6: fc00:a::7a2/126 + bp_interface: + ipv6: fc00:b::1e9/64 + + ARISTA488T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.234 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7a5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ea::1/128 + Ethernet1: + ipv6: fc00:a::7a6/126 + bp_interface: + ipv6: fc00:b::1ea/64 + + ARISTA489T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.235 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7a9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1eb::1/128 + Ethernet1: + ipv6: fc00:a::7aa/126 + bp_interface: + ipv6: fc00:b::1eb/64 + + ARISTA490T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.236 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7ad + interfaces: + Loopback0: + ipv6: fc00:c:c:1ec::1/128 + Ethernet1: + ipv6: fc00:a::7ae/126 + bp_interface: + ipv6: fc00:b::1ec/64 + + ARISTA491T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.237 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7b1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ed::1/128 + Ethernet1: + ipv6: fc00:a::7b2/126 + bp_interface: + ipv6: fc00:b::1ed/64 + + ARISTA492T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.238 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7b5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ee::1/128 + Ethernet1: + ipv6: fc00:a::7b6/126 + bp_interface: + ipv6: fc00:b::1ee/64 + + ARISTA493T0: + 
properties: + - common + - tor + bgp: + router-id: 0.12.1.239 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7b9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ef::1/128 + Ethernet1: + ipv6: fc00:a::7ba/126 + bp_interface: + ipv6: fc00:b::1ef/64 + + ARISTA494T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.240 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7bd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f0::1/128 + Ethernet1: + ipv6: fc00:a::7be/126 + bp_interface: + ipv6: fc00:b::1f0/64 + + ARISTA495T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.241 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7c1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f1::1/128 + Ethernet1: + ipv6: fc00:a::7c2/126 + bp_interface: + ipv6: fc00:b::1f1/64 + + ARISTA496T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.242 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7c5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f2::1/128 + Ethernet1: + ipv6: fc00:a::7c6/126 + bp_interface: + ipv6: fc00:b::1f2/64 + + ARISTA497T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.243 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7c9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f3::1/128 + Ethernet1: + ipv6: fc00:a::7ca/126 + bp_interface: + ipv6: fc00:b::1f3/64 + + ARISTA498T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.244 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7cd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f4::1/128 + Ethernet1: + ipv6: fc00:a::7ce/126 + bp_interface: + ipv6: fc00:b::1f4/64 + + ARISTA499T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.245 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7d1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f5::1/128 + Ethernet1: + ipv6: fc00:a::7d2/126 + bp_interface: + ipv6: fc00:b::1f5/64 + + ARISTA500T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.246 + asn: 4200000000 + peers: + 4200100000: + - 
fc00:a::7d5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f6::1/128 + Ethernet1: + ipv6: fc00:a::7d6/126 + bp_interface: + ipv6: fc00:b::1f6/64 + + ARISTA501T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.247 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7d9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f7::1/128 + Ethernet1: + ipv6: fc00:a::7da/126 + bp_interface: + ipv6: fc00:b::1f7/64 + + ARISTA502T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.248 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7dd + interfaces: + Loopback0: + ipv6: fc00:c:c:1f8::1/128 + Ethernet1: + ipv6: fc00:a::7de/126 + bp_interface: + ipv6: fc00:b::1f8/64 + + ARISTA503T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.249 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7e1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1f9::1/128 + Ethernet1: + ipv6: fc00:a::7e2/126 + bp_interface: + ipv6: fc00:b::1f9/64 + + ARISTA504T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.250 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7e5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fa::1/128 + Ethernet1: + ipv6: fc00:a::7e6/126 + bp_interface: + ipv6: fc00:b::1fa/64 + + ARISTA505T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.251 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7e9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fb::1/128 + Ethernet1: + ipv6: fc00:a::7ea/126 + bp_interface: + ipv6: fc00:b::1fb/64 + + ARISTA506T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.252 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7ed + interfaces: + Loopback0: + ipv6: fc00:c:c:1fc::1/128 + Ethernet1: + ipv6: fc00:a::7ee/126 + bp_interface: + ipv6: fc00:b::1fc/64 + + ARISTA507T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.253 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7f1 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fd::1/128 + Ethernet1: + ipv6: fc00:a::7f2/126 + 
bp_interface: + ipv6: fc00:b::1fd/64 + + ARISTA508T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.254 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7f5 + interfaces: + Loopback0: + ipv6: fc00:c:c:1fe::1/128 + Ethernet1: + ipv6: fc00:a::7f6/126 + bp_interface: + ipv6: fc00:b::1fe/64 + + ARISTA509T0: + properties: + - common + - tor + bgp: + router-id: 0.12.1.255 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7f9 + interfaces: + Loopback0: + ipv6: fc00:c:c:1ff::1/128 + Ethernet1: + ipv6: fc00:a::7fa/126 + bp_interface: + ipv6: fc00:b::1ff/64 + + ARISTA510T0: + properties: + - common + - tor + bgp: + router-id: 0.12.2.0 + asn: 4200000000 + peers: + 4200100000: + - fc00:a::7fd + interfaces: + Loopback0: + ipv6: fc00:c:c:200::1/128 + Ethernet1: + ipv6: fc00:a::7fe/126 + bp_interface: + ipv6: fc00:b::200/64 From ea1108a414688d239be633b93b9635832c016376 Mon Sep 17 00:00:00 2001 From: Zhijian Li Date: Wed, 20 Nov 2024 19:20:31 -0800 Subject: [PATCH 136/175] [M0/Mx] Fix test_route_flap (#15641) PR #14804 caused test_route_flap fail on M0/Mx with below error: > pytest.fail("Did not find a dut in duthosts that for topo type {} that has upstream nbr type {}". format(tbinfo["topo"]["type"], upstream_nbr_type)) E Failed: Did not find a dut in duthosts that for topo type m0 that has upstream nbr type T3 What is the motivation for this PR? Fix test_route_flap on M0/Mx topo. How did you do it? Support upstream neighbor on M0/Mx. How did you verify/test it? Verified on Nokia-7215 M0. Verified on Arista-720DT M0. 
--- tests/conftest.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 4885d240aaa..b8b4e0c15c0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1875,7 +1875,11 @@ def enum_rand_one_frontend_asic_index(request): @pytest.fixture(scope='module') def enum_upstream_dut_hostname(duthosts, tbinfo): - if tbinfo["topo"]["type"] == "t0": + if tbinfo["topo"]["type"] == "m0": + upstream_nbr_type = "M1" + elif tbinfo["topo"]["type"] == "mx": + upstream_nbr_type = "M0" + elif tbinfo["topo"]["type"] == "t0": upstream_nbr_type = "T1" elif tbinfo["topo"]["type"] == "t1": upstream_nbr_type = "T2" From b038a6328e9b19324a591b4233148086bf863993 Mon Sep 17 00:00:00 2001 From: Chris <156943338+ccroy-arista@users.noreply.github.com> Date: Wed, 20 Nov 2024 20:08:00 -0800 Subject: [PATCH 137/175] sonic-mgmt: fix port toggle timeout on many ports (#15573) For topologies leveraging many ports, such as in the case of t0-isolated-d128u128s2, the timeout for non-mellanox fixed-chassis devices is a static value and is too low for the number of ports being configured. In contrast, Mellanox devices use a timeout proportional to the number of ports being toggled. This change moves fixed-chassis broadcom devices to use a proportional timeout as well. 
--- tests/common/port_toggle.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/common/port_toggle.py b/tests/common/port_toggle.py index 890288394b7..d387c7229d4 100644 --- a/tests/common/port_toggle.py +++ b/tests/common/port_toggle.py @@ -121,14 +121,16 @@ def default_port_toggle_wait_time(duthost, port_count): port_down_wait_time, port_up_wait_time = 120, 180 asic_type = duthost.facts["asic_type"] - if asic_type == "mellanox": + is_modular_chassis = duthost.get_facts().get("modular_chassis") + + if (asic_type == "mellanox") or (asic_type == "broadcom" and not is_modular_chassis): if port_count <= BASE_PORT_COUNT: port_count = BASE_PORT_COUNT port_count_factor = port_count / BASE_PORT_COUNT port_down_wait_time = int(port_down_wait_time * port_count_factor) port_up_wait_time = int(port_up_wait_time * port_count_factor) - elif duthost.get_facts().get("modular_chassis"): + elif is_modular_chassis: port_down_wait_time = 300 port_up_wait_time = 300 From f265a734f5c77838b336a58e1bd99a4845a03fa0 Mon Sep 17 00:00:00 2001 From: vkjammala-arista <152394203+vkjammala-arista@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:11:00 +0530 Subject: [PATCH 138/175] [sonic-mgmt] Fix "enum_dut_lossy_prio_with_completeness_level" collection failure (#15626) PR#15057 has introduced logic to select dut queue priority list based on the completeness_level. If completeness_level is "debug", we are selecting one queue priority randomly from the dut priority list which can be empty also, and this will cause "ValueError: Sample larger than population or is negative". 
--- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index b8b4e0c15c0..bff93f580c3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1550,7 +1550,7 @@ def generate_priority_lists(request, prio_scope, with_completeness_level=False): # if completeness_level in ["debug"], only select one item # if completeness_level in ["basic", "confident"], select 1 priority per DUT - if completeness_level in ["debug"]: + if completeness_level in ["debug"] and ret: ret = random.sample(ret, 1) elif completeness_level in ["basic", "confident"]: ret = [] From 0bcec187d50fbe07d3abef4aad426b2910d0744e Mon Sep 17 00:00:00 2001 From: Chun'ang Li <39114813+lerry-lee@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:41:46 +0800 Subject: [PATCH 139/175] [CI]Add new parameter retry_cases_include and retry_cases_exclude to template for Elastictest specific retry (#15635) What is the motivation for this PR? [CI]Add new parameter retry_cases_include and retry_cases_exclude to template for Elastictest specific retry. How did you do it? Add new parameters retry_cases_include and retry_cases_exclude and set default value. How did you verify/test it? Add new parameters wouldn't block current normal runnings. This PR test need passed. Signed-off-by: Chun'ang Li --- .../run-test-elastictest-template.yml | 13 ++++++++++ .azure-pipelines/test_plan.py | 26 +++++++++++++++++++ azure-pipelines.yml | 20 +++++++------- 3 files changed, 49 insertions(+), 10 deletions(-) diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 740bbc8db7b..4d1092e50eb 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -115,10 +115,21 @@ parameters: type: string default: "" + # The number of retries when the script fails. 
Global retry if retry_cases_include and retry_cases_exclude are both empty, otherwise specific retry - name: RETRY_TIMES type: string default: "" + # Retry cases to include, works when retry_times>0, support both feature and script level, such as "bgp,test_features.py" + - name: RETRY_CASES_INCLUDE + type: string + default: "" + + # Retry cases to exclude, works when retry_times>0, support both feature and script level, such as "bgp,test_features.py" + - name: RETRY_CASES_EXCLUDE + type: string + default: "" + - name: DUMP_KVM_IF_FAIL type: string default: "False" # KVM dump has beed deleted @@ -248,6 +259,8 @@ steps: --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ --retry-times ${{ parameters.RETRY_TIMES }} \ + --retry-cases-include ${{ parameters.RETRY_CASES_INCLUDE }} \ + --retry-cases-exclude ${{ parameters.RETRY_CASES_EXCLUDE }} \ --dump-kvm-if-fail ${{ parameters.DUMP_KVM_IF_FAIL }} \ --requester "${{ parameters.REQUESTER }}" \ --max-execute-seconds $((${{ parameters.MAX_RUN_TEST_MINUTES }} * 60)) \ diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index f4b07bb2d18..4052be78e3d 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -227,6 +227,8 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params features = parse_list_from_str(kwargs.get("features", None)) scripts_exclude = parse_list_from_str(kwargs.get("scripts_exclude", None)) features_exclude = parse_list_from_str(kwargs.get("features_exclude", None)) + retry_cases_include = parse_list_from_str(kwargs.get("retry_cases_include", None)) + retry_cases_exclude = parse_list_from_str(kwargs.get("retry_cases_exclude", None)) ptf_image_tag = kwargs.get("ptf_image_tag", None) print("Creating test plan, topology: {}, name: {}, build info:{} {} {}".format(topology, test_plan_name, @@ -284,6 +286,8 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params 
"test_option": { "stop_on_failure": kwargs.get("stop_on_failure", True), "retry_times": kwargs.get("retry_times", 2), + "retry_cases_include": retry_cases_include, + "retry_cases_exclude": retry_cases_exclude, "test_cases": { "features": features, "scripts": scripts, @@ -829,6 +833,26 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte required=False, help="Retry times after tests failed." ) + parser_create.add_argument( + "--retry-cases-include", + type=str, + dest="retry_cases_include", + nargs='?', + const=None, + default=None, + required=False, + help="Include testcases to retry, support feature/script. Split by ',', like: 'bgp, lldp, ecmp/test_fgnhg.py'" + ) + parser_create.add_argument( + "--retry-cases-exclude", + type=str, + dest="retry_cases_exclude", + nargs='?', + const=None, + default=None, + required=False, + help="Exclude testcases to retry, support feature/script. Split by ',', like: 'bgp, lldp, ecmp/test_fgnhg.py'" + ) parser_create.add_argument( "--dump-kvm-if-fail", type=ast.literal_eval, @@ -1022,6 +1046,8 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte platform=args.platform, stop_on_failure=args.stop_on_failure, retry_times=args.retry_times, + retry_cases_include=args.retry_cases_include, + retry_cases_exclude=args.retry_cases_exclude, dump_kvm_if_fail=args.dump_kvm_if_fail, requester=args.requester, max_execute_seconds=args.max_execute_seconds, diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 76cacd39c1d..bd19abd9c7a 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -80,7 +80,7 @@ stages: MIN_WORKER: $(T0_INSTANCE_NUM) MAX_WORKER: $(T0_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: t0_2vlans_elastictest displayName: "kvmtest-t0-2vlans by Elastictest" @@ -96,7 +96,7 @@ stages: MAX_WORKER: $(T0_2VLANS_INSTANCE_NUM) DEPLOY_MG_EXTRA_PARAMS: "-e vlan_config=two_vlan_a" KVM_IMAGE_BRANCH: 
$(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: t1_lag_elastictest displayName: "kvmtest-t1-lag by Elastictest" @@ -110,7 +110,7 @@ stages: MIN_WORKER: $(T1_LAG_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: dualtor_elastictest displayName: "kvmtest-dualtor-t0 by Elastictest" @@ -125,7 +125,7 @@ stages: MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) COMMON_EXTRA_PARAMS: "--disable_loganalyzer " KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: multi_asic_elastictest displayName: "kvmtest-multi-asic-t1-lag by Elastictest" @@ -141,7 +141,7 @@ stages: MAX_WORKER: $(MULTI_ASIC_INSTANCE_NUM) NUM_ASIC: 4 KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: sonic_t0_elastictest displayName: "kvmtest-t0-sonic by Elastictest" @@ -158,7 +158,7 @@ stages: COMMON_EXTRA_PARAMS: "--neighbor_type=sonic " VM_TYPE: vsonic KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: dpu_elastictest displayName: "kvmtest-dpu by Elastictest" @@ -172,7 +172,7 @@ stages: MIN_WORKER: $(T0_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" - job: onboarding_elastictest_t0 displayName: "onboarding t0 testcases by Elastictest - optional" @@ -188,7 +188,7 @@ stages: MIN_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) MAX_WORKER: $(T0_ONBOARDING_SONIC_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" TEST_SET: onboarding_t0 - job: onboarding_elastictest_t1 @@ -205,7 +205,7 @@ stages: MIN_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) MAX_WORKER: $(T1_LAG_ONBOARDING_INSTANCE_NUM) KVM_IMAGE_BRANCH: $(BUILD_BRANCH) - MGMT_BRANCH: $(BUILD_BRANCH) + MGMT_BRANCH: "master" TEST_SET: onboarding_t1 # - job: 
onboarding_elastictest_dualtor @@ -222,7 +222,7 @@ stages: # MIN_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # MAX_WORKER: $(T0_DUALTOR_INSTANCE_NUM) # KVM_IMAGE_BRANCH: $(BUILD_BRANCH) -# MGMT_BRANCH: $(BUILD_BRANCH) +# MGMT_BRANCH: "master" # TEST_SET: onboarding_dualtor # - job: wan_elastictest From 579f7ba37abdfd4b5a0d32ac7f9d41c52e94e776 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:42:47 +0800 Subject: [PATCH 140/175] Refactor the shared scripts under `configlet` to a common place. (#15606) The functions in the script tests/configlet/util/common.py were being used by common utilities, resulting in cross-feature dependencies. To address this issue and improve code modularity, we refactored and relocated the script to tests/common/configlet/utils.py. Similarly, the helper.py script under tests/configlet/util, which also contained shared functions, was refactored and moved to tests/common/configlet to align with the updated structure and reduce cross-feature dependencies. 
--- tests/common/config_reload.py | 2 +- tests/common/configlet/__init__.py | 0 .../{configlet/util => common/configlet}/helpers.py | 0 .../util/common.py => common/configlet/utils.py} | 0 tests/configlet/test_add_rack.py | 2 +- tests/configlet/util/base_test.py | 13 +++++++------ tests/configlet/util/configlet.py | 6 +++--- tests/configlet/util/generic_patch.py | 2 +- tests/configlet/util/mock_for_switch.py | 2 +- tests/configlet/util/run_test_in_switch.py | 2 +- tests/configlet/util/strip.py | 4 ++-- 11 files changed, 17 insertions(+), 16 deletions(-) create mode 100644 tests/common/configlet/__init__.py rename tests/{configlet/util => common/configlet}/helpers.py (100%) rename tests/{configlet/util/common.py => common/configlet/utils.py} (100%) mode change 100755 => 100644 diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index b6e2542bece..5916a63b2bf 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -6,7 +6,7 @@ from tests.common.plugins.loganalyzer.utils import ignore_loganalyzer from tests.common.platform.processes_utils import wait_critical_processes from tests.common.utilities import wait_until -from tests.configlet.util.common import chk_for_pfc_wd +from tests.common.configlet.utils import chk_for_pfc_wd from tests.common.platform.interface_utils import check_interface_status_of_up_ports from tests.common.helpers.dut_utils import ignore_t2_syslog_msgs diff --git a/tests/common/configlet/__init__.py b/tests/common/configlet/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/configlet/util/helpers.py b/tests/common/configlet/helpers.py similarity index 100% rename from tests/configlet/util/helpers.py rename to tests/common/configlet/helpers.py diff --git a/tests/configlet/util/common.py b/tests/common/configlet/utils.py old mode 100755 new mode 100644 similarity index 100% rename from tests/configlet/util/common.py rename to tests/common/configlet/utils.py diff --git 
a/tests/configlet/test_add_rack.py b/tests/configlet/test_add_rack.py index a2b66d716af..32e568ee529 100644 --- a/tests/configlet/test_add_rack.py +++ b/tests/configlet/test_add_rack.py @@ -4,7 +4,7 @@ import sys from tests.common.utilities import skip_release from .util.base_test import do_test_add_rack, backup_minigraph, restore_orig_minigraph -from .util.helpers import log_info +from tests.common.configlet.helpers import log_info pytestmark = [ pytest.mark.topology("t1") diff --git a/tests/configlet/util/base_test.py b/tests/configlet/util/base_test.py index 60bc3024307..32f60172bbe 100644 --- a/tests/configlet/util/base_test.py +++ b/tests/configlet/util/base_test.py @@ -3,15 +3,16 @@ import json import os -from helpers import set_log_prefix_msg, get_prefix_lvl, set_prefix_lvl, append_log_prefix_msg,\ +from tests.configlet.util import strip +from tests.configlet.util import generic_patch +from tests.configlet.util import configlet +from tests.common.configlet.helpers import set_log_prefix_msg, get_prefix_lvl, set_prefix_lvl, append_log_prefix_msg,\ log_info, log_debug -from common import base_dir, data_dir, orig_db_dir, no_t0_db_dir, clet_db_dir, managed_files,\ - patch_add_t0_dir, patch_rm_t0_dir, files_dir, tor_data, init_data,\ +from tests.common.configlet.utils import base_dir, data_dir, orig_db_dir, no_t0_db_dir, clet_db_dir, managed_files,\ + patch_add_t0_dir, patch_rm_t0_dir, files_dir, tor_data, init_data, \ RELOAD_WAIT_TIME, PAUSE_INTF_DOWN, PAUSE_INTF_UP, PAUSE_CLET_APPLY, DB_COMP_WAIT_TIME,\ do_pause, db_comp, chk_bgp_session, chk_for_pfc_wd, report_error, take_DB_dumps, init_global_data -import strip -import configlet -import generic_patch + if os.path.exists("/etc/sonic/sonic-environment"): from mock_for_switch import config_reload, wait_until diff --git a/tests/configlet/util/configlet.py b/tests/configlet/util/configlet.py index b03b55cfd58..48348c17369 100755 --- a/tests/configlet/util/configlet.py +++ b/tests/configlet/util/configlet.py @@ 
-3,10 +3,10 @@ import json from tempfile import mkstemp -from helpers import log_info, log_debug -from common import tor_data, init_data, config_db_data_orig, managed_files # noqa F401 +from tests.common.configlet.helpers import log_info, log_debug +from tests.common.configlet.utils import tor_data, init_data, config_db_data_orig, managed_files # noqa F401 -import strip +from tests.configlet.util import strip orig_config = None diff --git a/tests/configlet/util/generic_patch.py b/tests/configlet/util/generic_patch.py index f734f1ff835..ce0833368fe 100644 --- a/tests/configlet/util/generic_patch.py +++ b/tests/configlet/util/generic_patch.py @@ -6,7 +6,7 @@ import os import re -from common import orig_db_dir, no_t0_db_dir, patch_add_t0_dir, patch_rm_t0_dir, tor_data,\ +from tests.common.configlet.utils import orig_db_dir, no_t0_db_dir, patch_add_t0_dir, patch_rm_t0_dir, tor_data,\ RELOAD_WAIT_TIME, PAUSE_INTF_DOWN, PAUSE_INTF_UP, PAUSE_CLET_APPLY, DB_COMP_WAIT_TIME,\ do_pause, db_comp, chk_bgp_session diff --git a/tests/configlet/util/mock_for_switch.py b/tests/configlet/util/mock_for_switch.py index df13b3b420a..7170a3162c9 100644 --- a/tests/configlet/util/mock_for_switch.py +++ b/tests/configlet/util/mock_for_switch.py @@ -11,7 +11,7 @@ import time import traceback -from helpers import log_error, log_info, log_debug +from tests.common.configlet.helpers import log_error, log_info, log_debug class DutHost: diff --git a/tests/configlet/util/run_test_in_switch.py b/tests/configlet/util/run_test_in_switch.py index fc88d0450d5..1c61da8b623 100644 --- a/tests/configlet/util/run_test_in_switch.py +++ b/tests/configlet/util/run_test_in_switch.py @@ -8,7 +8,7 @@ from mock_for_switch import get_duthost from base_test import do_test_add_rack, backup_minigraph, restore_orig_minigraph -from helpers import log_error, set_print +from tests.common.configlet.helpers import log_error, set_print # To run test in switch: # Copy all files in this dir (tests/configlet/util) into switch 
diff --git a/tests/configlet/util/strip.py b/tests/configlet/util/strip.py index 5a7eb139177..8ac435682b9 100755 --- a/tests/configlet/util/strip.py +++ b/tests/configlet/util/strip.py @@ -4,8 +4,8 @@ import sys import xml.etree.ElementTree as ET -from helpers import log_info, log_debug -from common import tor_data, config_db_data_orig, managed_files, report_error # noqa F401 +from tests.common.configlet.helpers import log_info, log_debug +from tests.common.configlet.utils import tor_data, config_db_data_orig, managed_files, report_error # noqa F401 from tempfile import mkstemp ns_val = "Microsoft.Search.Autopilot.Evolution" From e1b82f39ec507660ab5e9b50d75593f30c5c9654 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:44:49 +0800 Subject: [PATCH 141/175] [Bugfix] Del wrong condition of case qos/test_qos_sai.py::TestQosSai::testQosSaiPgSharedWatermark[None-wm_pg_shared_lossy] (#15660) --- tests/common/plugins/conditional_mark/tests_mark_conditions.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index cd5255da248..3ed679e9de3 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1571,7 +1571,6 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiPgSharedWatermark[None-wm_pg_shared_l reason: "Image issue on Arista platforms / Unsupported testbed type." 
conditions: - "platform in ['x86_64-arista_7050cx3_32s']" - - "topo_name not in ['t0', 't0-64', 't0-116', 't0-35', 't0-56', 't0-standalone-32', 't0-standalone-64', 't0-standalone-128', 't0-standalone-256', 'dualtor-56', 'dualtor-120', 'dualtor', 't0-80', 't0-backend', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 't2', 't2_2lc_36p-masic', 't2_2lc_min_ports-masic'] and asic_type not in ['mellanox']" qos/test_qos_sai.py::TestQosSai::testQosSaiQWatermarkAllPorts: skip: From 475f52f47fe2e8ecb9d786d3194e1c04fbcc883c Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:47:46 +0800 Subject: [PATCH 142/175] Move fixture `platform_api_conn` to common place. (#15605) What is the motivation for this PR? The platform_api_conn fixture is utilized by scripts in both the smatswitch and platform_tests directories. To reduce cross-feature dependencies and improve code organization, I relocated the fixture to a shared common location, making it accessible without creating unnecessary interdependencies between these feature-specific directories. How did you do it? 
I relocated the fixture platform_api_conn to a shared common location --- tests/common/platform/device_utils.py | 13 +++++ tests/platform_tests/api/conftest.py | 13 ----- tests/platform_tests/api/test_chassis.py | 52 ++++++++++--------- tests/platform_tests/api/test_component.py | 39 +++++++------- tests/platform_tests/api/test_fan_drawer.py | 26 +++++----- tests/platform_tests/api/test_psu.py | 33 ++++++------ tests/platform_tests/api/test_psu_fans.py | 34 ++++++------ tests/platform_tests/api/test_watchdog.py | 21 +++++--- tests/smartswitch/common/device_utils_dpu.py | 16 +++--- .../platform_tests/test_reload_dpu.py | 8 ++- .../platform_tests/test_show_platform_dpu.py | 11 ++-- 11 files changed, 134 insertions(+), 132 deletions(-) diff --git a/tests/common/platform/device_utils.py b/tests/common/platform/device_utils.py index 6676b2f6afa..b74cf94e908 100644 --- a/tests/common/platform/device_utils.py +++ b/tests/common/platform/device_utils.py @@ -6,6 +6,7 @@ import os import json import glob +import http.client from datetime import datetime from collections import OrderedDict from tests.common.utilities import wait_until @@ -938,3 +939,15 @@ def advanceboot_neighbor_restore(duthosts, enum_rand_one_per_hwsku_frontend_host duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] from tests.common.plugins.sanity_check.recover import neighbor_vm_restore neighbor_vm_restore(duthost, nbrhosts, tbinfo) + + +@pytest.fixture(scope='function') +def platform_api_conn(duthosts, enum_rand_one_per_hwsku_hostname, start_platform_api_service): + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + dut_ip = duthost.mgmt_ip + + conn = http.client.HTTPConnection(dut_ip, 8000) + try: + yield conn + finally: + conn.close() diff --git a/tests/platform_tests/api/conftest.py b/tests/platform_tests/api/conftest.py index a6471834a8b..5fc3640ffa9 100644 --- a/tests/platform_tests/api/conftest.py +++ b/tests/platform_tests/api/conftest.py @@ -1,6 +1,5 @@ import os import pytest 
-import http.client from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer @@ -86,18 +85,6 @@ def stop_platform_api_service(duthosts): duthost.command(IPTABLES_DELETE_RULE_CMD, module_ignore_errors=True) -@pytest.fixture(scope='function') -def platform_api_conn(duthosts, enum_rand_one_per_hwsku_hostname, start_platform_api_service): - duthost = duthosts[enum_rand_one_per_hwsku_hostname] - dut_ip = duthost.mgmt_ip - - conn = http.client.HTTPConnection(dut_ip, SERVER_PORT) - try: - yield conn - finally: - conn.close() - - @pytest.fixture(autouse=True) def check_not_implemented_warnings(duthosts, enum_rand_one_per_hwsku_hostname): duthost = duthosts[enum_rand_one_per_hwsku_hostname] diff --git a/tests/platform_tests/api/test_chassis.py b/tests/platform_tests/api/test_chassis.py index 7f823331466..6ad2a1b2f43 100644 --- a/tests/platform_tests/api/test_chassis.py +++ b/tests/platform_tests/api/test_chassis.py @@ -11,6 +11,7 @@ from tests.common.utilities import get_host_visible_vars from tests.common.utilities import skip_release from tests.common.platform.interface_utils import get_physical_port_indices +from tests.common.platform.device_utils import platform_api_conn # noqa F401 from tests.platform_tests.cli.util import get_skip_mod_list from .platform_api_test_base import PlatformApiTestBase @@ -121,53 +122,53 @@ def compare_value_with_device_facts(self, duthost, key, value, case_sensitive=Tr # Functions to test methods inherited from DeviceBase class # - def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] name = chassis.get_name(platform_api_conn) pytest_assert(name is not None, "Unable to retrieve chassis name") pytest_assert(isinstance(name, STRING_TYPE), "Chassis name appears incorrect") self.compare_value_with_platform_facts(duthost, 
'name', name) - def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 presence = chassis.get_presence(platform_api_conn) pytest_assert(presence is not None, "Unable to retrieve chassis presence") pytest_assert(isinstance(presence, bool), "Chassis presence appears incorrect") # Chassis should always be present pytest_assert(presence is True, "Chassis is not present") - def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] model = chassis.get_model(platform_api_conn) pytest_assert(model is not None, "Unable to retrieve chassis model") pytest_assert(isinstance(model, STRING_TYPE), "Chassis model appears incorrect") self.compare_value_with_device_facts(duthost, 'model', model) - def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] serial = chassis.get_serial(platform_api_conn) pytest_assert(serial is not None, "Unable to retrieve chassis serial number") pytest_assert(isinstance(serial, STRING_TYPE), "Chassis serial number appears incorrect") self.compare_value_with_device_facts(duthost, 'serial', serial) - def test_get_revision(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_revision(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release(duthost, ["201811", "201911", "202012"]) revision = chassis.get_revision(platform_api_conn) 
pytest_assert(revision is not None, "Unable to retrieve chassis revision") pytest_assert(isinstance(revision, STRING_TYPE), "Revision appears incorrect") - def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 status = chassis.get_status(platform_api_conn) pytest_assert(status is not None, "Unable to retrieve chassis status") pytest_assert(isinstance(status, bool), "Chassis status appears incorrect") - def test_get_position_in_parent(self, platform_api_conn): + def test_get_position_in_parent(self, platform_api_conn): # noqa F811 position = chassis.get_position_in_parent(platform_api_conn) if self.expect(position is not None, "Failed to perform get_position_in_parent"): self.expect(isinstance(position, int), "Position value must be an integer value") self.assert_expectations() - def test_is_replaceable(self, platform_api_conn): + def test_is_replaceable(self, platform_api_conn): # noqa F811 replaceable = chassis.is_replaceable(platform_api_conn) if self.expect(replaceable is not None, "Failed to perform is_replaceable"): self.expect(isinstance(replaceable, bool), "Replaceable value must be a bool value") @@ -177,7 +178,7 @@ def test_is_replaceable(self, platform_api_conn): # Functions to test methods defined in ChassisBase class # - def test_get_base_mac(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_base_mac(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 # Ensure the base MAC address is sane duthost = duthosts[enum_rand_one_per_hwsku_hostname] base_mac = chassis.get_base_mac(platform_api_conn) @@ -185,7 +186,8 @@ def test_get_base_mac(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos pytest_assert(re.match(REGEX_MAC_ADDRESS, base_mac), "Base MAC address appears to be incorrect") 
self.compare_value_with_device_facts(duthost, 'base_mac', base_mac, False) - def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 ''' Test that we can retrieve sane system EEPROM info from the DUT via the platform API ''' # OCP ONIE TlvInfo EEPROM type codes defined here: @@ -258,7 +260,8 @@ def test_get_system_eeprom_info(self, duthosts, enum_rand_one_per_hwsku_hostname format(field, syseeprom_info_dict[field], expected_syseeprom_info_dict[field], duthost.hostname)) - def test_get_reboot_cause(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_reboot_cause(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 # TODO: Compare return values to potential combinations reboot_cause = chassis.get_reboot_cause(platform_api_conn) @@ -268,7 +271,7 @@ def test_get_reboot_cause(self, duthosts, enum_rand_one_per_hwsku_hostname, loca pytest_assert(isinstance(reboot_cause, list) and len(reboot_cause) == 2, "Reboot cause appears to be incorrect") - def test_components(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_components(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] try: @@ -297,7 +300,7 @@ def test_components(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, "Component {} is incorrect".format(i)) self.assert_expectations() - def test_modules(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_modules(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 try: num_modules = int(chassis.get_num_modules(platform_api_conn)) except Exception: @@ -319,7 +322,7 @@ def 
test_modules(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, pl self.expect(module_index == i, "Module index {} is not correct".format(module_index)) self.assert_expectations() - def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] try: num_fans = int(chassis.get_num_fans(platform_api_conn)) @@ -344,7 +347,7 @@ def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platf self.expect(fan and fan == fan_list[i], "Fan {} is incorrect".format(i)) self.assert_expectations() - def test_fan_drawers(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_fan_drawers(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] try: num_fan_drawers = int(chassis.get_num_fan_drawers(platform_api_conn)) @@ -371,7 +374,7 @@ def test_fan_drawers(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost "Fan drawer {} is incorrect".format(i)) self.assert_expectations() - def test_psus(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_psus(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] try: num_psus = int(chassis.get_num_psus(platform_api_conn)) @@ -396,7 +399,7 @@ def test_psus(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platf self.expect(psu and psu == psu_list[i], "PSU {} is incorrect".format(i)) self.assert_expectations() - def test_thermals(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_thermals(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] try: num_thermals = int(chassis.get_num_thermals(platform_api_conn)) @@ -424,7 +427,7 @@ def test_thermals(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, p self.assert_expectations() def test_sfps(self, duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, physical_port_indices): + localhost, platform_api_conn, physical_port_indices): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] if duthost.is_supervisor_node(): pytest.skip("skipping for supervisor node") @@ -463,7 +466,7 @@ def test_sfps(self, duthosts, enum_rand_one_per_hwsku_hostname, self.expect(sfp and sfp in sfp_list, "SFP object for PORT{} NOT found".format(index)) self.assert_expectations() - def test_status_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_status_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] # TODO: Get a platform-specific list of available colors for the status LED @@ -537,19 +540,20 @@ def test_status_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_thermal_manager(self, localhost, platform_api_conn, thermal_manager_enabled): + def test_get_thermal_manager(self, localhost, platform_api_conn, thermal_manager_enabled): # noqa F811 thermal_mgr = chassis.get_thermal_manager(platform_api_conn) pytest_assert(thermal_mgr is not None, "Failed to retrieve thermal manager") - def test_get_watchdog(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_watchdog(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 watchdog = chassis.get_watchdog(platform_api_conn) pytest_assert(watchdog is not None, "Failed to retrieve watchdog") - def test_get_eeprom(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, 
platform_api_conn): + def test_get_eeprom(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 eeprom = chassis.get_eeprom(platform_api_conn) pytest_assert(eeprom is not None, "Failed to retrieve system EEPROM") - def test_get_supervisor_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_supervisor_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 if chassis.is_modular_chassis(platform_api_conn): sup_slot = chassis.get_supervisor_slot(platform_api_conn) pytest_assert(isinstance(sup_slot, int) or isinstance(sup_slot, STRING_TYPE), @@ -557,7 +561,7 @@ def test_get_supervisor_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, l else: pytest.skip("skipped as this test is applicable to modular chassis only") - def test_get_my_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_my_slot(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 if chassis.is_modular_chassis(platform_api_conn): my_slot = chassis.get_my_slot(platform_api_conn) pytest_assert(isinstance(my_slot, int) or isinstance(my_slot, STRING_TYPE), diff --git a/tests/platform_tests/api/test_component.py b/tests/platform_tests/api/test_component.py index 826be7eaa53..67f37b5c6a0 100644 --- a/tests/platform_tests/api/test_component.py +++ b/tests/platform_tests/api/test_component.py @@ -5,6 +5,7 @@ from tests.common.helpers.platform_api import chassis, component from .platform_api_test_base import PlatformApiTestBase from tests.common.utilities import skip_release_for_platform +from tests.common.platform.device_utils import platform_api_conn # noqa F401 ################################################### # TODO: Remove this after we transition to Python 3 @@ -41,7 +42,7 @@ class TestComponentApi(PlatformApiTestBase): # it relies on the platform_api_conn fixture, which is scoped at 
the function # level, so we must do the same here to prevent a scope mismatch. @pytest.fixture(scope="function", autouse=True) - def setup(self, platform_api_conn): + def setup(self, platform_api_conn): # noqa F811 if self.num_components is None: try: self.num_components = int(chassis.get_num_components(platform_api_conn)) @@ -73,7 +74,7 @@ def compare_value_with_platform_facts(self, duthost, key, value, component_idx): # Functions to test methods inherited from DeviceBase class # - def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for i in range(self.num_components): @@ -83,8 +84,7 @@ def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, p self.compare_value_with_platform_facts(duthost, 'name', name, i) self.assert_expectations() - def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_components): presence = component.get_presence(platform_api_conn, i) if self.expect(presence is not None, "Component {}: Unable to retrieve presence".format(i)): @@ -93,16 +93,14 @@ def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.expect(presence is True, "Component {} not present".format(i)) self.assert_expectations() - def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_components): model = component.get_model(platform_api_conn, i) if self.expect(model is not None, "Component {}: Unable to retrieve model".format(i)): 
self.expect(isinstance(model, STRING_TYPE), "Component {}: Model appears incorrect".format(i)) self.assert_expectations() - def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_components): serial = component.get_serial(platform_api_conn, i) if self.expect(serial is not None, "Component {}: Unable to retrieve serial number".format(i)): @@ -110,15 +108,14 @@ def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, "Component {}: Serial number appears incorrect".format(i)) self.assert_expectations() - def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_components): status = component.get_status(platform_api_conn, i) if self.expect(status is not None, "Component {}: Unable to retrieve status".format(i)): self.expect(isinstance(status, bool), "Component {}: Status appears incorrect".format(i)) self.assert_expectations() - def test_get_position_in_parent(self, platform_api_conn): + def test_get_position_in_parent(self, platform_api_conn): # noqa F811 for i in range(self.num_components): position = component.get_position_in_parent(platform_api_conn, i) if self.expect(position is not None, @@ -127,7 +124,7 @@ def test_get_position_in_parent(self, platform_api_conn): "Position value must be an integer value for component {}".format(i)) self.assert_expectations() - def test_is_replaceable(self, platform_api_conn): + def test_is_replaceable(self, platform_api_conn): # noqa F811 for i in range(self.num_components): replaceable = component.is_replaceable(platform_api_conn, i) if self.expect(replaceable is not None, @@ -140,8 +137,8 @@ def test_is_replaceable(self, 
platform_api_conn): # Functions to test methods defined in ComponentBase class # - def test_get_description(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_description(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 for i in range(self.num_components): description = component.get_description(platform_api_conn, i) if self.expect(description is not None, "Component {}: Failed to retrieve description".format(i)): @@ -149,8 +146,8 @@ def test_get_description(self, duthosts, enum_rand_one_per_hwsku_hostname, local "Component {}: Description appears to be incorrect".format(i)) self.assert_expectations() - def test_get_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 for i in range(self.num_components): fw_version = component.get_firmware_version(platform_api_conn, i) if self.expect(fw_version is not None, "Component {}: Failed to retrieve firmware version".format(i)): @@ -159,7 +156,7 @@ def test_get_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, self.assert_expectations() def test_get_available_firmware_version(self, duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn): + localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) @@ -175,7 +172,7 @@ def test_get_available_firmware_version(self, duthosts, enum_rand_one_per_hwsku_ self.assert_expectations() def test_get_firmware_update_notification(self, duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn): + localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], 
["nokia"]) @@ -188,7 +185,8 @@ def test_get_firmware_update_notification(self, duthosts, enum_rand_one_per_hwsk "Component {}: Firmware update notification appears to be incorrect from image {}" .format(i, image)) - def test_install_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_install_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) @@ -202,7 +200,8 @@ def test_install_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, loca .format(i, image)) self.assert_expectations() - def test_update_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_update_firmware(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["nokia"]) diff --git a/tests/platform_tests/api/test_fan_drawer.py b/tests/platform_tests/api/test_fan_drawer.py index f9b67de1cd4..3baf54d029b 100644 --- a/tests/platform_tests/api/test_fan_drawer.py +++ b/tests/platform_tests/api/test_fan_drawer.py @@ -3,6 +3,7 @@ import pytest from tests.common.helpers.platform_api import chassis, fan_drawer +from tests.common.platform.device_utils import platform_api_conn # noqa F401 from .platform_api_test_base import PlatformApiTestBase @@ -39,7 +40,7 @@ class TestFanDrawerApi(PlatformApiTestBase): # it relies on the platform_api_conn fixture, which is scoped at the function # level, so we must do the same here to prevent a scope mismatch. 
@pytest.fixture(scope="function", autouse=True) - def setup(self, duthost, platform_api_conn): + def setup(self, duthost, platform_api_conn): # noqa F811 if self.num_fan_drawers is None: try: self.num_fan_drawers = int(chassis.get_num_fan_drawers(platform_api_conn)) @@ -87,7 +88,7 @@ def get_fan_drawer_facts(self, duthost, fan_drawer_idx, def_value, *keys): # # Functions to test methods inherited from DeviceBase class # - def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for i in range(self.num_fan_drawers): name = fan_drawer.get_name(platform_api_conn, i) @@ -98,7 +99,7 @@ def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, p self.assert_expectations() - def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): presence = fan_drawer.get_presence(platform_api_conn, i) @@ -108,7 +109,7 @@ def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.assert_expectations() - def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): model = fan_drawer.get_model(platform_api_conn, i) @@ -117,7 +118,7 @@ def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 
for i in range(self.num_fan_drawers): serial = fan_drawer.get_serial(platform_api_conn, i) @@ -126,7 +127,7 @@ def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): status = fan_drawer.get_status(platform_api_conn, i) @@ -135,7 +136,7 @@ def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_position_in_parent(self, platform_api_conn): + def test_get_position_in_parent(self, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): position = fan_drawer.get_position_in_parent(platform_api_conn, i) if self.expect(position is not None, @@ -144,7 +145,7 @@ def test_get_position_in_parent(self, platform_api_conn): "Position value must be an integer value for fan drawer {}".format(i)) self.assert_expectations() - def test_is_replaceable(self, platform_api_conn): + def test_is_replaceable(self, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): replaceable = fan_drawer.is_replaceable(platform_api_conn, i) if self.expect(replaceable is not None, "Failed to perform is_replaceable for fan drawer {}".format(i)): @@ -155,7 +156,7 @@ def test_is_replaceable(self, platform_api_conn): # # Functions to test methods defined in Fan_drawerBase class # - def test_get_num_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_num_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for i in range(self.num_fan_drawers): @@ -166,7 +167,7 @@ def test_get_num_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos 
self.compare_value_with_platform_facts(duthost, 'num_fans', num_fans, i) self.assert_expectations() - def test_get_all_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_all_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_fan_drawers): fans_list = fan_drawer.get_all_fans(platform_api_conn, i) @@ -175,7 +176,8 @@ def test_get_all_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos "fan drawer {} list of fans appear to be incorrect".format(i)) self.assert_expectations() - def test_set_fan_drawers_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_set_fan_drawers_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] FAULT_LED_COLOR_LIST = [ @@ -253,7 +255,7 @@ def test_set_fan_drawers_led(self, duthosts, enum_rand_one_per_hwsku_hostname, l self.assert_expectations() def test_get_maximum_consumed_power(self, duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn): + localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] max_power_skipped = 0 diff --git a/tests/platform_tests/api/test_psu.py b/tests/platform_tests/api/test_psu.py index b9ad83b1ea2..d20298a6a7d 100644 --- a/tests/platform_tests/api/test_psu.py +++ b/tests/platform_tests/api/test_psu.py @@ -7,6 +7,7 @@ from tests.platform_tests.cli.util import get_skip_mod_list from .platform_api_test_base import PlatformApiTestBase from tests.common.utilities import skip_release_for_platform, wait_until +from tests.common.platform.device_utils import platform_api_conn # noqa F401 ################################################### @@ -41,7 +42,7 @@ class TestPsuApi(PlatformApiTestBase): chassis_facts = None @pytest.fixture(scope="function", autouse=True) - def setup(self, 
platform_api_conn, duthosts, enum_rand_one_per_hwsku_hostname): + def setup(self, platform_api_conn, duthosts, enum_rand_one_per_hwsku_hostname): # noqa F811 if self.num_psus is None: try: self.num_psus = int(chassis.get_num_psus(platform_api_conn)) @@ -82,7 +83,7 @@ def get_psu_facts(self, duthost, psu_idx, def_value, *keys): return def_value - def skip_absent_psu(self, psu_num, platform_api_conn): + def skip_absent_psu(self, psu_num, platform_api_conn): # noqa F811 name = psu.get_name(platform_api_conn, psu_num) if name in self.psu_skip_list: logger.info("Skipping PSU {} since it is part of psu_skip_list".format(name)) @@ -103,7 +104,7 @@ def get_psu_parameter(self, psu_info, psu_parameter, get_data, message): # Functions to test methods inherited from DeviceBase class # - def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for i in range(self.num_psus): if self.skip_absent_psu(i, platform_api_conn): @@ -114,7 +115,7 @@ def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, p self.compare_value_with_platform_facts(duthost, 'name', name, i) self.assert_expectations() - def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_psus): presence = psu.get_presence(platform_api_conn, i) name = psu.get_name(platform_api_conn, i) @@ -127,7 +128,7 @@ def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos # that the psu is not present when in the skip list self.assert_expectations() - def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_model(self, duthosts, 
enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_psus): if self.skip_absent_psu(i, platform_api_conn): continue @@ -136,7 +137,7 @@ def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.expect(isinstance(model, STRING_TYPE), "PSU {} model appears incorrect".format(i)) self.assert_expectations() - def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_psus): if self.skip_absent_psu(i, platform_api_conn): continue @@ -145,7 +146,7 @@ def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.expect(isinstance(serial, STRING_TYPE), "PSU {} serial number appears incorrect".format(i)) self.assert_expectations() - def test_get_revision(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_revision(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release(duthost, ["201811", "201911", "202012"]) for i in range(self.num_psus): @@ -156,7 +157,7 @@ def test_get_revision(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.expect(isinstance(revision, STRING_TYPE), "PSU {} serial number appears incorrect".format(i)) self.assert_expectations() - def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for i in range(self.num_psus): if self.skip_absent_psu(i, platform_api_conn): continue @@ -165,7 +166,7 @@ def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.expect(isinstance(status, bool), "PSU {} status appears incorrect".format(i)) 
self.assert_expectations() - def test_get_position_in_parent(self, platform_api_conn): + def test_get_position_in_parent(self, platform_api_conn): # noqa F811 for psu_id in range(self.num_psus): if self.skip_absent_psu(psu_id, platform_api_conn): continue @@ -176,7 +177,7 @@ def test_get_position_in_parent(self, platform_api_conn): "Position value must be an integer value for psu id {}".format(psu_id)) self.assert_expectations() - def test_is_replaceable(self, platform_api_conn): + def test_is_replaceable(self, platform_api_conn): # noqa F811 for psu_id in range(self.num_psus): if self.skip_absent_psu(psu_id, platform_api_conn): continue @@ -191,7 +192,7 @@ def test_is_replaceable(self, platform_api_conn): # Functions to test methods defined in PsuBase class # - def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 ''' PSU fan test ''' for psu_id in range(self.num_psus): try: @@ -210,7 +211,7 @@ def test_fans(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platf self.expect(fan and fan == fan_list[i], "Fan {} of PSU {} is incorrect".format(i, psu_id)) self.assert_expectations() - def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_power(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 ''' PSU power test ''' duthost = duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) @@ -271,7 +272,7 @@ def check_psu_power(failure_count): self.assert_expectations() - def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 ''' PSU temperature test ''' duthost = 
duthosts[enum_rand_one_per_hwsku_hostname] skip_release_for_platform(duthost, ["202012", "201911", "201811"], ["arista"]) @@ -306,7 +307,7 @@ def test_temperature(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost self.assert_expectations() - def test_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 ''' PSU status led test ''' duthost = duthosts[enum_rand_one_per_hwsku_hostname] FAULT_LED_COLOR_LIST = [ @@ -397,7 +398,7 @@ def test_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platfo self.assert_expectations() - def test_thermals(self, platform_api_conn): + def test_thermals(self, platform_api_conn): # noqa F811 for psu_id in range(self.num_psus): if self.skip_absent_psu(psu_id, platform_api_conn): continue @@ -418,7 +419,7 @@ def test_thermals(self, platform_api_conn): self.assert_expectations() - def test_master_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_master_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] FAULT_LED_COLOR_LIST = [ STATUS_LED_COLOR_AMBER, diff --git a/tests/platform_tests/api/test_psu_fans.py b/tests/platform_tests/api/test_psu_fans.py index f348ecb40a8..cc3e2bdc084 100644 --- a/tests/platform_tests/api/test_psu_fans.py +++ b/tests/platform_tests/api/test_psu_fans.py @@ -5,6 +5,7 @@ import pytest from tests.common.helpers.platform_api import chassis, psu, psu_fan +from tests.common.platform.device_utils import platform_api_conn # noqa F401 from .platform_api_test_base import PlatformApiTestBase @@ -46,7 +47,7 @@ class TestPsuFans(PlatformApiTestBase): # level, so we must do the same here to prevent a scope mismatch. 
@pytest.fixture(scope="function", autouse=True) - def setup(self, platform_api_conn): + def setup(self, platform_api_conn): # noqa F811 if self.num_psus is None: try: self.num_psus = chassis.get_num_psus(platform_api_conn) @@ -96,7 +97,7 @@ def get_fan_facts(self, duthost, psu_idx, fan_idx, def_value, *keys): # # Functions to test methods inherited from DeviceBase class # - def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) @@ -133,8 +134,7 @@ def test_get_name(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, p self.assert_expectations() - def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) @@ -150,8 +150,7 @@ def test_get_presence(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos self.assert_expectations() - def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) @@ -163,8 +162,7 @@ def test_get_model(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = 
psu.get_num_fans(platform_api_conn, j) @@ -177,8 +175,7 @@ def test_get_serial(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) @@ -190,7 +187,7 @@ def test_get_status(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_position_in_parent(self, platform_api_conn): + def test_get_position_in_parent(self, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) for i in range(num_fans): @@ -201,7 +198,7 @@ def test_get_position_in_parent(self, platform_api_conn): "Position value must be an integer value for PSU {} fan {}".format(j, i)) self.assert_expectations() - def test_is_replaceable(self, platform_api_conn): + def test_is_replaceable(self, platform_api_conn): # noqa F811 for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) for i in range(num_fans): @@ -217,7 +214,7 @@ def test_is_replaceable(self, platform_api_conn): # Functions to test methods defined in FanBase class # - def test_get_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] for j in range(self.num_psus): num_fans = psu.get_num_fans(platform_api_conn, j) @@ -236,7 +233,7 @@ def test_get_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, self.assert_expectations() - def test_get_direction(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_get_direction(self, duthosts, 
enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 # Ensure the fan speed is sane FAN_DIRECTION_LIST = [ "intake", @@ -255,8 +252,8 @@ def test_get_direction(self, duthosts, enum_rand_one_per_hwsku_hostname, localho self.assert_expectations() - def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] psus_skipped = 0 @@ -296,8 +293,7 @@ def test_get_fans_target_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, self.assert_expectations() - def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): - + def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 duthost = duthosts[enum_rand_one_per_hwsku_hostname] psus_skipped = 0 @@ -338,7 +334,7 @@ def test_set_fans_speed(self, duthosts, enum_rand_one_per_hwsku_hostname, localh self.assert_expectations() - def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): + def test_set_fans_led(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn): # noqa F811 LED_COLOR_LIST = [ "off", "red", diff --git a/tests/platform_tests/api/test_watchdog.py b/tests/platform_tests/api/test_watchdog.py index 73147b5b8a2..7a79ce9f246 100644 --- a/tests/platform_tests/api/test_watchdog.py +++ b/tests/platform_tests/api/test_watchdog.py @@ -6,6 +6,7 @@ import pytest from tests.common.helpers.platform_api import watchdog from tests.common.helpers.assertions import pytest_assert +from tests.common.platform.device_utils import platform_api_conn # noqa F401 from .platform_api_test_base import PlatformApiTestBase from collections import OrderedDict @@ -40,7 +41,7 @@ class TestWatchdogApi(PlatformApiTestBase): 
''' Hardware watchdog platform API test cases ''' @pytest.fixture(scope='function', autouse=True) - def watchdog_not_running(self, platform_api_conn, duthosts, enum_rand_one_per_hwsku_hostname): + def watchdog_not_running(self, platform_api_conn, duthosts, enum_rand_one_per_hwsku_hostname): # noqa F811 ''' Fixture that automatically runs on each test case and verifies that watchdog is not running before the test begins and disables it after the test ends''' @@ -92,7 +93,8 @@ def conf(self, request, duthosts, enum_rand_one_per_hwsku_hostname): return config @pytest.mark.dependency() - def test_arm_disarm_states(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, platform_api_conn, conf): + def test_arm_disarm_states(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, + platform_api_conn, conf): # noqa F811 ''' arm watchdog with a valid timeout value, verify it is in armed state, disarm watchdog and verify it is in disarmed state ''' @@ -139,7 +141,7 @@ def test_arm_disarm_states(self, duthosts, enum_rand_one_per_hwsku_hostname, loc self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_remaining_time(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): + def test_remaining_time(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): # noqa F811 ''' arm watchdog with a valid timeout and verify that remaining time API works correctly ''' watchdog_timeout = conf['valid_timeout'] @@ -168,7 +170,7 @@ def test_remaining_time(self, duthosts, enum_rand_one_per_hwsku_hostname, platfo self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_periodic_arm(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): + def test_periodic_arm(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): # noqa F811 ''' arm watchdog several times as watchdog deamon would and verify API behaves correctly ''' 
watchdog_timeout = conf['valid_timeout'] @@ -190,7 +192,8 @@ def test_periodic_arm(self, duthosts, enum_rand_one_per_hwsku_hostname, platform self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_arm_different_timeout_greater(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): + def test_arm_different_timeout_greater(self, duthosts, enum_rand_one_per_hwsku_hostname, + platform_api_conn, conf): # noqa F811 ''' arm the watchdog with greater timeout value and verify new timeout was accepted; If platform accepts only single valid timeout value, @greater_timeout should be None. ''' @@ -212,7 +215,8 @@ def test_arm_different_timeout_greater(self, duthosts, enum_rand_one_per_hwsku_h self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_arm_different_timeout_smaller(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): + def test_arm_different_timeout_smaller(self, duthosts, enum_rand_one_per_hwsku_hostname, + platform_api_conn, conf): # noqa F811 ''' arm the watchdog with smaller timeout value and verify new timeout was accepted; If platform accepts only single valid timeout value, @greater_timeout should be None. ''' @@ -235,7 +239,8 @@ def test_arm_different_timeout_smaller(self, duthosts, enum_rand_one_per_hwsku_h self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_arm_too_big_timeout(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn, conf): + def test_arm_too_big_timeout(self, duthosts, enum_rand_one_per_hwsku_hostname, + platform_api_conn, conf): # noqa F811 ''' try to arm the watchdog with timeout that is too big for hardware watchdog; If no such limitation exist, @too_big_timeout should be None for such platform. 
''' @@ -249,7 +254,7 @@ def test_arm_too_big_timeout(self, duthosts, enum_rand_one_per_hwsku_hostname, p self.assert_expectations() @pytest.mark.dependency(depends=["test_arm_disarm_states"]) - def test_arm_negative_timeout(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn): + def test_arm_negative_timeout(self, duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn): # noqa F811 ''' try to arm the watchdog with negative value ''' watchdog_timeout = -1 diff --git a/tests/smartswitch/common/device_utils_dpu.py b/tests/smartswitch/common/device_utils_dpu.py index 9b80882dd66..b75335428e0 100644 --- a/tests/smartswitch/common/device_utils_dpu.py +++ b/tests/smartswitch/common/device_utils_dpu.py @@ -4,14 +4,14 @@ import logging import pytest from tests.common.devices.sonic import * # noqa: F401,F403 -from tests.platform_tests.api.conftest import * # noqa: F401,F403 +from tests.common.platform.device_utils import platform_api_conn # noqa: F401,F403 from tests.common.helpers.platform_api import chassis, module from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert @pytest.fixture(scope='function') -def num_dpu_modules(platform_api_conn): +def num_dpu_modules(platform_api_conn): # noqa F811 """ Returns the number of DPU modules """ @@ -23,9 +23,8 @@ def num_dpu_modules(platform_api_conn): @pytest.fixture(scope='function', autouse=True) -def check_smartswitch_and_dark_mode(duthosts, - enum_rand_one_per_hwsku_hostname, - platform_api_conn, num_dpu_modules): +def check_smartswitch_and_dark_mode(duthosts, enum_rand_one_per_hwsku_hostname, + platform_api_conn, num_dpu_modules): # noqa F811 """ Checks whether given testbed is running 202405 image or below versions @@ -40,14 +39,13 @@ def check_smartswitch_and_dark_mode(duthosts, if "DPUS" not in duthost.facts: pytest.skip("Test is not supported for this testbed") - darkmode = is_dark_mode_enabled(duthost, platform_api_conn, - num_dpu_modules) + darkmode 
= is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules) # noqa F811 if darkmode: dpu_power_on(duthost, platform_api_conn, num_dpu_modules) -def is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules): +def is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules): # noqa F811 """ Checks the liveliness of DPU Returns: @@ -76,7 +74,7 @@ def is_dark_mode_enabled(duthost, platform_api_conn, num_dpu_modules): return False -def dpu_power_on(duthost, platform_api_conn, num_dpu_modules): +def dpu_power_on(duthost, platform_api_conn, num_dpu_modules): # noqa F811 """ Executes power on all DPUs Returns: diff --git a/tests/smartswitch/platform_tests/test_reload_dpu.py b/tests/smartswitch/platform_tests/test_reload_dpu.py index 1e8e7518f33..ac97d435b91 100644 --- a/tests/smartswitch/platform_tests/test_reload_dpu.py +++ b/tests/smartswitch/platform_tests/test_reload_dpu.py @@ -14,7 +14,7 @@ from tests.common.config_reload import config_force_option_supported, config_system_checks_passed # noqa: F401, E501 from tests.smartswitch.common.device_utils_dpu import * # noqa: F401,F403,E501 from tests.common.helpers.platform_api import chassis, module # noqa: F401 -from tests.platform_tests.api.conftest import * # noqa: F401,F403 +from tests.common.platform.device_utils import platform_api_conn # noqa: F401,F403 pytestmark = [ pytest.mark.topology('smartswitch') @@ -22,8 +22,7 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, - num_dpu_modules): + localhost, platform_api_conn, num_dpu_modules): # noqa F811 """ @summary: Verify output of `config chassis modules startup ` """ @@ -52,8 +51,7 @@ def test_dpu_ping_after_reboot(duthosts, enum_rand_one_per_hwsku_hostname, def test_show_ping_int_after_reload(duthosts, enum_rand_one_per_hwsku_hostname, - localhost, platform_api_conn, - num_dpu_modules): + localhost, platform_api_conn, num_dpu_modules): # noqa F811 """ @summary: To Check Ping between NPU 
and DPU after configuration reload on NPU diff --git a/tests/smartswitch/platform_tests/test_show_platform_dpu.py b/tests/smartswitch/platform_tests/test_show_platform_dpu.py index 5049975b67d..74951e9826a 100644 --- a/tests/smartswitch/platform_tests/test_show_platform_dpu.py +++ b/tests/smartswitch/platform_tests/test_show_platform_dpu.py @@ -8,7 +8,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.smartswitch.common.device_utils_dpu import * # noqa: F403,F401,E501 from tests.common.helpers.platform_api import chassis, module # noqa: F401 -from tests.platform_tests.api.conftest import * # noqa: F401,F403 +from tests.common.platform.device_utils import platform_api_conn # noqa: F401,F403 from tests.common.devices.sonic import * # noqa: 403 pytestmark = [ @@ -16,8 +16,7 @@ ] -def test_midplane_ip(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn): +def test_midplane_ip(duthosts, enum_rand_one_per_hwsku_hostname, platform_api_conn): # noqa F811 """ @summary: Verify `Midplane ip address between NPU and DPU` """ @@ -39,7 +38,7 @@ def test_midplane_ip(duthosts, enum_rand_one_per_hwsku_hostname, def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn, num_dpu_modules): + platform_api_conn, num_dpu_modules): # noqa F811 """ @summary: Verify `shut down and power up DPU` """ @@ -63,7 +62,7 @@ def test_shutdown_power_up_dpu(duthosts, enum_rand_one_per_hwsku_hostname, def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn, num_dpu_modules): + platform_api_conn, num_dpu_modules): # noqa F811 """ @summary: Verify `Reboot Cause` """ @@ -88,7 +87,7 @@ def test_reboot_cause(duthosts, enum_rand_one_per_hwsku_hostname, def test_pcie_link(duthosts, enum_rand_one_per_hwsku_hostname, - platform_api_conn, num_dpu_modules): + platform_api_conn, num_dpu_modules): # noqa F811 """ @summary: Verify `PCIe link` """ From 2b39717753542e5a6337131ed8292eed87bd053d Mon Sep 17 00:00:00 2001 
From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 09:48:03 +0800 Subject: [PATCH 143/175] Move the shared part in snappi to common place. (#15604) What is the motivation for this PR? Previously, there are some shared variables and scripts which were located in the feature-specific folder snappi_tests and were imported by the common scripts. To reduce cross-feature dependencies and improve modularity, I relocated them directly to the common path tests/common/snappi_tests. How did you do it? I relocated the shared part under folder snappi directly to the common path tests/common/snappi_tests. --- .../snappi_tests}/cisco_pfc_packet.py | 0 tests/common/snappi_tests/read_pcap.py | 2 +- tests/common/snappi_tests/snappi_fixtures.py | 5 ++--- .../common/snappi_tests/traffic_generation.py | 2 +- tests/common/snappi_tests/variables.py | 17 +++++++++++++++++ .../multidut/bgp/files/bgp_outbound_helper.py | 3 ++- ...response_to_external_pause_storms_helper.py | 2 +- ...sponse_to_throttling_pause_storms_helper.py | 2 +- .../files/m2o_fluctuating_lossless_helper.py | 2 +- .../files/m2o_oversubscribe_lossless_helper.py | 2 +- .../m2o_oversubscribe_lossless_lossy_helper.py | 2 +- .../files/m2o_oversubscribe_lossy_helper.py | 2 +- ...less_response_to_throttling_pause_storms.py | 2 +- .../test_m2o_oversubscribe_lossless_lossy.py | 2 +- .../pfcwd/files/pfcwd_multidut_basic_helper.py | 2 +- .../files/pfcwd_multidut_burst_storm_helper.py | 2 +- .../files/pfcwd_multidut_multi_node_helper.py | 2 +- .../pfcwd_multidut_runtime_traffic_helper.py | 2 +- .../pfcwd/files/pfcwd_basic_helper.py | 2 +- .../pfcwd/files/pfcwd_burst_storm_helper.py | 2 +- .../pfcwd/files/pfcwd_multi_node_helper.py | 2 +- .../files/pfcwd_runtime_traffic_helper.py | 2 +- tests/snappi_tests/variables.py | 18 ------------------ 23 files changed, 39 insertions(+), 40 deletions(-) rename tests/{snappi_tests/pfc/files => common/snappi_tests}/cisco_pfc_packet.py (100%) create 
mode 100644 tests/common/snappi_tests/variables.py diff --git a/tests/snappi_tests/pfc/files/cisco_pfc_packet.py b/tests/common/snappi_tests/cisco_pfc_packet.py similarity index 100% rename from tests/snappi_tests/pfc/files/cisco_pfc_packet.py rename to tests/common/snappi_tests/cisco_pfc_packet.py diff --git a/tests/common/snappi_tests/read_pcap.py b/tests/common/snappi_tests/read_pcap.py index f0a522b9576..fd93b27a420 100644 --- a/tests/common/snappi_tests/read_pcap.py +++ b/tests/common/snappi_tests/read_pcap.py @@ -3,7 +3,7 @@ from dpkt.utils import mac_to_str from tests.common.snappi_tests.pfc_packet import PFCPacket -from tests.snappi_tests.pfc.files.cisco_pfc_packet import CiscoPFCPacket +from tests.common.snappi_tests.cisco_pfc_packet import CiscoPFCPacket logger = logging.getLogger(__name__) diff --git a/tests/common/snappi_tests/snappi_fixtures.py b/tests/common/snappi_tests/snappi_fixtures.py index 6b268b3e409..300fce365ab 100755 --- a/tests/common/snappi_tests/snappi_fixtures.py +++ b/tests/common/snappi_tests/snappi_fixtures.py @@ -16,9 +16,8 @@ from tests.common.snappi_tests.snappi_helpers import SnappiFanoutManager, get_snappi_port_location from tests.common.snappi_tests.port import SnappiPortConfig, SnappiPortType from tests.common.helpers.assertions import pytest_assert -from tests.snappi_tests.variables import dut_ip_start, snappi_ip_start, prefix_length, \ - dut_ipv6_start, snappi_ipv6_start, v6_prefix_length, pfcQueueGroupSize, \ - pfcQueueValueDict # noqa: F401 +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict, dut_ip_start, snappi_ip_start, \ + prefix_length, dut_ipv6_start, snappi_ipv6_start, v6_prefix_length logger = logging.getLogger(__name__) diff --git a/tests/common/snappi_tests/traffic_generation.py b/tests/common/snappi_tests/traffic_generation.py index 005f53c0a00..49b21d08f35 100644 --- a/tests/common/snappi_tests/traffic_generation.py +++ b/tests/common/snappi_tests/traffic_generation.py @@ -11,7 
+11,7 @@ traffic_flow_mode from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp, fetch_snappi_flow_metrics -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from .variables import pfcQueueGroupSize, pfcQueueValueDict from tests.common.cisco_data import is_cisco_device logger = logging.getLogger(__name__) diff --git a/tests/common/snappi_tests/variables.py b/tests/common/snappi_tests/variables.py new file mode 100644 index 00000000000..e1b821141bb --- /dev/null +++ b/tests/common/snappi_tests/variables.py @@ -0,0 +1,17 @@ +pfcQueueGroupSize = 8 # can have values 4 or 8 +pfcQueueValueDict = {0: 0, + 1: 1, + 2: 0, + 3: 3, + 4: 2, + 5: 0, + 6: 1, + 7: 0} + +dut_ip_start = '20.1.1.0' +snappi_ip_start = '20.1.1.1' +prefix_length = 31 + +dut_ipv6_start = '2000:1::1' +snappi_ipv6_start = '2000:1::2' +v6_prefix_length = 126 diff --git a/tests/snappi_tests/multidut/bgp/files/bgp_outbound_helper.py b/tests/snappi_tests/multidut/bgp/files/bgp_outbound_helper.py index b77345db203..12de5bafbc7 100755 --- a/tests/snappi_tests/multidut/bgp/files/bgp_outbound_helper.py +++ b/tests/snappi_tests/multidut/bgp/files/bgp_outbound_helper.py @@ -14,13 +14,14 @@ from tests.common.helpers.assertions import pytest_assert # noqa: F401 from tests.common.snappi_tests.snappi_fixtures import create_ip_list # noqa: F401 from tests.snappi_tests.variables import T1_SNAPPI_AS_NUM, T2_SNAPPI_AS_NUM, T1_DUT_AS_NUM, T2_DUT_AS_NUM, t1_ports, \ - t2_uplink_portchannel_members, t1_t2_dut_ipv4_list, v4_prefix_length, v6_prefix_length, \ + t2_uplink_portchannel_members, t1_t2_dut_ipv4_list, v4_prefix_length, \ t1_t2_dut_ipv6_list, t1_t2_snappi_ipv4_list, portchannel_count, \ t1_t2_snappi_ipv6_list, t2_dut_portchannel_ipv4_list, t2_dut_portchannel_ipv6_list, \ snappi_portchannel_ipv4_list, snappi_portchannel_ipv6_list, AS_PATHS, \ BGP_TYPE, t1_side_interconnected_port, 
t2_side_interconnected_port, router_ids, \ snappi_community_for_t1, snappi_community_for_t2, SNAPPI_TRIGGER, DUT_TRIGGER, \ fanout_presence, t2_uplink_fanout_info # noqa: F401 +from tests.common.snappi_tests.variables import v6_prefix_length logger = logging.getLogger(__name__) total_routes = 0 diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py index fb139f3f255..5237c2c6cdf 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py @@ -15,7 +15,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import run_traffic, verify_pause_flow, \ setup_base_traffic_config # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) TEST_FLOW_NAME = 'Test Flow' diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py index b177fd58282..3177d527525 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py @@ -16,7 +16,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import run_traffic, verify_pause_flow, \ setup_base_traffic_config # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, 
pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py index 5da4ec7d6bf..f8a097de2e2 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py @@ -10,7 +10,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import run_traffic, \ setup_base_traffic_config # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) PAUSE_FLOW_NAME = 'Pause Storm' diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py index 3f34d6a341b..b3b79f86862 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py @@ -15,7 +15,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import setup_base_traffic_config, \ run_traffic # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) PAUSE_FLOW_NAME = 'Pause Storm' diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py index 302ea6b852a..5696454ddc3 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py +++ 
b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py @@ -15,7 +15,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import run_traffic, \ setup_base_traffic_config # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict from tests.common.portstat_utilities import parse_portstat # noqa: F401 logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py index 9bacdc7ade5..d60ca4ecca8 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py @@ -15,7 +15,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import setup_base_traffic_config, \ run_traffic # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) PAUSE_FLOW_NAME = 'Pause Storm' diff --git a/tests/snappi_tests/multidut/pfc/test_lossless_response_to_throttling_pause_storms.py b/tests/snappi_tests/multidut/pfc/test_lossless_response_to_throttling_pause_storms.py index 443f25a7ec7..3af2d58a702 100644 --- a/tests/snappi_tests/multidut/pfc/test_lossless_response_to_throttling_pause_storms.py +++ b/tests/snappi_tests/multidut/pfc/test_lossless_response_to_throttling_pause_storms.py @@ -13,7 +13,7 @@ from tests.snappi_tests.multidut.pfc.files.lossless_response_to_throttling_pause_storms_helper import ( run_lossless_response_to_throttling_pause_storms_test) from tests.common.snappi_tests.snappi_test_params import 
SnappiTestParams -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict # noqa: F401 +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] diff --git a/tests/snappi_tests/multidut/pfc/test_m2o_oversubscribe_lossless_lossy.py b/tests/snappi_tests/multidut/pfc/test_m2o_oversubscribe_lossless_lossy.py index 19321a14edd..e1200d5c1e9 100644 --- a/tests/snappi_tests/multidut/pfc/test_m2o_oversubscribe_lossless_lossy.py +++ b/tests/snappi_tests/multidut/pfc/test_m2o_oversubscribe_lossless_lossy.py @@ -14,7 +14,7 @@ run_pfc_m2o_oversubscribe_lossless_lossy_test ) # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams # noqa: F401 -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict # noqa: F401 +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py index 93995f72bda..8e4b33ccc7c 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py @@ -12,7 +12,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git 
a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py index daa2048a2af..afa8feff005 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py @@ -10,7 +10,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py index f1e4fd6f2c9..6a15b795db1 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py @@ -12,7 +12,7 @@ from tests.common.snappi_tests.port import select_ports # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py index 8e97d4f62df..f92ad44f9ae 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py +++ 
b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py @@ -8,7 +8,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp from tests.common.snappi_tests.snappi_test_params import SnappiTestParams -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict DATA_FLOW_NAME = "Data Flow" DATA_PKT_SIZE = 1024 diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py index da163989f58..ea1abc8458c 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py @@ -11,7 +11,7 @@ enable_packet_aging, start_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py index d6f9a18f2d2..a13a20fe74a 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py @@ -9,7 +9,7 @@ enable_packet_aging, start_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py 
b/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py index 4cdfe5b7228..e6aeb6202be 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py @@ -10,7 +10,7 @@ start_pfcwd, enable_packet_aging, get_pfcwd_poll_interval, get_pfcwd_detect_time, sec_to_nanosec from tests.common.snappi_tests.port import select_ports from tests.common.snappi_tests.snappi_helpers import wait_for_arp -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py index 14452d6cc41..832ffc991ea 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py @@ -6,7 +6,7 @@ from tests.common.snappi_tests.common_helpers import start_pfcwd, stop_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp -from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict +from tests.common.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict DATA_FLOW_NAME = "Data Flow" WARM_UP_TRAFFIC_NAME = "Warm Up Traffic" diff --git a/tests/snappi_tests/variables.py b/tests/snappi_tests/variables.py index c862c0c2f3b..d63fbfc9880 100644 --- a/tests/snappi_tests/variables.py +++ b/tests/snappi_tests/variables.py @@ -76,24 +76,6 @@ } } -dut_ip_start = '20.1.1.0' -snappi_ip_start = '20.1.1.1' -prefix_length = 31 - -dut_ipv6_start = '2000:1::1' -snappi_ipv6_start = '2000:1::2' -v6_prefix_length = 126 - -pfcQueueGroupSize = 8 # can have values 4 or 8 -pfcQueueValueDict = {0: 0, - 1: 1, - 2: 0, - 3: 3, - 4: 2, - 5: 0, - 6: 1, - 7: 0} - def 
create_ip_list(value, count, mask=32, incr=0): ''' From 5fbe52f2c88f061115e6f1523eb10690cf96a676 Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Thu, 21 Nov 2024 18:44:01 -0800 Subject: [PATCH 144/175] Fixing the import error and KeyError in snappi_test/multidut executions. (#15524) This PR attempts to fix the fixture-not-found error and KeyError that you are seeing in the snappi_test multidut runs. Pls let me know if this works for you. co-authorized by: jianquanye@microsoft.com --- tests/common/snappi_tests/common_helpers.py | 11 +++++++---- tests/snappi_tests/files/helper.py | 5 ++++- ...ssless_response_to_external_pause_storms_helper.py | 2 +- ...less_response_to_throttling_pause_storms_helper.py | 2 +- .../pfc/files/m2o_fluctuating_lossless_helper.py | 2 +- .../pfc/files/m2o_oversubscribe_lossless_helper.py | 2 +- .../files/m2o_oversubscribe_lossless_lossy_helper.py | 2 +- .../pfc/files/m2o_oversubscribe_lossy_helper.py | 2 +- .../pfcwd/test_multidut_pfcwd_basic_with_snappi.py | 6 +++--- 9 files changed, 20 insertions(+), 14 deletions(-) diff --git a/tests/common/snappi_tests/common_helpers.py b/tests/common/snappi_tests/common_helpers.py index 5521b6c2c97..37fd9454cc2 100644 --- a/tests/common/snappi_tests/common_helpers.py +++ b/tests/common/snappi_tests/common_helpers.py @@ -1137,6 +1137,9 @@ def get_interface_stats(duthost, port): n_out = parse_portstat(duthost.command('portstat -i {}'.format(port))['stdout_lines'])[port] i_stats[duthost.hostname][port] = n_out + for k in ['rx_ok', 'rx_err', 'rx_drp', 'rx_ovr', 'tx_ok', 'tx_err', 'tx_drp', 'tx_ovr']: + i_stats[duthost.hostname][port][k] = int("".join(i_stats[duthost.hostname][port][k].split(','))) + # rx_err, rx_ovr and rx_drp are counted in single counter rx_fail # tx_err, tx_ovr and tx_drp are counted in single counter tx_fail rx_err = ['rx_err', 'rx_ovr', 'rx_drp'] @@ -1144,9 +1147,9 @@ def get_interface_stats(duthost, port): rx_fail = 0 tx_fail = 0 
for m in rx_err: - rx_fail = rx_fail + int(n_out[m].replace(',', '')) + rx_fail = rx_fail + n_out[m] for m in tx_err: - tx_fail = tx_fail + int(n_out[m].replace(',', '')) + tx_fail = tx_fail + n_out[m] # Any throughput below 1MBps is measured as 0 for simplicity. thrput = n_out['rx_bps'] @@ -1160,8 +1163,8 @@ def get_interface_stats(duthost, port): else: i_stats[duthost.hostname][port]['rx_thrput_Mbps'] = 0 - i_stats[duthost.hostname][port]['rx_pkts'] = int(n_out['rx_ok'].replace(',', '')) - i_stats[duthost.hostname][port]['tx_pkts'] = int(n_out['tx_ok'].replace(',', '')) + i_stats[duthost.hostname][port]['rx_pkts'] = n_out['rx_ok'] + i_stats[duthost.hostname][port]['tx_pkts'] = n_out['tx_ok'] i_stats[duthost.hostname][port]['rx_fail'] = rx_fail i_stats[duthost.hostname][port]['tx_fail'] = tx_fail diff --git a/tests/snappi_tests/files/helper.py b/tests/snappi_tests/files/helper.py index 44b86b2c5ec..c57f4b4f490 100644 --- a/tests/snappi_tests/files/helper.py +++ b/tests/snappi_tests/files/helper.py @@ -8,6 +8,7 @@ from tests.common.reboot import reboot from tests.common.helpers.parallel import parallel_run from tests.common.utilities import wait_until +from tests.common.platform.interface_utils import check_interface_status_of_up_ports from tests.common.snappi_tests.snappi_fixtures import get_snappi_ports_for_rdma, \ snappi_dut_base_config, is_snappi_multidut @@ -128,17 +129,19 @@ def reboot_duts(setup_ports_and_dut, localhost, request): skip_warm_reboot(snappi_ports[1]['duthost'], reboot_type) def save_config_and_reboot(node, results=None): + up_bgp_neighbors = node.get_bgp_neighbors_per_asic("established") logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, node.hostname)) node.shell("mkdir /etc/sonic/orig_configs; mv /etc/sonic/config_db* /etc/sonic/orig_configs/") node.shell("sudo config save -y") reboot(node, localhost, reboot_type=reboot_type, safe_reboot=True) logger.info("Wait until the system is stable") wait_until(180, 20, 0, 
node.critical_services_fully_started) + wait_until(180, 20, 0, check_interface_status_of_up_ports, node) + wait_until(300, 10, 0, node.check_bgp_session_state_all_asics, up_bgp_neighbors, "established") # Convert the list of duthosts into a list of tuples as required for parallel func. args = set((snappi_ports[0]['duthost'], snappi_ports[1]['duthost'])) parallel_run(save_config_and_reboot, {}, {}, list(args), timeout=900) - yield def revert_config_and_reload(node, results=None): diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py index 5237c2c6cdf..8830cbbf42f 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py @@ -141,7 +141,7 @@ def run_lossless_response_to_external_pause_storms_test(api, dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py index 3177d527525..15a0559ca1b 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py +++ 
b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py @@ -148,7 +148,7 @@ def run_lossless_response_to_throttling_pause_storms_test(api, dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py index f8a097de2e2..8dc40c23dfd 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py @@ -130,7 +130,7 @@ def run_m2o_fluctuating_lossless_test(api, dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py index b3b79f86862..f3db2766cf6 100644 --- 
a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py @@ -133,7 +133,7 @@ def run_m2o_oversubscribe_lossless_test(api, dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py index 5696454ddc3..5dba3c588ec 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py @@ -138,7 +138,7 @@ def run_pfc_m2o_oversubscribe_lossless_lossy_test(api, dut_rx_port2 = tx_port[1]['peer_port'] dut_tx_port = rx_port['peer_port'] # Fetch relevant statistics - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py 
b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py index d60ca4ecca8..3d7b37a389c 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py @@ -139,7 +139,7 @@ def run_pfc_m2o_oversubscribe_lossy_test(api, dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] - pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[ingress_duthost.hostname][dut_tx_port]['tx_drp'] + pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets diff --git a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py index 9c09f674b45..1584c00fdd6 100644 --- a/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py +++ b/tests/snappi_tests/multidut/pfcwd/test_multidut_pfcwd_basic_with_snappi.py @@ -19,7 +19,7 @@ from tests.snappi_tests.multidut.pfcwd.files.pfcwd_multidut_basic_helper import run_pfcwd_basic_test from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.snappi_tests.files.helper import skip_pfcwd_test, reboot_duts, \ - setup_ports_and_dut # noqa: F401 + setup_ports_and_dut, multidut_port_info # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [pytest.mark.topology('multidut-tgen', 'tgen')] @@ -193,10 +193,10 @@ def test_pfcwd_basic_multi_lossless_prio_reboot(snappi_api, # no conn_graph_facts, # noqa F811 fanout_graph_facts_multidut, # noqa F811 localhost, - duthosts, - enum_dut_lossless_prio_with_completeness_level, # noqa: F811 + lossless_prio_list, # noqa F811 
tbinfo, # noqa: F811 prio_dscp_map, # noqa F811 + setup_ports_and_dut, # noqa: F811 reboot_duts, # noqa: F811 trigger_pfcwd): """ From 62e71c7025cd1e5170e74ad00c2ce4b1a7ee7756 Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:33:19 +1100 Subject: [PATCH 145/175] feat: add parallel run toggle to pipeline (#15667) Description of PR Add parallel run toggle to pipeline definition Summary: Fixes # (issue) Microsoft ADO 29843837 Approach What is the motivation for this PR? We want to enable parallel run via pipeline, so we need to add the parallel run toggle to the pipeline definition co-authorized by: jianquanye@microsoft.com --- .azure-pipelines/run-test-elastictest-template.yml | 5 +++++ .azure-pipelines/test_plan.py | 13 +++++++++++++ 2 files changed, 18 insertions(+) diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 4d1092e50eb..c49f927ece0 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -115,6 +115,10 @@ parameters: type: string default: "" + - name: ENABLE_PARALLEL_RUN + type: string + default: "" + # The number of retries when the script fails. 
Global retry if retry_cases_include and retry_cases_exclude are both empty, otherwise specific retry - name: RETRY_TIMES type: string @@ -258,6 +262,7 @@ steps: --repo-name ${{ parameters.REPO_NAME }} \ --mgmt-branch ${{ parameters.MGMT_BRANCH }} \ --stop-on-failure ${{ parameters.STOP_ON_FAILURE }} \ + --enable-parallel-run ${{ parameters.ENABLE_PARALLEL_RUN }} \ --retry-times ${{ parameters.RETRY_TIMES }} \ --retry-cases-include ${{ parameters.RETRY_CASES_INCLUDE }} \ --retry-cases-exclude ${{ parameters.RETRY_CASES_EXCLUDE }} \ diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index 4052be78e3d..b339ee05337 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -285,6 +285,7 @@ def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params }, "test_option": { "stop_on_failure": kwargs.get("stop_on_failure", True), + "enable_parallel_run": kwargs.get("enable_parallel_run", False), "retry_times": kwargs.get("retry_times", 2), "retry_cases_include": retry_cases_include, "retry_cases_exclude": retry_cases_exclude, @@ -823,6 +824,17 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte choices=[True, False], help="Stop whole test plan if test failed." ) + parser_create.add_argument( + "--enable-parallel-run", + type=ast.literal_eval, + dest="enable_parallel_run", + nargs='?', + const='False', + default='False', + required=False, + choices=[True, False], + help="Enable parallel run or not." 
+ ) parser_create.add_argument( "--retry-times", type=int, @@ -1045,6 +1057,7 @@ def poll(self, test_plan_id, interval=60, timeout=-1, expected_state="", expecte test_plan_type=args.test_plan_type, platform=args.platform, stop_on_failure=args.stop_on_failure, + enable_parallel_run=args.enable_parallel_run, retry_times=args.retry_times, retry_cases_include=args.retry_cases_include, retry_cases_exclude=args.retry_cases_exclude, From 9c2cbd009dad36c75202bdd6e8f5a3b7790a7fe4 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:03:17 +0800 Subject: [PATCH 146/175] Enforce cross-feature dependency checker in pipeline (#15692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What is the motivation for this PR? In PR #15559, we introduced a checker to identify cross-feature dependencies within our repository. At the time, since some dependencies still existed, the checker was configured to only run the script without enforcing any pipeline failures. Now that all cross-feature dependencies have been eliminated, we’ve updated the checker to capture the script's return value and trigger a pipeline failure if any cross-feature dependencies are detected. How did you do it? We’ve updated the checker to capture the script's return value and trigger a pipeline failure if any cross-feature dependencies are detected. How did you verify/test it? 
--- .azure-pipelines/dependency-check.yml | 6 +++++- .azure-pipelines/dependency_check/dependency_check.py | 1 + azure-pipelines.yml | 1 - 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.azure-pipelines/dependency-check.yml b/.azure-pipelines/dependency-check.yml index ea9161927c3..8022c3648b6 100644 --- a/.azure-pipelines/dependency-check.yml +++ b/.azure-pipelines/dependency-check.yml @@ -4,5 +4,9 @@ steps: pip3 install natsort - python3 ./.azure-pipelines/dependency_check/dependency_check.py tests + CHECK_RESULT=$(python3 ./.azure-pipelines/dependency_check/dependency_check.py tests) + if [[ "$CHECK_RESULT" == "True" ]]; then + echo "##vso[task.complete result=Failed;]Condition check failed." + exit 1 + fi displayName: "Dependency Check" diff --git a/.azure-pipelines/dependency_check/dependency_check.py b/.azure-pipelines/dependency_check/dependency_check.py index 17c24a2b35b..fd6ae983b62 100644 --- a/.azure-pipelines/dependency_check/dependency_check.py +++ b/.azure-pipelines/dependency_check/dependency_check.py @@ -205,6 +205,7 @@ def check_cross_dependency(imports_in_script): print("There is a cross-feature dependence. 
File: {}, import module: {}" .format(file_path, imported_module["module"])) cross_dependency = True + print(cross_dependency) return cross_dependency diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bd19abd9c7a..96c424bc1c5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -46,7 +46,6 @@ stages: - job: dependency_check displayName: "Dependency Check" timeoutInMinutes: 10 - continueOnError: true pool: sonic-common steps: - template: .azure-pipelines/dependency-check.yml From 55962d4e6132646a02e82485c850e59a6294c7c0 Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:10:44 +0800 Subject: [PATCH 147/175] [Bugfix] Add missing conditions for extended entries in `qos/test_buffer.py:` (#15663) Description of PR In #14912, we added conditions for longer matching entries in conditional marks. However, some conditions were missed under the entry qos/test_buffer.py:. This PR adds these missing conditions to entries that start with and extend beyond qos/test_buffer.py: Approach What is the motivation for this PR? In #14912, we added conditions for longer matching entries in conditional marks. However, some conditions were missed under the entry qos/test_buffer.py:. This PR adds these missing conditions to entries that start with and extend beyond qos/test_buffer.py: How did you do it? 
This PR adds these missing conditions to entries that start with and extend beyond qos/test_buffer.py --- tests/common/plugins/conditional_mark/tests_mark_conditions.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 3ed679e9de3..967725fa32a 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -1353,6 +1353,7 @@ qos/test_buffer.py::test_buffer_model_test: conditions_logical_operator: or conditions: - "asic_type in ['mellanox'] or asic_subtype in ['broadcom-dnx']" + - "asic_type in ['cisco-8000'] or 't2' in topo_name" - "topo_type in ['m0', 'mx']" qos/test_buffer_traditional.py: From 21317edaeadda5ca6f637de6f7e214d3ddd6eb56 Mon Sep 17 00:00:00 2001 From: harjotsinghpawra Date: Fri, 22 Nov 2024 02:09:36 -0800 Subject: [PATCH 148/175] test_snmp_queue_counters.py/test_telemetry.py config_reload and snmpwwalk output time delay fix, test_snmp_queue_counters.py multi-asic KeyError fix (#15688) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit test_snmp_queue_counters.py/test_telemetry.py config_reload and snmpwalk output time delay fix, test_snmp_queue_counters.py multi-asic KeyError fix Description of PR Scripts: test_snmp_queue_counters.py test_telemetry ///////////////////////////////////////////////// First Issue : When we run these scripts sometimes based on the platform and image along with other factors it takes some time for ports to come up and buffer queues to be generated and then further Snmp OID or even gnmi info to be genrated . In script we immediately try to snmpwalk after all docker are up . But interfaces are still not up so no oid is generated . 
Snmpwalk says No Such Instance currently exists at this OID whihc script count as 1 counter being created when none is created, which causes test case to fail. enum_rand_one_per_hwsku_frontend_hostname = 'mth64-m5-2' get_bfr_queue_cntrs_cmd = 'docker exec snmp snmpwalk -v2c -c public 1.74.23.17 1.3.6.1.4.1.9.9.580.1.5.5.1.4.1' hostip = '1.74.23.17' multicast_expected_diff = 16 queue_counters_cnt_post = 1 queue_counters_cnt_pre = 1 unicast_expected_diff = 8 ["docker exec snmp snmpwalk -v2c -c public 1.74.23.17 1.3.6.1.4.1.9.9.580.1.5.5.1.4.1"], kwargs={} 12:37:54 base._run L0108 �[35mDEBUG �[0m| /data/tests/common/devices/multi_asic.py::_run_on_asics#134: [mth64-m5-2] AnsibleModule::shell Result => {"changed": true, "stdout": "iso.3.6.1.4.1.9.9.580.1.5.5.1.4.1 = No Such Instance currently exists at this OID", "stderr": "", "rc": 0, "cmd": "docker exec snmp snmpwalk -v2c -c public 1.74.23.17 1.3.6.1.4.1.9.9.580.1.5.5.1.4.1", "start": "2024-08-28 12:37:55.343677", "end": "2024-08-28 12:37:55.452104", "delta": "0:00:00.108427", "msg": "", "invocation": {"module_args": {"_raw_params": "docker exec snmp snmpwalk -v2c -c public 1.74.23.17 1.3.6.1.4.1.9.9.580.1.5.5.1.4.1", "_uses_shell": true, "warn": false, "stdin_add_newline": true, "strip_empty_ends": true, "argv": null, "chdir": null, "executable": null, "creates": null, "removes": null, "stdin": null}}, "stdout_lines": ["iso.3.6.1.4.1.9.9.580.1.5.5.1.4.1 = No Such Instance currently exists at this OID"], "stderr_lines": [], "_ansible_no_log": null, "failed": false} ////////////////////////////////////////////////// Second issue : In test_snmp_queue_counters script in multi-asic case we choose a buffer_queue of first interface mentioned in BUFFER_QUEUE config and then we try to match that, also we search asic.namepace in queue name which is invalid check which causes buffer_queue_to_del to be None. 
This in turn fails the test case by saying that KeyError: None when we try to delete buffer result = testfunction(**testargs) File "/var/src/sonic-mgmt/tests/snmp/test_snmp_queue_counters.py", line 123, in test_snmp_queue_counters del data['BUFFER_QUEUE'][buffer_queue_to_del] KeyError: None Summary: Fixes #15683 and #15686 Approach What is the motivation for this PR? How did you do it? 1.) added necessary checks so that all the interfaces are up and oid's are generated only then take command output. 2.) changed wrong logic of multi asic buffer queue selection and alsoimproved it to work for both single and multi-asic system. 3.) Also added extra check where i match the OID's of counters generated by snmp with queuestat output because they should match queuestat gives the latest information. How did you verify/test it? Ran it on local CISCO platforms and its passing co-authorized by: jianquanye@microsoft.com --- tests/snmp/test_snmp_queue_counters.py | 55 ++++++++++++++++++-------- tests/telemetry/test_telemetry.py | 14 ++++++- 2 files changed, 51 insertions(+), 18 deletions(-) diff --git a/tests/snmp/test_snmp_queue_counters.py b/tests/snmp/test_snmp_queue_counters.py index 83824ea80ec..e35831c7a76 100644 --- a/tests/snmp/test_snmp_queue_counters.py +++ b/tests/snmp/test_snmp_queue_counters.py @@ -2,12 +2,12 @@ import json from tests.common import config_reload from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import wait_until CFG_DB_PATH = "/etc/sonic/config_db.json" ORIG_CFG_DB = "/etc/sonic/orig_config_db.json" UNICAST_CTRS = 4 MULTICAST_CTRS = 4 -BUFFER_QUEUES_REMOVED = 2 pytestmark = [ pytest.mark.topology('any', 't1-multi-asic'), @@ -17,13 +17,21 @@ def load_new_cfg(duthost, data): duthost.copy(content=json.dumps(data, indent=4), dest=CFG_DB_PATH) - config_reload(duthost, config_source='config_db', safe_reload=True) + config_reload(duthost, config_source='config_db', safe_reload=True, check_intf_up_ports=True, 
wait_for_bgp=True) def get_queue_ctrs(duthost, cmd): return len(duthost.shell(cmd)["stdout_lines"]) +def check_snmp_cmd_output(duthost, cmd): + out_len = len(duthost.shell(cmd)["stdout_lines"]) + if out_len > 1: + return True + else: + return False + + def get_queue_cntrs_oid(interface): """ @summary: Returns queue_cntrs_oid value based on the interface chosen @@ -82,15 +90,18 @@ def test_snmp_queue_counters(duthosts, if interface is None: pytest.skip("No active interface present on the asic {}".format(asic)) queue_cntrs_oid = get_queue_cntrs_oid(interface) + + get_queue_stat_cmd = "queuestat -p {}".format(interface) get_bfr_queue_cntrs_cmd \ = "docker exec snmp snmpwalk -v2c -c {} {} {}".format( creds_all_duts[duthost.hostname]['snmp_rocommunity'], hostip, queue_cntrs_oid) - # Generate sonic-cfggen commands for multi-asic and single-asic duts + # Generate sonic-cfggen and queue stat commands for multi-asic and single-asic duts if duthost.sonichost.is_multi_asic and asic is not None: ORIG_CFG_DB = "/etc/sonic/orig_config_db{}.json".format(asic.asic_index) CFG_DB_PATH = "/etc/sonic/config_db{}.json".format(asic.asic_index) cmd = "sonic-cfggen -n {} -d --print-data > {}".format(asic.namespace, ORIG_CFG_DB) + get_queue_stat_cmd = "queuestat -n {} -p {}".format(asic.namespace, interface) else: cmd = "sonic-cfggen -d --print-data > {}".format(ORIG_CFG_DB) @@ -98,17 +109,14 @@ def test_snmp_queue_counters(duthosts, data = json.loads(duthost.shell("cat {}".format(ORIG_CFG_DB), verbose=False)['stdout']) buffer_queue_to_del = None - # Get appropriate buffer queue value to delete in case of multi-asic - if duthost.sonichost.is_multi_asic: - buffer_queues = list(data['BUFFER_QUEUE'].keys()) - iface_to_check = buffer_queues[0].split('|')[0] - iface_buffer_queues = [bq for bq in buffer_queues if any(val in iface_to_check for val in bq.split('|'))] - for queue in iface_buffer_queues: - if asic.namespace in queue and queue.split('|')[-1] == '3-4' and queue.split('|')[-2] == 
interface: - buffer_queue_to_del = queue - break + + # Get appropriate buffer queue value to delete + buffer_queues = list(data['BUFFER_QUEUE'].keys()) + iface_buffer_queues = [bq for bq in buffer_queues if any(val in interface for val in bq.split('|'))] + if iface_buffer_queues: + buffer_queue_to_del = iface_buffer_queues[0] else: - buffer_queue_to_del = "{}|3-4".format(interface) + pytest_assert(False, "Buffer Queue list can't be empty if valid interface is selected.") # Add create_only_config_db_buffers entry to device metadata to enable # counters optimization and get number of queue counters of Ethernet0 prior @@ -116,13 +124,24 @@ def test_snmp_queue_counters(duthosts, data['DEVICE_METADATA']["localhost"]["create_only_config_db_buffers"] \ = "true" load_new_cfg(duthost, data) + stat_queue_counters_cnt_pre = (get_queue_ctrs(duthost, get_queue_stat_cmd) - 2) * UNICAST_CTRS + wait_until(60, 20, 0, check_snmp_cmd_output, duthost, get_bfr_queue_cntrs_cmd) queue_counters_cnt_pre = get_queue_ctrs(duthost, get_bfr_queue_cntrs_cmd) - # Remove buffer queue and reload and get number of queue counters of - # Ethernet0 after removing two buffer queues + # snmpwalk output should get info for same number of buffers as queuestat -p dose + pytest_assert((queue_counters_cnt_pre == stat_queue_counters_cnt_pre), + "Snmpwalk Queue counters actual count {} differs from expected queue stat count values {}". 
+ format(queue_counters_cnt_pre, stat_queue_counters_cnt_pre)) + + # Remove buffer queue and reload and get number of queue counters of selected interface del data['BUFFER_QUEUE'][buffer_queue_to_del] load_new_cfg(duthost, data) + stat_queue_counters_cnt_post = (get_queue_ctrs(duthost, get_queue_stat_cmd) - 2) * UNICAST_CTRS + wait_until(60, 20, 0, check_snmp_cmd_output, duthost, get_bfr_queue_cntrs_cmd) queue_counters_cnt_post = get_queue_ctrs(duthost, get_bfr_queue_cntrs_cmd) + pytest_assert((queue_counters_cnt_post == stat_queue_counters_cnt_post), + "Snmpwalk Queue counters actual count {} differs from expected queue stat count values {}". + format(queue_counters_cnt_post, stat_queue_counters_cnt_post)) # For broadcom-dnx voq chassis, number of voq are fixed (static), which cannot be modified dynamically # Hence, make sure the queue counters before deletion and after deletion are same for broadcom-dnx voq chassis @@ -132,8 +151,10 @@ def test_snmp_queue_counters(duthosts, format(queue_counters_cnt_post, queue_counters_cnt_pre)) # check for other duts else: - unicast_expected_diff = BUFFER_QUEUES_REMOVED * UNICAST_CTRS - multicast_expected_diff = unicast_expected_diff + (BUFFER_QUEUES_REMOVED + range_str = str(buffer_queue_to_del.split('|')[-1]) + buffer_queues_removed = int(range_str.split('-')[1]) - int(range_str.split('-')[0]) + 1 + unicast_expected_diff = buffer_queues_removed * UNICAST_CTRS + multicast_expected_diff = unicast_expected_diff + (buffer_queues_removed * MULTICAST_CTRS) pytest_assert((queue_counters_cnt_pre - queue_counters_cnt_post) in [unicast_expected_diff, multicast_expected_diff], diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index be487aac402..b6b4e23212f 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -31,7 +31,7 @@ def load_new_cfg(duthost, data): duthost.copy(content=json.dumps(data, indent=4), dest=CFG_DB_PATH) - config_reload(duthost, 
config_source='config_db', safe_reload=True) + config_reload(duthost, config_source='config_db', safe_reload=True, check_intf_up_ports=True, wait_for_bgp=True) # config reload overrides testing telemetry config, ensure testing config exists setup_telemetry_forpyclient(duthost) @@ -52,6 +52,14 @@ def get_buffer_queues_cnt(ptfhost, gnxi_path, dut_ip, iface, gnmi_port): return cnt +def check_buffer_queues_cnt_cmd_output(ptfhost, gnxi_path, dut_ip, iface_to_check, gnmi_port): + cnt = get_buffer_queues_cnt(ptfhost, gnxi_path, dut_ip, iface_to_check, gnmi_port) + if cnt > 0: + return True + else: + return False + + def test_config_db_parameters(duthosts, enum_rand_one_per_hwsku_hostname): """Verifies required telemetry parameters from config_db. """ @@ -169,11 +177,15 @@ def test_telemetry_queue_buffer_cnt(duthosts, enum_rand_one_per_hwsku_hostname, data['DEVICE_METADATA']["localhost"]["create_only_config_db_buffers"] \ = "true" load_new_cfg(duthost, data) + wait_until(60, 20, 0, check_buffer_queues_cnt_cmd_output, ptfhost, gnxi_path, + dut_ip, iface_to_check, env.gnmi_port) pre_del_cnt = get_buffer_queues_cnt(ptfhost, gnxi_path, dut_ip, iface_to_check, env.gnmi_port) # Remove buffer queue and reload and get new number of queue counters del data['BUFFER_QUEUE'][iface_buffer_queues[0]] load_new_cfg(duthost, data) + wait_until(60, 20, 0, check_buffer_queues_cnt_cmd_output, ptfhost, gnxi_path, + dut_ip, iface_to_check, env.gnmi_port) post_del_cnt = get_buffer_queues_cnt(ptfhost, gnxi_path, dut_ip, iface_to_check, env.gnmi_port) pytest_assert(pre_del_cnt > post_del_cnt, From 61da28e283514d53b11b45f67f1b5a558cfbfa28 Mon Sep 17 00:00:00 2001 From: Jianquan Ye Date: Fri, 22 Nov 2024 21:43:56 +1000 Subject: [PATCH 149/175] Add timeout for Cisco 8800 snmp (#15701) Description of PR Summary: Fixes MSFT ADO 30112399 BY default, the snmp timeout is 1s, Cisco 8800 has lots of interfacts, this causes the high chance of the timeout of snmp. Add timeout to tolerate the latency. 
Approach What is the motivation for this PR? Fix the snmp timeout issues on Cisco 8800 chassis. How did you do it? Enable timeout for snmp query. How did you verify/test it? Locally test on physical chassis testbed: snmp/test_snmp_psu.py::test_snmp_numpsu[x-sup-2] PASSED [ 50%] snmp/test_snmp_psu.py::test_snmp_psu_status[x-sup-2] PASSED [100%] co-authorized by: jianquanye@microsoft.com --- ansible/library/snmp_facts.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py index 141e11dc44b..a7dbdc9beb4 100644 --- a/ansible/library/snmp_facts.py +++ b/ansible/library/snmp_facts.py @@ -463,9 +463,10 @@ def Tree(): return defaultdict(Tree) elif current_oid == v.sysLocation: results['ansible_syslocation'] = current_val + # Cisco 8800 has lots of interfacts, add timeout to tolerate the latency errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), cmdgen.MibVariable(p.ifIndex,), cmdgen.MibVariable(p.ifDescr,), cmdgen.MibVariable(p.ifType,), @@ -890,9 +891,10 @@ def Tree(): return defaultdict(Tree) ifIndex = int(current_oid.split('.')[12]) results['snmp_interfaces'][ifIndex]['lldpRemManAddrOID'] = current_val + # Cisco 8800 has lots of interfacts, add timeout to tolerate the latency errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), cmdgen.MibVariable(p.cpfcIfRequests,), cmdgen.MibVariable(p.cpfcIfIndications,), cmdgen.MibVariable(p.requestsPerPriority,), From f0415611b7d28fa607facf0452687fc7ac2682df Mon Sep 17 00:00:00 2001 From: ranepbhagyashree Date: Fri, 22 Nov 2024 08:47:26 -0800 Subject: [PATCH 150/175] route_perf: Fix destination mac for multi asic (#15632) --- 
tests/route/test_route_perf.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py index d54f46d95ca..4d3d4fac58c 100644 --- a/tests/route/test_route_perf.py +++ b/tests/route/test_route_perf.py @@ -335,11 +335,12 @@ def test_perf_add_remove_routes( if ip_versions == 4: ip_dst = generate_ips(1, dst_nw, []) send_and_verify_traffic( - duthost, ptfadapter, tbinfo, ip_dst, ptf_dst_ports, ptf_src_port + asichost, duthost, ptfadapter, tbinfo, ip_dst, ptf_dst_ports, ptf_src_port ) else: ip_dst = dst_nw.split("/")[0] + "1" send_and_verify_traffic( + asichost, duthost, ptfadapter, tbinfo, @@ -366,11 +367,11 @@ def test_perf_add_remove_routes( def send_and_verify_traffic( - duthost, ptfadapter, tbinfo, ip_dst, expected_ports, ptf_src_port, ipv6=False + asichost, duthost, ptfadapter, tbinfo, ip_dst, expected_ports, ptf_src_port, ipv6=False ): if ipv6: pkt = testutils.simple_tcpv6_packet( - eth_dst=duthost.facts["router_mac"], + eth_dst=asichost.get_router_mac().lower(), eth_src=ptfadapter.dataplane.get_mac(0, ptf_src_port), ipv6_src="2001:db8:85a3::8a2e:370:7334", ipv6_dst=ip_dst, @@ -380,7 +381,7 @@ def send_and_verify_traffic( ) else: pkt = testutils.simple_tcp_packet( - eth_dst=duthost.facts["router_mac"], + eth_dst=asichost.get_router_mac().lower(), eth_src=ptfadapter.dataplane.get_mac(0, ptf_src_port), ip_src="1.1.1.1", ip_dst=ip_dst, From 1b4b3a93193fdca09c5b9f4a046a18f3d8b04b29 Mon Sep 17 00:00:00 2001 From: KISHORE KUNAL <64033340+kishorekunal01@users.noreply.github.com> Date: Fri, 22 Nov 2024 10:28:21 -0800 Subject: [PATCH 151/175] [FRR Upgrade] Remove deprecated code for bgpStatusCodes/bgpOriginCodes (#15691) by FRR Why I did it FRR has deprecated the code for bgpStatusCodes/bgpOriginCodes in below checkin. Hence updating the ansible library to handle this change. 
https://github.com/FRRouting/frr/pull/14981 Signed-off-by: Kishore Kunal --- ansible/library/bgp_route.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/library/bgp_route.py b/ansible/library/bgp_route.py index 743f1cd6a48..1b96997fde7 100644 --- a/ansible/library/bgp_route.py +++ b/ansible/library/bgp_route.py @@ -177,7 +177,7 @@ def parse_bgp_route_adv_json(self, cmd_result): for k, rt in res['advertisedRoutes'].items(): entry = dict() entry['nexthop'] = rt['nextHop'] - entry['origin'] = rt['bgpOriginCode'] + entry['origin'] = rt.get('bgpOriginCode', rt['origin']) # Use bgpOriginCode if present, else origin entry['weigh'] = rt['weight'] entry['aspath'] = rt['path'].split() self.facts['bgp_route_neiadv']["{}/{}".format( From a99b8487531bac71f5db7a8b4c74f800bbd7eafc Mon Sep 17 00:00:00 2001 From: arista-nwolfe <94405414+arista-nwolfe@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:15:38 -0500 Subject: [PATCH 152/175] Skip l3_alpm_enable check on broadcom-dnx platforms (#15516) Description of PR Broadcom confirmed that l3_alpm_enable soc property is only used in XGS platforms (CS00012377343) Therefor we should skip the check for this soc property on DNX platforms. 
Summary: Fixes #15511 Type of change Bug fix --- tests/route/test_route_perf.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py index 4d3d4fac58c..92c0b8f1573 100644 --- a/tests/route/test_route_perf.py +++ b/tests/route/test_route_perf.py @@ -60,6 +60,10 @@ def check_config(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_rand_ return duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + if (duthost.facts.get('platform_asic') == 'broadcom-dnx'): + # CS00012377343 - l3_alpm_enable isn't supported on dnx + return + asic = duthost.facts["asic_type"] asic_id = enum_rand_one_frontend_asic_index From 733a24fca3dad9d9300ddcdde317b221a2a45535 Mon Sep 17 00:00:00 2001 From: arista-nwolfe <94405414+arista-nwolfe@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:16:21 -0500 Subject: [PATCH 153/175] [Chassis] Fix iBGP skip in test_4-byte_asn_community.py (#15614) Description of PR Pretty much exactly the same issue as #15411 but in a different test. The fix is identical to so I'll leave this description short as the description in #15411 explains this issue as well. 
Summary: Fixes #15613 Type of change Bug fix --- tests/bgp/test_4-byte_asn_community.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/bgp/test_4-byte_asn_community.py b/tests/bgp/test_4-byte_asn_community.py index d58e2a60805..ae537d7081e 100644 --- a/tests/bgp/test_4-byte_asn_community.py +++ b/tests/bgp/test_4-byte_asn_community.py @@ -206,8 +206,8 @@ def setup_ceos(tbinfo, nbrhosts, duthosts, enum_frontend_dut_hostname, enum_rand # verify sessions are established and gather neighbor information for k, v in bgp_facts['bgp_neighbors'].items(): - # skip internal neighbors to other 'asic' namespaces - if 'asic' not in v['description'].lower(): + # skip iBGP neighbors + if "INTERNAL" not in v["peer group"] and "VOQ_CHASSIS" not in v["peer group"]: if v['description'] == neigh: if v['ip_version'] == 4: neigh_ip_v4 = k From 1c3671ca4f8abd78505283de2d46bdc07df4d3f0 Mon Sep 17 00:00:00 2001 From: agadia-cisco Date: Fri, 22 Nov 2024 19:17:06 -0800 Subject: [PATCH 154/175] added multi-asic handling in sonic-cfggen (#15572) Description of PR Summary: For Multi-Asic devices, test_gnmi_configdb_full_01 TC isn't using asic specific namespace, due to which PORT keys received from get_interface_status & one in config_db are not in sync. Fixes # (issue) Checks whether DUT is multi-asic or not; if yes, then generates configuration based on the asic namespace which contains the PORT key returned by get_inerface_status Approach What is the motivation for this PR? Issue 15407 : Multi-asic support for test_gnmi_configdb TCs This PR just adds the multi-asic support; but still TC would fail because Multi-Asic support for ApplyPatchDb API has to be provided, Issue link How did you do it? 
Checks whether DUT is multi-asic or not; if yes, then generates configuration based on the asic namespace which contains the PORT key returned by get_inerface_status co-authorized by: jianquanye@microsoft.com --- tests/gnmi/test_gnmi_configdb.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/tests/gnmi/test_gnmi_configdb.py b/tests/gnmi/test_gnmi_configdb.py index 53e3b47ee06..5e637178218 100644 --- a/tests/gnmi/test_gnmi_configdb.py +++ b/tests/gnmi/test_gnmi_configdb.py @@ -51,6 +51,18 @@ def get_interface_status(duthost, field, interface='Ethernet0'): return output["stdout"] +def get_sonic_cfggen_output(duthost, namespace=None): + ''' + Fetch and return the sonic-cfggen output + ''' + cmd = "sonic-cfggen -d --print-data" + if namespace: + cmd = f"sonic-cfggen -n {namespace} -d --print-data" + output = duthost.shell(cmd) + assert (not output['rc']), "No output" + return (json.loads(output["stdout"])) + + def test_gnmi_configdb_incremental_01(duthosts, rand_one_dut_hostname, ptfhost): ''' Verify GNMI native write, incremental config for configDB @@ -224,12 +236,19 @@ def test_gnmi_configdb_full_01(duthosts, rand_one_dut_hostname, ptfhost): Toggle interface admin status ''' duthost = duthosts[rand_one_dut_hostname] - output = duthost.shell("sonic-cfggen -d --print-data") - assert (not output['rc']), "No output" - dic = json.loads(output["stdout"]) - assert "PORT" in dic, "Failed to read running config" interface = get_first_interface(duthost) assert interface is not None, "Invalid interface" + + # Get ASIC namespace and check interface + if duthost.sonichost.is_multi_asic: + for asic in duthost.frontend_asics: + dic = get_sonic_cfggen_output(duthost, asic.namespace) + if interface in dic["PORT"]: + break + else: + dic = get_sonic_cfggen_output(duthost) + + assert "PORT" in dic, "Failed to read running config" assert interface in dic["PORT"], "Failed to get interface %s" % interface assert "admin_status" in 
dic["PORT"][interface], "Failed to get interface %s" % interface From 8f2b2df74eee8669d9907f50a9e86ab0e8dc2ac4 Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Fri, 22 Nov 2024 19:20:36 -0800 Subject: [PATCH 155/175] check pg_profile_lookup.ini in multiasic specifi path. (#15578) Description of PR Summary: tests/qos/test_buffers.py is failing in mAsic platforms since the pg_profile_lookup.ini path Approach What is the motivation for this PR? The script: qos/test_buffers.py is using wrong path for multi-asic platforms. It gives this error: if (res.is_failed or 'exception' in res) and not module_ignore_errors: > raise RunAnsibleModuleFail("run module {} failed".format(self.module_name), res) E tests.common.errors.RunAnsibleModuleFail: run module shell failed, Ansible Results => E {"changed": true, "cmd": "cat /usr/share/sonic/device/x86_64-8800_rp-r0/Cisco-8800-RP//pg_profile_lookup.ini", "delta": "0:00:00.006060", "end": "2024-10-31 14:58:57.717623", "failed": true, "msg": "non-zero return code", "rc": 1, "start": "2024-10-31 14:58:57.711563", "stderr": "cat: /usr/share/sonic/device/x86_64-8800_rp-r0/Cisco-8800-RP//pg_profile_lookup.ini: No such file or directory", "stderr_lines": ["cat: /usr/share/sonic/device/x86_64-8800_rp-r0/Cisco-8800-RP//pg_profile_lookup.ini: No such file or directory"], "stdout": "", "stdout_lines": []} How did you do it? Updated the script to use asic-specific path for this file. How did you verify/test it? Ran it on my TB. 
For cisco-8000 this script skips: --------------------------------------------------------------------------- generated xml file: /run_logs/buffer/2024-11-15-01-17-03/pfcwd/qos/test_buffer_2024-11-15-01-17-03.xml --------------------------------------------------------------------------- ------------------------------------------------------------------------------------------------------------------- live log sessionfinish ------------------------------------------------------------------------------------------------------------------- 01:30:05 __init__.pytest_terminal_summary L0067 INFO | Can not get Allure report URL. Please check logs ================================================================================================================== short test summary info =================================================================================================================== SKIPPED [17] qos/test_buffer.py: These tests don't apply to cisco 8000 platforms or T2 or m0/mx, since they support only traditional model. SKIPPED [1] qos/test_buffer.py:2390: These tests don't apply to cisco 8000 platforms or T2 or m0/mx, since they support only traditional model. 
SKIPPED [1] qos/test_buffer.py:400: Skip test in traditional model ========================================================================================================= 19 skipped, 1 warning in 780.02s (0:13:00) ========================================================================================================= DEBUG:tests.conftest:[log_custom_msg] item: co-authorized by: jianquanye@microsoft.com --- tests/qos/test_buffer.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/qos/test_buffer.py b/tests/qos/test_buffer.py index 9aa43b2cca9..784054ca54a 100644 --- a/tests/qos/test_buffer.py +++ b/tests/qos/test_buffer.py @@ -203,8 +203,11 @@ def load_lossless_headroom_data(duthost): dut_platform = duthost.facts["platform"] skudir = "/usr/share/sonic/device/{}/{}/".format( dut_platform, dut_hwsku) + asic_index = "" + if duthost.is_multi_asic: + asic_index = duthost.asic_instance().asic_index lines = duthost.shell( - 'cat {}/pg_profile_lookup.ini'.format(skudir))["stdout"] + f'cat {skudir}/{asic_index}/pg_profile_lookup.ini')["stdout"] DEFAULT_LOSSLESS_HEADROOM_DATA = {} for line in lines.split('\n'): if line[0] == '#': From 66326355f7848a4e7b9fcbe863877909981c55f4 Mon Sep 17 00:00:00 2001 From: byu343 Date: Fri, 22 Nov 2024 23:06:20 -0800 Subject: [PATCH 156/175] Fix test_warm_reboot_mac_jump for mac jump detection (#15329) The log level for fdbEvent messsage to detect mac jump is changed to NOTICE, so the regex used by the test has to be updated. 
--- tests/common/platform/reboot_timing_constants.py | 4 ++-- tests/common/platform/templates/expect_boot_messages | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/common/platform/reboot_timing_constants.py b/tests/common/platform/reboot_timing_constants.py index 186f6f6c58b..c48fef23f2f 100644 --- a/tests/common/platform/reboot_timing_constants.py +++ b/tests/common/platform/reboot_timing_constants.py @@ -56,9 +56,9 @@ "SYNCD_CREATE_SWITCH|End": re.compile( r'.*syncd#syncd.*performWarmRestartSingleSwitch: Warm boot: create switch VID.*'), "FDB_EVENT_OTHER_MAC_EXPIRY|Start": re.compile( - r".* INFO syncd#syncd.*SAI_API_FDB.*fdbEvent: (delete \(0\)|0) for mac (?!00-06-07-08-09-0A).*"), + r".*syncd#syncd.*SAI_API_FDB.*fdbEvent: (delete \(0\)|0) for mac (?!00-06-07-08-09-0A).*"), "FDB_EVENT_SCAPY_MAC_EXPIRY|Start": re.compile( - r".* INFO syncd#syncd.*SAI_API_FDB.*fdbEvent: (delete \(0\)|0) for mac 00-06-07-08-09-0A.*") + r".*syncd#syncd.*SAI_API_FDB.*fdbEvent: (delete \(0\)|0) for mac 00-06-07-08-09-0A.*") }, "MLNX": { "SYNCD_CREATE_SWITCH|Start": re.compile( diff --git a/tests/common/platform/templates/expect_boot_messages b/tests/common/platform/templates/expect_boot_messages index 1ca5986afb4..cd2dbcf5cf6 100644 --- a/tests/common/platform/templates/expect_boot_messages +++ b/tests/common/platform/templates/expect_boot_messages @@ -19,7 +19,7 @@ r, ".* NOTICE syncd#syncd.*performWarmRestart: switches defined in warm restart. 
r, ".* NOTICE syncd#syncd.*performWarmRestartSingleSwitch: Warm boot: create switch VID.*" r, ".* NOTICE bgp#fpmsyncd.*main: Warm-Restart timer started.*.*" r, ".* NOTICE bgp#fpmsyncd.*main: Warm-Restart reconciliation processed..*" -r, ".* INFO syncd#syncd.*SAI_API_FDB:_brcm_sai_fdb_event_cb.*fdbEvent: (delete \(0\)|0) for mac.*" +r, ".* syncd#syncd.*SAI_API_FDB:_brcm_sai_fdb_event_cb.*fdbEvent: (delete \(0\)|0) for mac.*" r, ".* NOTICE swss#orchagent.*setAgingFDB: Set switch.*fdb_aging_time 0 sec" r, ".* NOTICE swss#orchagent.*do.*Task: Set switch attribute fdb_aging_time to 600" From b02d8e9b6b6e33a202704fa689062760fa860cb4 Mon Sep 17 00:00:00 2001 From: Aaron Payment Date: Sun, 24 Nov 2024 16:39:59 -0800 Subject: [PATCH 157/175] sonic-mgmt: Assert if Arista Hwsku is not found in port_utils (#15287) Assert to catch when a new hwsku is added so that the proper get_port_alias_to_name_map can be added. Signed-off-by: Aaron Payment --- ansible/module_utils/port_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index 8f195d1fe2b..93103d8195d 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -437,9 +437,9 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): # this device simulates 32 ports, with 4 as the step for port naming. for i in range(0, 32, 4): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif "Arista" in hwsku and "FM" not in hwsku: + assert False, "Please add hwsku %s to port_alias_to_name_map" % hwsku else: - if "Arista-7800" in hwsku: - assert False, "Please add port_alias_to_name_map for new modular SKU %s." 
% hwsku for i in range(0, 128, 4): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i From 28f5fa8dde27f0d931cfe8682663f2a503234d6b Mon Sep 17 00:00:00 2001 From: ShiyanWangMS Date: Mon, 25 Nov 2024 18:33:18 +0800 Subject: [PATCH 158/175] init commit (#15722) --- ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 | 2 +- ansible/roles/eos/templates/t1-isolated-d128-tor.j2 | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) mode change 100644 => 120000 ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 create mode 120000 ansible/roles/eos/templates/t1-isolated-d128-tor.j2 diff --git a/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 b/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 deleted file mode 100644 index a60cf79c0e0..00000000000 --- a/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 +++ /dev/null @@ -1 +0,0 @@ -t0-leaf.j2 diff --git a/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 b/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 new file mode 120000 index 00000000000..8430cb1debd --- /dev/null +++ b/ansible/roles/eos/templates/t0-isolated-d128u128s2-leaf.j2 @@ -0,0 +1 @@ +t0-leaf.j2 \ No newline at end of file diff --git a/ansible/roles/eos/templates/t1-isolated-d128-tor.j2 b/ansible/roles/eos/templates/t1-isolated-d128-tor.j2 new file mode 120000 index 00000000000..86b7960d847 --- /dev/null +++ b/ansible/roles/eos/templates/t1-isolated-d128-tor.j2 @@ -0,0 +1 @@ +t1-tor.j2 \ No newline at end of file From a0e7e2d021ac9e1f5503e60ecffa03af95b0bed7 Mon Sep 17 00:00:00 2001 From: Dashuai Zhang <164845223+sdszhang@users.noreply.github.com> Date: Tue, 26 Nov 2024 00:28:56 +1100 Subject: [PATCH 159/175] [Snappi] Fixing dut/port mapping for counters (#15631) Description of PR Summary: Fixing dut port mapping when retrieving counter. Otherwise, ingress duthost maybe used to retrieve egress counters. In Snappi ports, the ingress dut and port maybe different for each port. 
In current code, it uses same ingress dut for both ports. dut_rx_port1 = tx_port[0]['peer_port'] dut_rx_port2 = tx_port[1]['peer_port'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] which results in the counter was retrieved on incorrect dut, and get the following error: FAILED snappi_tests/multidut/pfc/test_lossless_response_to_throttling_pause_storms.py::test_lossless_response_to_throttling_pause_storms[multidut_port_info0] ...... pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage > drop_percentage = 100 * pkt_drop / total_rx_pkts E ZeroDivisionError: division by zero Type of change Approach What is the motivation for this PR? Fixing dut port mapping when retrieving counter. 
co-authorized by: jianquanye@microsoft.com --- tests/common/snappi_tests/snappi_fixtures.py | 2 +- ...ss_response_to_external_pause_storms_helper.py | 14 ++++++++------ ..._response_to_throttling_pause_storms_helper.py | 14 ++++++++------ .../pfc/files/m2o_fluctuating_lossless_helper.py | 14 ++++++++------ .../files/m2o_oversubscribe_lossless_helper.py | 14 ++++++++------ .../m2o_oversubscribe_lossless_lossy_helper.py | 14 ++++++++------ .../pfc/files/m2o_oversubscribe_lossy_helper.py | 15 ++++++++------- 7 files changed, 49 insertions(+), 38 deletions(-) diff --git a/tests/common/snappi_tests/snappi_fixtures.py b/tests/common/snappi_tests/snappi_fixtures.py index 300fce365ab..816266fcd3f 100755 --- a/tests/common/snappi_tests/snappi_fixtures.py +++ b/tests/common/snappi_tests/snappi_fixtures.py @@ -1116,7 +1116,7 @@ def get_snappi_ports_single_dut(duthosts, # noqa: F811 dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|') pytest_require(rand_one_dut_hostname == dut_hostname, - "Port is not mapped to the expected DUT") + "{} Port is not mapped to the expected DUT".format(rand_one_dut_portname_oper_up)) """ Generate L1 config """ snappi_fanout = get_peer_snappi_chassis(conn_data=conn_graph_facts, diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py index 8830cbbf42f..4a86c12e257 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_external_pause_storms_helper.py @@ -82,10 +82,10 @@ def run_lossless_response_to_external_pause_storms_test(api, tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - 
snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -138,12 +138,14 @@ def run_lossless_response_to_external_pause_storms_test(api, snappi_extra_params=snappi_extra_params) dut_tx_port = rx_port['peer_port'] - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py index 15a0559ca1b..58c7bc26512 100644 --- a/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/lossless_response_to_throttling_pause_storms_helper.py @@ -88,10 +88,10 @@ def run_lossless_response_to_throttling_pause_storms_test(api, tx_port = 
[snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -145,12 +145,14 @@ def run_lossless_response_to_throttling_pause_storms_test(api, snappi_extra_params=snappi_extra_params) dut_tx_port = rx_port['peer_port'] - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py index 8dc40c23dfd..028bb80258b 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py +++ 
b/tests/snappi_tests/multidut/pfc/files/m2o_fluctuating_lossless_helper.py @@ -75,10 +75,10 @@ def run_m2o_fluctuating_lossless_test(api, tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -127,12 +127,14 @@ def run_m2o_fluctuating_lossless_test(api, snappi_extra_params=snappi_extra_params) dut_tx_port = rx_port['peer_port'] - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py index f3db2766cf6..d6015fee924 100644 
--- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_helper.py @@ -76,10 +76,10 @@ def run_m2o_oversubscribe_lossless_test(api, tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -130,12 +130,14 @@ def run_m2o_oversubscribe_lossless_test(api, snappi_extra_params=snappi_extra_params) dut_tx_port = rx_port['peer_port'] - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py 
b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py index 5dba3c588ec..0ab1ffb53c7 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossless_lossy_helper.py @@ -83,10 +83,10 @@ def run_pfc_m2o_oversubscribe_lossless_lossy_test(api, tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -134,13 +134,15 @@ def run_pfc_m2o_oversubscribe_lossless_lossy_test(api, exp_dur_sec=DATA_FLOW_DURATION_SEC + DATA_FLOW_DELAY_SEC, snappi_extra_params=snappi_extra_params) - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] dut_tx_port = rx_port['peer_port'] + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] # Fetch relevant statistics pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate 
the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage diff --git a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py index 3d7b37a389c..90919abb367 100644 --- a/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/m2o_oversubscribe_lossy_helper.py @@ -81,10 +81,10 @@ def run_pfc_m2o_oversubscribe_lossy_test(api, tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] - ingress_duthost = tx_port[0]['duthost'] # Append the ingress here for run_traffic to clear its counters - snappi_extra_params.multi_dut_params.ingress_duthosts.append(ingress_duthost) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[0]['duthost']) + snappi_extra_params.multi_dut_params.ingress_duthosts.append(tx_port[1]['duthost']) tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] # add ingress DUT into the set @@ -136,12 +136,13 @@ def run_pfc_m2o_oversubscribe_lossy_test(api, snappi_extra_params=snappi_extra_params) dut_tx_port = rx_port['peer_port'] - dut_rx_port1 = tx_port[0]['peer_port'] - dut_rx_port2 = tx_port[1]['peer_port'] - + ingress_dut1 = tx_port[0]['duthost'] + ingress_dut2 = tx_port[1]['duthost'] + ingress_port1 = tx_port[0]['peer_port'] + ingress_port2 = tx_port[1]['peer_port'] pkt_drop = get_interface_stats(egress_duthost, dut_tx_port)[egress_duthost.hostname][dut_tx_port]['tx_drp'] - rx_pkts_1 = get_interface_stats(ingress_duthost, dut_rx_port1)[ingress_duthost.hostname][dut_rx_port1]['rx_ok'] - rx_pkts_2 = get_interface_stats(ingress_duthost, dut_rx_port2)[ingress_duthost.hostname][dut_rx_port2]['rx_ok'] + rx_pkts_1 = get_interface_stats(ingress_dut1, ingress_port1)[ingress_dut1.hostname][ingress_port1]['rx_ok'] + rx_pkts_2 = get_interface_stats(ingress_dut2, 
ingress_port2)[ingress_dut2.hostname][ingress_port2]['rx_ok'] # Calculate the total received packets total_rx_pkts = rx_pkts_1 + rx_pkts_2 # Calculate the drop percentage From 37e533d1f153ba04107b2ed3eb41fbdae466f690 Mon Sep 17 00:00:00 2001 From: Kumaresh Babu JP <100332470+kbabujp@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:48:21 +0530 Subject: [PATCH 160/175] Changes to support chip name change from innovium to marvell-teralynx (#14330) Adding the changes to support chip name change from Innovium to marvell-teralynx Summary: The chip name for innvium is changed to marvell-teralynx. Same need to be modified in sonic-mgmt where we do conditional checks for different scripts. This PR has dependencies on the below sonic-buildimage PR sonic-net/sonic-buildimage#19829 --- .../files/acstests/everflow_policer_test.py | 8 ++++--- .../test/files/acstests/everflow_tb_test.py | 4 ++-- tests/common/innovium_data.py | 2 -- tests/common/marvell_teralynx_data.py | 2 ++ .../tests_mark_conditions.yaml | 23 ++++++++++--------- tests/common/system_utils/docker.py | 4 ++-- tests/everflow/everflow_test_utilities.py | 4 ++-- tests/everflow/test_everflow_testbed.py | 2 +- tests/fib/test_fib.py | 2 +- tests/ipfwd/test_nhop_group.py | 6 ++--- tests/qos/test_buffer.py | 6 ++--- tests/saitests/py3/sai_qos_tests.py | 4 ++-- 12 files changed, 35 insertions(+), 32 deletions(-) delete mode 100644 tests/common/innovium_data.py create mode 100644 tests/common/marvell_teralynx_data.py diff --git a/ansible/roles/test/files/acstests/everflow_policer_test.py b/ansible/roles/test/files/acstests/everflow_policer_test.py index 09c5cc96b9f..00f611b7474 100644 --- a/ansible/roles/test/files/acstests/everflow_policer_test.py +++ b/ansible/roles/test/files/acstests/everflow_policer_test.py @@ -216,7 +216,8 @@ def checkMirroredFlow(self): if self.asic_type in ["mellanox"]: import binascii payload = binascii.unhexlify("0"*44) + str(payload) # Add the padding - elif self.asic_type in ["innovium"] or 
self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: + elif self.asic_type in ["marvell-teralynx"] or \ + self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: import binascii payload = binascii.unhexlify("0"*24) + str(payload) # Add the padding @@ -248,7 +249,7 @@ def checkMirroredFlow(self): masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "flags") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") - if self.asic_type in ["innovium"]: + if self.asic_type in ["marvell-teralynx"]: masked_exp_pkt.set_do_not_care_scapy(scapy.GRE, "seqnum_present") if self.asic_type in ["marvell"]: masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "id") @@ -270,7 +271,8 @@ def match_payload(pkt): pkt = scapy.Ether(pkt).load pkt = pkt[22:] # Mask the Mellanox specific inner header pkt = scapy.Ether(pkt) - elif self.asic_type in ["innovium"] or self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: + elif self.asic_type in ["marvell-teralynx"] or \ + self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: pkt = scapy.Ether(pkt)[scapy.GRE].payload pkt_str = str(pkt) pkt = scapy.Ether(pkt_str[8:]) diff --git a/ansible/roles/test/files/acstests/everflow_tb_test.py b/ansible/roles/test/files/acstests/everflow_tb_test.py index 6cfb48dfa72..5977332873a 100644 --- a/ansible/roles/test/files/acstests/everflow_tb_test.py +++ b/ansible/roles/test/files/acstests/everflow_tb_test.py @@ -146,7 +146,7 @@ def sendReceive(self, pkt2send, src_port, destination_ports): payload = str(scapy_pkt[scapy.GRE].payload)[22:] if self.asic_type in ["barefoot"]: payload = str(scapy_pkt[scapy.GRE].payload)[12:] - if self.asic_type in ["innovium"]: + if self.asic_type in ["marvell-teralynx"]: payload = str(scapy_pkt[scapy.GRE].payload)[8:] inner_pkt = scapy.Ether(payload) @@ -270,4 +270,4 @@ def runTest(self): (tests_passed, tests_total) = self.runEverflowTests() print("Passed %d test of %d" % (tests_passed, tests_total)) - assert(tests_passed == tests_total) + 
assert (tests_passed == tests_total) diff --git a/tests/common/innovium_data.py b/tests/common/innovium_data.py deleted file mode 100644 index c0daa5de02b..00000000000 --- a/tests/common/innovium_data.py +++ /dev/null @@ -1,2 +0,0 @@ -def is_innovium_device(dut): - return dut.facts["asic_type"] == "innovium" diff --git a/tests/common/marvell_teralynx_data.py b/tests/common/marvell_teralynx_data.py new file mode 100644 index 00000000000..1662c3e56e0 --- /dev/null +++ b/tests/common/marvell_teralynx_data.py @@ -0,0 +1,2 @@ +def is_marvell_teralynx_device(dut): + return dut.facts["asic_type"] == "marvell-teralynx" diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 967725fa32a..4342670258e 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -53,9 +53,9 @@ arp/test_neighbor_mac_noptf.py: arp/test_unknown_mac.py: skip: - reason: "Behavior on cisco-8000 & Innovium(Marvell) platform for unknown MAC is flooding rather than DROP, hence skipping." + reason: "Behavior on cisco-8000 & (Marvell) platform for unknown MAC is flooding rather than DROP, hence skipping." conditions: - - "asic_type in ['cisco-8000','innovium']" + - "asic_type in ['cisco-8000','marvell-teralynx']" arp/test_wr_arp.py: skip: @@ -295,17 +295,17 @@ decap/test_decap.py::test_decap[ttl=pipe, dscp=pipe, vxlan=set_unset]: decap/test_decap.py::test_decap[ttl=pipe, dscp=uniform, vxlan=disable]: skip: conditions_logical_operator: or - reason: "Not supported on backend, broadcom before 202012 release, innovium platform. Skip 7260CX3 T1 topo in 202305 release" + reason: "Not supported on backend, broadcom before 202012 release, marvell-teralynx platform. 
Skip 7260CX3 T1 topo in 202305 release" conditions: - - "(topo_name in ['t1-backend', 't0-backend']) or (asic_type in ['broadcom'] and release in ['201811', '201911']) or asic_type in ['innovium']" + - "(topo_name in ['t1-backend', 't0-backend']) or (asic_type in ['broadcom'] and release in ['201811', '201911']) or asic_type in ['marvell-teralynx']" - "'7260CX3' in hwsku and release in ['202305'] and 't1' in topo_type" decap/test_decap.py::test_decap[ttl=pipe, dscp=uniform, vxlan=set_unset]: skip: - reason: "Not supported on backend, T2 topologies , broadcom platforms before 202012 release, innovium, x86_64-8111_32eh_o-r0 platform. Skip on mellanox dualtor setups for github issue #9646. Skip on 7260CX3 T1 topo in 202305 release" + reason: "Not supported on backend, T2 topologies , broadcom platforms before 202012 release, marvell-teralynx, x86_64-8111_32eh_o-r0 platform. Skip on mellanox dualtor setups for github issue #9646. Skip on 7260CX3 T1 topo in 202305 release" conditions_logical_operator: or conditions: - - "('t2' in topo_name) or (topo_name in ['t1-backend', 't0-backend']) or (asic_type in ['broadcom'] and release in ['201811', '201911']) or asic_type in ['innovium'] or platform in ['x86_64-8111_32eh_o-r0']" + - "('t2' in topo_name) or (topo_name in ['t1-backend', 't0-backend']) or (asic_type in ['broadcom'] and release in ['201811', '201911']) or asic_type in ['marvell-teralynx'] or platform in ['x86_64-8111_32eh_o-r0']" - "https://github.com/sonic-net/sonic-mgmt/issues/9646 and 'dualtor' in topo_name and asic_type in ['mellanox']" - "'7260CX3' in hwsku and release in ['202305'] and 't1' in topo_type" @@ -1087,11 +1087,12 @@ ip/test_ip_packet.py: ip/test_ip_packet.py::TestIPPacket::test_forward_ip_packet_with_0xffff_chksum_drop: skip: - reason: "Broadcom, Cisco, Barefoot, Innovium and Marvell Asic will tolorate IP packets with 0xffff checksum + reason: "Broadcom, Cisco, Barefoot, and Marvell Asic will tolorate IP packets with 0xffff checksum / Skipping ip 
packet test since can't provide enough interfaces" conditions_logical_operator: or conditions: - - "asic_type in ['broadcom', 'cisco-8000', 'marvell', 'barefoot', 'innovium'] and asic_subtype not in ['broadcom-dnx']" + + - "asic_type in ['broadcom', 'cisco-8000', 'marvell', 'barefoot', 'marvell-teralynx'] and asic_subtype not in ['broadcom-dnx']" - "len(minigraph_interfaces) < 2 and len(minigraph_portchannels) < 2" ip/test_ip_packet.py::TestIPPacket::test_forward_ip_packet_with_0xffff_chksum_tolerant: @@ -1837,7 +1838,7 @@ sub_port_interfaces: skip: reason: "Unsupported platform or asic" conditions: - - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','innovium']" + - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','marvell-teralynx']" sub_port_interfaces/test_show_subinterface.py::test_subinterface_status[port]: skip: @@ -1845,7 +1846,7 @@ sub_port_interfaces/test_show_subinterface.py::test_subinterface_status[port]: conditions_logical_operator: or conditions: - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-buildimage/issues/19735" - - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','innovium']" + - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','marvell-teralynx']" sub_port_interfaces/test_show_subinterface.py::test_subinterface_status[port_in_lag]: skip: @@ -1861,7 +1862,7 @@ sub_port_interfaces/test_sub_port_interfaces.py::TestSubPorts::test_tunneling_be conditions_logical_operator: or conditions: - "asic_type=='cisco-8000'" - - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','innovium']" + - "is_multi_asic==True or asic_gen not in ['td2', 'spc1', 'spc2', 'spc3', 'spc4'] and asic_type not in ['barefoot','marvell-teralynx']" 
sub_port_interfaces/test_sub_port_interfaces.py::TestSubPorts::test_untagged_packet_not_routed[port_in_lag] : skip: diff --git a/tests/common/system_utils/docker.py b/tests/common/system_utils/docker.py index e6beb2c4730..dc7fdf06a35 100644 --- a/tests/common/system_utils/docker.py +++ b/tests/common/system_utils/docker.py @@ -10,7 +10,7 @@ from tests.common.mellanox_data import is_mellanox_device from tests.common.errors import RunAnsibleModuleFail from tests.common.cisco_data import is_cisco_device -from tests.common.innovium_data import is_innovium_device +from tests.common.marvell_teralynx_data import is_marvell_teralynx_device from tests.common.helpers.constants import DEFAULT_NAMESPACE logger = logging.getLogger(__name__) @@ -244,7 +244,7 @@ def _get_vendor_id(duthost): vendor_id = "mlnx" elif is_cisco_device(duthost): vendor_id = "cisco" - elif is_innovium_device(duthost): + elif is_marvell_teralynx_device(duthost): vendor_id = "invm" else: error_message = '"{}" does not currently support swap_syncd'.format(duthost.facts["asic_type"]) diff --git a/tests/everflow/everflow_test_utilities.py b/tests/everflow/everflow_test_utilities.py index 8377cb40548..fa694220fdc 100644 --- a/tests/everflow/everflow_test_utilities.py +++ b/tests/everflow/everflow_test_utilities.py @@ -862,7 +862,7 @@ def get_expected_mirror_packet(mirror_session, setup, duthost, direction, mirror else: payload = binascii.unhexlify("0" * 44) + bytes(payload) if ( - duthost.facts["asic_type"] in ["barefoot", "cisco-8000", "innovium"] + duthost.facts["asic_type"] in ["barefoot", "cisco-8000", "marvell-teralynx"] or duthost.facts.get("platform_asic") in ["broadcom-dnx"] or duthost.facts["hwsku"] in ["rd98DX35xx", "rd98DX35xx_cn9131", "Nokia-7215-A1"] @@ -893,7 +893,7 @@ def get_expected_mirror_packet(mirror_session, setup, duthost, direction, mirror if duthost.facts["asic_type"] == 'marvell': expected_packet.set_do_not_care_scapy(packet.IP, "id") expected_packet.set_do_not_care_scapy(packet.GRE, 
"seqnum_present") - if duthost.facts["asic_type"] in ["cisco-8000", "innovium"] or \ + if duthost.facts["asic_type"] in ["cisco-8000", "marvell-teralynx"] or \ duthost.facts.get("platform_asic") in ["broadcom-dnx"]: expected_packet.set_do_not_care_scapy(packet.GRE, "seqnum_present") diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index cfe3c8f109e..114f7549e91 100644 --- a/tests/everflow/test_everflow_testbed.py +++ b/tests/everflow/test_everflow_testbed.py @@ -529,7 +529,7 @@ def test_everflow_dscp_with_policer( hostvars = everflow_dut.host.options['variable_manager']._hostvars[everflow_dut.hostname] everflow_tolerance = 10 - if vendor == 'innovium': + if vendor == 'marvell-teralynx': everflow_tolerance = 11 rate_limit = 100 diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index e65b90d81e2..d98337ae71b 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -189,7 +189,7 @@ def hash_keys(duthost): hash_keys.remove('ip-proto') if 'ingress-port' in hash_keys: hash_keys.remove('ingress-port') - if duthost.facts['asic_type'] in ["innovium", "cisco-8000"]: + if duthost.facts['asic_type'] in ["marvell-teralynx", "cisco-8000"]: if 'ip-proto' in hash_keys: hash_keys.remove('ip-proto') # remove the ingress port from multi asic platform diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index 86a1500b685..0a801b12d1f 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -14,7 +14,7 @@ from tests.common.helpers.assertions import pytest_require, pytest_assert from tests.common.cisco_data import is_cisco_device from tests.common.mellanox_data import is_mellanox_device, get_chip_type -from tests.common.innovium_data import is_innovium_device +from tests.common.marvell_teralynx_data import is_marvell_teralynx_device from tests.common.vs_data import is_vs_device from tests.common.utilities import wait_until from tests.common.platform.device_utils import 
fanout_switch_port_lookup, toggle_one_link @@ -356,7 +356,7 @@ def test_nhop_group_member_count(duthost, tbinfo, loganalyzer): polling_interval = 1 sleep_time = 380 sleep_time_sync_before = 120 - elif is_innovium_device(duthost): + elif is_marvell_teralynx_device(duthost): default_max_nhop_paths = 3 polling_interval = 10 sleep_time = 120 @@ -414,7 +414,7 @@ def test_nhop_group_member_count(duthost, tbinfo, loganalyzer): # Consider both available nhop_grp and nhop_grp_mem before creating nhop_groups nhop_group_mem_count = int((nhop_group_mem_count) / default_max_nhop_paths * CISCO_NHOP_GROUP_FILL_PERCENTAGE) nhop_group_count = min(nhop_group_mem_count, nhop_group_count) - elif is_innovium_device(duthost): + elif is_marvell_teralynx_device(duthost): crm_stat = get_crm_info(duthost, asic) nhop_group_count = crm_stat["available_nhop_grp"] else: diff --git a/tests/qos/test_buffer.py b/tests/qos/test_buffer.py index 784054ca54a..43fc3415507 100644 --- a/tests/qos/test_buffer.py +++ b/tests/qos/test_buffer.py @@ -11,8 +11,8 @@ from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.fixtures.conn_graph_facts import conn_graph_facts # noqa F401 +from tests.common.marvell_teralynx_data import is_marvell_teralynx_device from tests.common.mellanox_data import is_mellanox_device, get_chip_type -from tests.common.innovium_data import is_innovium_device from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer from tests.common.utilities import check_qos_db_fv_reference_with_table from tests.common.utilities import skip_release @@ -332,7 +332,7 @@ def setup_module(duthosts, rand_one_dut_hostname, request): duthost = duthosts[rand_one_dut_hostname] detect_buffer_model(duthost) - if not is_mellanox_device(duthost) and not is_innovium_device(duthost): + if not is_mellanox_device(duthost) and not is_marvell_teralynx_device(duthost): load_lossless_headroom_data(duthost) yield return @@ -2929,7 
+2929,7 @@ def _check_port_buffer_info_and_return(dut_db_info, table, ids, port, expected_p buffer_items_to_check_dict = { "up": buffer_table_up, "down": buffer_table_down} - if is_innovium_device(duthost): + if is_marvell_teralynx_device(duthost): buffer_items_to_check_dict["up"][KEY_2_LOSSLESS_QUEUE][3] = ( 'BUFFER_QUEUE_TABLE', '5-7', '[BUFFER_PROFILE_TABLE:egress_lossy_profile]') buffer_items_to_check_dict["down"][KEY_2_LOSSLESS_QUEUE][3] = ( diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index e7ef618eb49..aa564f20436 100755 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -3229,7 +3229,7 @@ def runTest(self): print("pkts sent: %d, lower bound: %d, actual headroom pool watermark: %d, upper_bound: %d" % ( wm_pkt_num, expected_wm, hdrm_pool_wm, upper_bound_wm), file=sys.stderr) - if 'innovium' not in self.asic_type: + if 'marvell-teralynx' not in self.asic_type: assert (expected_wm <= hdrm_pool_wm) assert (hdrm_pool_wm <= upper_bound_wm) if self.platform_asic and self.platform_asic == "broadcom-dnx": @@ -3294,7 +3294,7 @@ def runTest(self): self.src_client, self.buf_pool_roid) sys.stderr.write('After PG headroom filled, actual headroom pool watermark {}, upper_bound {}\n'.format( hdrm_pool_wm, upper_bound_wm)) - if 'innovium' not in self.asic_type: + if 'marvell-teralynx' not in self.asic_type: assert (expected_wm <= hdrm_pool_wm) assert (hdrm_pool_wm <= upper_bound_wm) # at this point headroom pool should be full. send few more packets to continue causing drops From ff2a57a26d20c92b60b024d2ffefc9124b4754aa Mon Sep 17 00:00:00 2001 From: Vasundhara Volam <163894573+vvolam@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:49:44 -0800 Subject: [PATCH 161/175] Increase interface bring-up wait time on s6000 testbeds (#15610) What is the motivation for this PR? Primary motivation of this PR is to validate the interfaces after the reboot. How did you do it?
The timeout value for FORCE10-S6000 HWSKU was increased. How did you verify/test it? Verified running tests on FORCE10-S6000 testbed. Any platform specific information? This change specifically affects the FORCE10-S6000 device configuration. --- tests/platform_tests/test_reboot.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/platform_tests/test_reboot.py b/tests/platform_tests/test_reboot.py index 6ec391d2b26..97be66e0bf8 100644 --- a/tests/platform_tests/test_reboot.py +++ b/tests/platform_tests/test_reboot.py @@ -111,6 +111,10 @@ def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, if interfaces_wait_time is None: interfaces_wait_time = MAX_WAIT_TIME_FOR_INTERFACES + # Interface bring up time is longer for FORCE10-S6000 platform + if "6000" in dut.facts['hwsku']: + interfaces_wait_time = MAX_WAIT_TIME_FOR_INTERFACES * 8 + if dut.is_supervisor_node(): logging.info("skipping interfaces related check for supervisor") else: From 96b38a79806117005535b4561da0b412552c3a63 Mon Sep 17 00:00:00 2001 From: sreejithsreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Tue, 26 Nov 2024 02:06:16 +0000 Subject: [PATCH 162/175] [Cisco] T2 ECN test: ECN comparative marking based on % of traffic (#15589) --- .../common/snappi_tests/traffic_generation.py | 16 +- .../multidut/ecn/files/multidut_helper.py | 456 ++++++++++++++++++ .../test_multidut_ecn_marking_with_snappi.py | 166 +++++++ 3 files changed, 633 insertions(+), 5 deletions(-) create mode 100644 tests/snappi_tests/multidut/ecn/test_multidut_ecn_marking_with_snappi.py diff --git a/tests/common/snappi_tests/traffic_generation.py b/tests/common/snappi_tests/traffic_generation.py index 49b21d08f35..fcd001dd78d 100644 --- a/tests/common/snappi_tests/traffic_generation.py +++ b/tests/common/snappi_tests/traffic_generation.py @@ -114,6 +114,13 @@ def generate_test_flows(testbed_config, test_flow_name_dut_rx_port_map = {} test_flow_name_dut_tx_port_map = {}
flow_rate_percent is a dictionary + if isinstance(data_flow_config["flow_rate_percent"], (int, float)): + # Create a dictionary with priorities as keys and the flow rate percent as the value for each key + data_flow_config["flow_rate_percent"] = { + prio: data_flow_config["flow_rate_percent"] for prio in test_flow_prio_list + } + for prio in test_flow_prio_list: test_flow_name = "{} Prio {}".format(data_flow_config["flow_name"], prio) test_flow = testbed_config.flows.flow(name=test_flow_name)[-1] @@ -141,7 +148,7 @@ def generate_test_flows(testbed_config, ipv4.priority.dscp.ecn.CAPABLE_TRANSPORT_1) test_flow.size.fixed = data_flow_config["flow_pkt_size"] - test_flow.rate.percentage = data_flow_config["flow_rate_percent"] + test_flow.rate.percentage = data_flow_config["flow_rate_percent"][prio] if data_flow_config["flow_traffic_type"] == traffic_flow_mode.FIXED_DURATION: test_flow.duration.fixed_seconds.seconds = data_flow_config["flow_dur_sec"] test_flow.duration.fixed_seconds.delay.nanoseconds = int(sec_to_nanosec @@ -344,10 +351,9 @@ def run_traffic(duthost, cs.state = cs.START api.set_capture_state(cs) - for host in set([*snappi_extra_params.multi_dut_params.ingress_duthosts, - *snappi_extra_params.multi_dut_params.egress_duthosts, duthost]): - clear_dut_interface_counters(host) - clear_dut_que_counters(host) + clear_dut_interface_counters(duthost) + + clear_dut_que_counters(duthost) logger.info("Starting transmit on all flows ...") ts = api.transmit_state() diff --git a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py index 76c27031316..f1779bb2461 100644 --- a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py +++ b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py @@ -1,4 +1,5 @@ import logging +import time from tests.common.helpers.assertions import pytest_assert from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts # noqa: F401 from 
tests.common.snappi_tests.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port, \ @@ -11,6 +12,7 @@ from tests.common.snappi_tests.snappi_test_params import SnappiTestParams from tests.common.snappi_tests.traffic_generation import setup_base_traffic_config, generate_test_flows, \ generate_pause_flows, run_traffic # noqa: F401 +import json logger = logging.getLogger(__name__) @@ -21,6 +23,132 @@ DATA_FLOW_NAME = 'Data Flow' +def get_npu_voq_queue_counters(duthost, interface, priority): + + asic_namespace_string = "" + if duthost.is_multi_asic: + asic = duthost.get_port_asic_instance(interface) + asic_namespace_string = " -n " + asic.namespace + + full_line = "".join(duthost.shell( + "show platform npu voq queue_counters -t {} -i {} -d{}". + format(priority, interface, asic_namespace_string))['stdout_lines']) + dict_output = json.loads(full_line) + for entry, value in zip(dict_output['stats_name'], dict_output['counters']): + dict_output[entry] = value + + return dict_output + + +def verify_ecn_counters(ecn_counters, link_state_toggled=False): + + toggle_msg = " post link state toggle" if link_state_toggled else "" + # verify that each flow had packets + init_ctr_3, post_ctr_3 = ecn_counters[0] + init_ctr_4, post_ctr_4 = ecn_counters[1] + flow3_total = post_ctr_3['SAI_QUEUE_STAT_PACKETS'] - init_ctr_3['SAI_QUEUE_STAT_PACKETS'] + + pytest_assert(flow3_total > 0, + 'Queue 3 counters at start {} at end {} did not increment{}'.format( + init_ctr_3['SAI_QUEUE_STAT_PACKETS'], post_ctr_3['SAI_QUEUE_STAT_PACKETS'], toggle_msg)) + + flow4_total = post_ctr_4['SAI_QUEUE_STAT_PACKETS'] - init_ctr_4['SAI_QUEUE_STAT_PACKETS'] + + pytest_assert(flow4_total > 0, + 'Queue 4 counters at start {} at end {} did not increment{}'.format( + init_ctr_4['SAI_QUEUE_STAT_PACKETS'], post_ctr_4['SAI_QUEUE_STAT_PACKETS'], toggle_msg)) + + flow3_ecn = post_ctr_3['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] -\ + init_ctr_3['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] + flow4_ecn = 
post_ctr_4['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] -\ + init_ctr_4['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] + + pytest_assert(flow3_ecn > 0, + 'Must have ecn marked packets on flow 3{}'. + format(toggle_msg)) + + pytest_assert(flow4_ecn > 0, + 'Must have ecn marked packets on flow 4{}'. + format(toggle_msg)) + + +def verify_ecn_counters_for_flow_percent(ecn_counters, test_flow_percent): + + # verify that each flow had packets + init_ctr_3, post_ctr_3 = ecn_counters[0] + init_ctr_4, post_ctr_4 = ecn_counters[1] + flow3_total = post_ctr_3['SAI_QUEUE_STAT_PACKETS'] - init_ctr_3['SAI_QUEUE_STAT_PACKETS'] + + drop_ctr_3 = post_ctr_3['SAI_QUEUE_STAT_DROPPED_PACKETS'] -\ + init_ctr_3['SAI_QUEUE_STAT_DROPPED_PACKETS'] + wred_drop_ctr_3 = post_ctr_3['SAI_QUEUE_STAT_WRED_DROPPED_PACKETS'] -\ + init_ctr_3['SAI_QUEUE_STAT_WRED_DROPPED_PACKETS'] + + drop_ctr_4 = post_ctr_4['SAI_QUEUE_STAT_DROPPED_PACKETS'] -\ + init_ctr_4['SAI_QUEUE_STAT_DROPPED_PACKETS'] + wred_drop_ctr_4 = post_ctr_4['SAI_QUEUE_STAT_WRED_DROPPED_PACKETS'] -\ + init_ctr_4['SAI_QUEUE_STAT_WRED_DROPPED_PACKETS'] + + pytest_assert(drop_ctr_3 == 0 and wred_drop_ctr_3 == 0, 'Queue 3 Drop not expected') + + pytest_assert(drop_ctr_4 == 0 and wred_drop_ctr_4 == 0, 'Queue 4 Drop not expected') + + pytest_assert(flow3_total > 0, + 'Queue 3 counters at start {} at end {} did not increment'.format( + init_ctr_3['SAI_QUEUE_STAT_PACKETS'], post_ctr_3['SAI_QUEUE_STAT_PACKETS'])) + + flow4_total = post_ctr_4['SAI_QUEUE_STAT_PACKETS'] - init_ctr_4['SAI_QUEUE_STAT_PACKETS'] + + pytest_assert(flow4_total > 0, + 'Queue 4 counters at start {} at end {} did not increment'.format( + init_ctr_4['SAI_QUEUE_STAT_PACKETS'], post_ctr_4['SAI_QUEUE_STAT_PACKETS'])) + + flow3_ecn = post_ctr_3['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] -\ + init_ctr_3['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] + flow4_ecn = post_ctr_4['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] -\ + init_ctr_4['SAI_QUEUE_STAT_WRED_ECN_MARKED_PACKETS'] + + if 
sum(test_flow_percent) < 100: + pytest_assert( + flow3_ecn == 0, + 'Must have no ecn marked packets on flow 3 without congestion, percent {}'. + format(test_flow_percent)) + pytest_assert( + flow4_ecn == 0, + 'Must have no ecn marked packets on flow 4 without congestion, percent {}'. + format(test_flow_percent)) + elif sum(test_flow_percent) >= 100: + if test_flow_percent[0] > 50: + pytest_assert( + flow3_ecn > 0, + 'Must have ecn marked packets on flow 3, percent {}'. + format(test_flow_percent)) + + if test_flow_percent[1] > 50: + pytest_assert( + flow4_ecn > 0, + 'Must have ecn marked packets on flow 4, percent {}'. + format(test_flow_percent)) + + if test_flow_percent[0] < 50: + pytest_assert( + flow3_ecn == 0, + 'Must not have ecn marked packets on flow 3, percent {}'. + format(test_flow_percent)) + + if test_flow_percent[1] < 50: + pytest_assert( + flow4_ecn == 0, + 'Must not have ecn marked packets on flow 4, percent {}'. + format(test_flow_percent)) + + if test_flow_percent[0] == 50 and test_flow_percent[1] == 50: + pytest_assert( + flow3_ecn > 0 and flow4_ecn > 0, + 'Must have ecn marked packets on flows 3, 4, percent {}'. 
+ format(test_flow_percent)) + + def run_ecn_test(api, testbed_config, port_config_list, @@ -181,3 +309,331 @@ def run_ecn_test(api, result.append(get_ipv4_pkts(snappi_extra_params.packet_capture_file + ".pcapng")) return result + + +def toggle_dut_port_state(api): + # Get the current configuration + config = api.get_config() + # Collect all port names + port_names = [port.name for port in config.ports] + # Create a link state object for all ports + link_state = api.link_state() + # Apply the state to all ports + link_state.port_names = port_names + # Set all ports down (shut) + link_state.state = link_state.DOWN + api.set_link_state(link_state) + logger.info("All Snappi ports are set to DOWN") + time.sleep(0.2) + # Unshut all ports + link_state.state = link_state.UP + api.set_link_state(link_state) + logger.info("All Snappi ports are set to UP") + + +def run_ecn_marking_port_toggle_test( + api, + testbed_config, + port_config_list, + dut_port, + test_prio_list, + prio_dscp_map, + snappi_extra_params=None): + + """ + Run a ECN test + Args: + api (obj): snappi session + testbed_config (obj): testbed L1/L2/L3 configuration + port_config_list (list): list of port configuration + conn_data (dict): the dictionary returned by conn_graph_fact. + fanout_data (dict): the dictionary returned by fanout_graph_fact. + dut_port (str): DUT port to test + test_prio_list (list): priorities of test flows + prio_dscp_map (dict): Priority vs. DSCP map (key = priority). 
+ snappi_extra_params (SnappiTestParams obj): additional parameters for Snappi traffic + Returns: + N/A + """ + + pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') + pytest_assert(len(test_prio_list) >= 2, 'Must have atleast two lossless priorities') + + test_flow_percent = [99.98] * len(test_prio_list) + + TEST_FLOW_NAME = ['Test Flow 3', 'Test Flow 4'] + DATA_FLOW_PKT_SIZE = 1350 + DATA_FLOW_DURATION_SEC = 2 + DATA_FLOW_DELAY_SEC = 1 + + if snappi_extra_params is None: + snappi_extra_params = SnappiTestParams() + + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] + egress_duthost = rx_port['duthost'] + + tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] + ingress_duthost = tx_port['duthost'] + + pytest_assert(testbed_config is not None, 'Failed to get L2/3 testbed config') + + logger.info("Stopping PFC watchdog") + stop_pfcwd(egress_duthost, rx_port['asic_value']) + stop_pfcwd(ingress_duthost, tx_port['asic_value']) + logger.info("Disabling packet aging if necessary") + disable_packet_aging(egress_duthost) + disable_packet_aging(ingress_duthost) + + duthost = egress_duthost + + init_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + init_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + port_id = 0 + # Generate base traffic config + base_flow_config1 = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list, + port_id=port_id) + port_config_list2 = [x for x in port_config_list if x != base_flow_config1['tx_port_config']] + base_flow_config2 = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list2, + port_id=port_id) + + # Create a dictionary with priorities as keys and flow rates as values + flow_rate_dict = { + prio: round(flow / len(test_prio_list), 2) for prio, flow in zip(test_prio_list, 
test_flow_percent) + } + + snappi_extra_params.base_flow_config = base_flow_config1 + + # Set default traffic flow configs if not set + if snappi_extra_params.traffic_flow_config.data_flow_config is None: + snappi_extra_params.traffic_flow_config.data_flow_config = { + "flow_name": TEST_FLOW_NAME[0], + "flow_dur_sec": DATA_FLOW_DURATION_SEC, + "flow_rate_percent": flow_rate_dict, + "flow_rate_pps": None, + "flow_rate_bps": None, + "flow_pkt_size": DATA_FLOW_PKT_SIZE, + "flow_pkt_count": None, + "flow_delay_sec": DATA_FLOW_DELAY_SEC, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + snappi_extra_params.base_flow_config = base_flow_config2 + + snappi_extra_params.traffic_flow_config.data_flow_config = { + "flow_name": TEST_FLOW_NAME[1], + "flow_dur_sec": DATA_FLOW_DURATION_SEC, + "flow_rate_percent": flow_rate_dict, + "flow_rate_pps": None, + "flow_rate_bps": None, + "flow_pkt_size": DATA_FLOW_PKT_SIZE, + "flow_pkt_count": None, + "flow_delay_sec": DATA_FLOW_DELAY_SEC, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + flows = testbed_config.flows + + all_flow_names = [flow.name for flow in flows] + data_flow_names = [flow.name for flow in flows if PAUSE_FLOW_NAME not in flow.name] + + # Clear PFC and queue counters before traffic run + duthost.command("sonic-clear pfccounters") + duthost.command("sonic-clear queuecounters") + + """ Run traffic """ + _tgen_flow_stats, _switch_flow_stats, _in_flight_flow_metrics = run_traffic( + duthost, + api=api, + config=testbed_config, + data_flow_names=data_flow_names, + all_flow_names=all_flow_names, + exp_dur_sec=DATA_FLOW_DURATION_SEC + + DATA_FLOW_DELAY_SEC, + 
snappi_extra_params=snappi_extra_params) + + post_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + post_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + ecn_counters = [ + (init_ctr_3, post_ctr_3), + (init_ctr_4, post_ctr_4) + ] + + verify_ecn_counters(ecn_counters) + + toggle_dut_port_state(api) + + init_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + init_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + """ Run traffic """ + _tgen_flow_stats, _switch_flow_stats, _in_flight_flow_metrics = run_traffic( + duthost, + api=api, + config=testbed_config, + data_flow_names=data_flow_names, + all_flow_names=all_flow_names, + exp_dur_sec=DATA_FLOW_DURATION_SEC + + DATA_FLOW_DELAY_SEC, + snappi_extra_params=snappi_extra_params) + + post_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + post_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + ecn_counters = [ + (init_ctr_3, post_ctr_3), + (init_ctr_4, post_ctr_4) + ] + + verify_ecn_counters(ecn_counters, link_state_toggled=True) + + +def run_ecn_marking_test(api, + testbed_config, + port_config_list, + dut_port, + test_prio_list, + prio_dscp_map, + test_flow_percent, + snappi_extra_params=None): + + """ + Run a ECN test + Args: + api (obj): snappi session + testbed_config (obj): testbed L1/L2/L3 configuration + port_config_list (list): list of port configuration + conn_data (dict): the dictionary returned by conn_graph_fact. + fanout_data (dict): the dictionary returned by fanout_graph_fact. + dut_port (str): DUT port to test + test_prio_list (list): priorities of test flows + prio_dscp_map (dict): Priority vs. DSCP map (key = priority). 
+ snappi_extra_params (SnappiTestParams obj): additional parameters for Snappi traffic + + Returns: + N/A + """ + + pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') + pytest_assert(len(test_prio_list) >= 2, 'Must have atleast two lossless priorities') + + pytest_assert(len(test_flow_percent) == len(test_prio_list), + "The length of test_flow_percent must match the length of test_prio_list") + + TEST_FLOW_NAME = ['Test Flow 3', 'Test Flow 4'] + DATA_FLOW_PKT_SIZE = 1350 + DATA_FLOW_DURATION_SEC = 2 + DATA_FLOW_DELAY_SEC = 1 + + if snappi_extra_params is None: + snappi_extra_params = SnappiTestParams() + + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] + egress_duthost = rx_port['duthost'] + + duthost = egress_duthost + + init_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + init_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + port_id = 0 + # Generate base traffic config + base_flow_config1 = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list, + port_id=port_id) + port_config_list2 = [x for x in port_config_list if x != base_flow_config1['tx_port_config']] + base_flow_config2 = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list2, + port_id=port_id) + + # Create a dictionary with priorities as keys and flow rates as values + flow_rate_dict = { + prio: round(flow / len(test_prio_list), 2) for prio, flow in zip(test_prio_list, test_flow_percent) + } + + snappi_extra_params.base_flow_config = base_flow_config1 + + # Set default traffic flow configs if not set + if snappi_extra_params.traffic_flow_config.data_flow_config is None: + snappi_extra_params.traffic_flow_config.data_flow_config = { + "flow_name": TEST_FLOW_NAME[0], + "flow_dur_sec": DATA_FLOW_DURATION_SEC, + "flow_rate_percent": 
flow_rate_dict, + "flow_rate_pps": None, + "flow_rate_bps": None, + "flow_pkt_size": DATA_FLOW_PKT_SIZE, + "flow_pkt_count": None, + "flow_delay_sec": DATA_FLOW_DELAY_SEC, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + snappi_extra_params.base_flow_config = base_flow_config2 + + snappi_extra_params.traffic_flow_config.data_flow_config = { + "flow_name": TEST_FLOW_NAME[1], + "flow_dur_sec": DATA_FLOW_DURATION_SEC, + "flow_rate_percent": flow_rate_dict, + "flow_rate_pps": None, + "flow_rate_bps": None, + "flow_pkt_size": DATA_FLOW_PKT_SIZE, + "flow_pkt_count": None, + "flow_delay_sec": DATA_FLOW_DELAY_SEC, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=test_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + flows = testbed_config.flows + + all_flow_names = [flow.name for flow in flows] + data_flow_names = [flow.name for flow in flows if PAUSE_FLOW_NAME not in flow.name] + + # Clear PFC and queue counters before traffic run + duthost.command("sonic-clear pfccounters") + duthost.command("sonic-clear queuecounters") + + """ Run traffic """ + _tgen_flow_stats, _switch_flow_stats, _in_flight_flow_metrics = run_traffic( + duthost, + api=api, + config=testbed_config, + data_flow_names=data_flow_names, + all_flow_names=all_flow_names, + exp_dur_sec=DATA_FLOW_DURATION_SEC + + DATA_FLOW_DELAY_SEC, + snappi_extra_params=snappi_extra_params) + + post_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) + post_ctr_4 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[1]) + + ecn_counters = [ + (init_ctr_3, post_ctr_3), + (init_ctr_4, post_ctr_4) + ] + + verify_ecn_counters_for_flow_percent(ecn_counters, test_flow_percent) diff --git 
a/tests/snappi_tests/multidut/ecn/test_multidut_ecn_marking_with_snappi.py b/tests/snappi_tests/multidut/ecn/test_multidut_ecn_marking_with_snappi.py new file mode 100644 index 00000000000..425476e3af9 --- /dev/null +++ b/tests/snappi_tests/multidut/ecn/test_multidut_ecn_marking_with_snappi.py @@ -0,0 +1,166 @@ +import pytest +import logging +from tabulate import tabulate # noqa F401 +from tests.common.helpers.assertions import pytest_assert # noqa: F401 +from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts_multidut # noqa: F401 +from tests.common.snappi_tests.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port, \ + snappi_api, snappi_dut_base_config, get_snappi_ports, get_snappi_ports_for_rdma, cleanup_config, \ + is_snappi_multidut, get_snappi_ports_multi_dut # noqa: F401 +from tests.common.snappi_tests.qos_fixtures import prio_dscp_map, \ + lossless_prio_list, disable_pfcwd # noqa F401 +from tests.snappi_tests.files.helper import multidut_port_info, setup_ports_and_dut # noqa: F401 +from tests.snappi_tests.multidut.ecn.files.multidut_helper import run_ecn_marking_test, run_ecn_marking_port_toggle_test +from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.common.cisco_data import is_cisco_device +logger = logging.getLogger(__name__) +pytestmark = [pytest.mark.topology('multidut-tgen')] + + +def validate_snappi_ports(snappi_ports): + + if not is_cisco_device(snappi_ports[0]['duthost']): + return True + + ''' + One ingress port and the egress port should be on the same DUT and asic. + The second ingress port can be on diff asic or DUT. + This is needed to avoid tail drops caused by use of default voq in case + both the BP ports of egress port are on the same slice + + All ingress and egress port on the same DUT and asic is fine. 
+ ''' + + # Extract duthost and peer_port values for rx_dut and tx_dut configurations + rx_dut = snappi_ports[0]['duthost'] + rx_peer_port = snappi_ports[0]['peer_port'] + tx_dut_1 = snappi_ports[1]['duthost'] + tx_peer_port_1 = snappi_ports[1]['peer_port'] + tx_dut_2 = snappi_ports[2]['duthost'] + tx_peer_port_2 = snappi_ports[2]['peer_port'] + + # get the ASIC namespace for a given duthost and peer_port + def get_asic(duthost, peer_port): + return duthost.get_port_asic_instance(peer_port).namespace + + # Retrieve ASIC namespace + rx_asic = get_asic(rx_dut, rx_peer_port) + tx_asic_1 = get_asic(tx_dut_1, tx_peer_port_1) + tx_asic_2 = get_asic(tx_dut_2, tx_peer_port_2) + + # Check if all duthosts and their ASICs are the same + if (rx_dut == tx_dut_1 == tx_dut_2) and (rx_asic == tx_asic_1 == tx_asic_2): + return True + + # Check if rx_dut and its ASIC matches either of the tx_dut and their ASIC + if (rx_dut == tx_dut_1 and rx_asic == tx_asic_1) or (rx_dut == tx_dut_2 and rx_asic == tx_asic_2): + return True + + return False + + +@pytest.fixture(autouse=True) +def number_of_tx_rx_ports(): + yield (2, 1) + + +def test_ecn_marking_port_toggle( + snappi_api, # noqa: F811 + conn_graph_facts, # noqa: F811 + fanout_graph_facts_multidut, # noqa: F811 + duthosts, + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 + tbinfo, # noqa: F811 + disable_pfcwd, # noqa: F811 + setup_ports_and_dut, # noqa: F811 + prio_dscp_map): # noqa: F811 + """ + Verify ECN marking both pre and post port shut/no shut toggle + Args: + request (pytest fixture): pytest request object + snappi_api (pytest fixture): SNAPPI session + conn_graph_facts (pytest fixture): connection graph + fanout_graph_facts (pytest fixture): fanout graph + duthosts (pytest fixture): list of DUTs + lossless_prio_list (pytest fixture): list of all the lossless priorities + prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). + prio_dscp_map (pytest fixture): priority vs. 
DSCP map (key = priority). + tbinfo (pytest fixture): fixture provides information about testbed + get_snappi_ports (pytest fixture): gets snappi ports and connected DUT port info and returns as a list + Returns: + N/A + """ + + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut + + logger.info("Snappi Ports : {}".format(snappi_ports)) + snappi_extra_params = SnappiTestParams() + snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports + + try: + run_ecn_marking_port_toggle_test( + api=snappi_api, + testbed_config=testbed_config, + port_config_list=port_config_list, + dut_port=snappi_ports[0]['peer_port'], + test_prio_list=lossless_prio_list, + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + finally: + cleanup_config(duthosts, snappi_ports) + + +test_flow_percent_list = [[90, 15], [53, 49], [15, 90], [49, 49], [50, 50]] + + +@pytest.mark.parametrize("test_flow_percent", test_flow_percent_list) +def test_ecn_marking_lossless_prio( + snappi_api, # noqa: F811 + conn_graph_facts, # noqa: F811 + fanout_graph_facts_multidut, # noqa: F811 + duthosts, + lossless_prio_list, # noqa: F811 + get_snappi_ports, # noqa: F811 + tbinfo, # noqa: F811 + disable_pfcwd, # noqa: F811 + test_flow_percent, + prio_dscp_map, # noqa: F811 + setup_ports_and_dut): # noqa: F811 + """ + Verify ECN marking on lossless prio with same DWRR weight + + Args: + request (pytest fixture): pytest request object + snappi_api (pytest fixture): SNAPPI session + conn_graph_facts (pytest fixture): connection graph + fanout_graph_facts (pytest fixture): fanout graph + duthosts (pytest fixture): list of DUTs + lossless_prio_list (pytest fixture): list of all the lossless priorities + prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority). 
+ tbinfo (pytest fixture): fixture provides information about testbed + test_flow_percent: Percentage of flow rate used for the two lossless prio + get_snappi_ports (pytest fixture): gets snappi ports and connected DUT port info and returns as a list + Returns: + N/A + """ + + testbed_config, port_config_list, snappi_ports = setup_ports_and_dut + + pytest_assert(validate_snappi_ports(snappi_ports), "Invalid combination of duthosts or ASICs in snappi_ports") + + logger.info("Snappi Ports : {}".format(snappi_ports)) + snappi_extra_params = SnappiTestParams() + snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports + + try: + run_ecn_marking_test( + api=snappi_api, + testbed_config=testbed_config, + port_config_list=port_config_list, + dut_port=snappi_ports[0]['peer_port'], + test_prio_list=lossless_prio_list, + prio_dscp_map=prio_dscp_map, + test_flow_percent=test_flow_percent, + snappi_extra_params=snappi_extra_params) + finally: + cleanup_config(duthosts, snappi_ports) From aa1d42a81941fda710310a4b46a33c6c71684741 Mon Sep 17 00:00:00 2001 From: Zhijian Li Date: Mon, 25 Nov 2024 18:12:49 -0800 Subject: [PATCH 163/175] [Marvell] SKip RX_DRP check in test_drop_l3_ip_packet_non_dut_mac (#15638) What is the motivation for this PR? Skip RX_DRP check in test_drop_l3_ip_packet_non_dut_mac, current ASIC behavior can't support RX_DRP counter. How did you do it? Skip RX_DRP check in test_drop_l3_ip_packet_non_dut_mac, How did you verify/test it? 
Verified on Nokia-7215 M0 testbed: ip/test_ip_packet.py::TestIPPacket::test_drop_l3_ip_packet_non_dut_mac[7215-6] PASSED [100%] ===================================================== 1 passed, 1 warning in 442.12s (0:07:22) ====================================================== --- tests/ip/test_ip_packet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ip/test_ip_packet.py b/tests/ip/test_ip_packet.py index ab47b2cc1f7..867a6a837f3 100644 --- a/tests/ip/test_ip_packet.py +++ b/tests/ip/test_ip_packet.py @@ -741,7 +741,7 @@ def test_drop_l3_ip_packet_non_dut_mac(self, duthosts, enum_rand_one_per_hwsku_f "Received {} packets in rx, not in expected range".format(rx_ok)) asic_type = duthost.facts["asic_type"] # Packet is dropped silently on Mellanox platform if the destination MAC address is not the router MAC - if asic_type not in ["mellanox"]: + if asic_type not in ["mellanox", "marvell"]: pytest_assert(rx_drp >= self.PKT_NUM_MIN, "Dropped {} packets in rx, not in expected range".format(rx_drp)) pytest_assert(tx_ok <= self.PKT_NUM_ZERO, From 399a1b62b031fdca43fdce12648fdccc32bf72e8 Mon Sep 17 00:00:00 2001 From: ansrajpu-git <113939367+ansrajpu-git@users.noreply.github.com> Date: Mon, 25 Nov 2024 22:46:17 -0500 Subject: [PATCH 164/175] [CHASSIS][Voq][QoS]Increasing LACP timer for lag ports for broadcom-dnx neighbor EOS host (#14469) escription of PR Intermittently testQosSaiLossyQueue tests fails due to Port-channel flap on broadcom-dnx T2 Voq chassis. The reason the port-channel goes down is because this test requires disabling TX on the egress port (which is a member of a port-channel) With the huge buffer-size, it takes a longer time to send packets . This will result in the TX LACP packets to stop egressing, so after 3 LACP packets are missed (~90s) on the server side the LAG is torn down. Issue # #11682 Summary: Fixes # (issue) What is the motivation for this PR? 
Intermittently testQosSaiLossyQueue tests fails due to Port-channel flap How did you do it? The lacp timer multiplier on the EOS host is configurable. By default, timeout is 30 secs with a failure tolerance of 3. We changed the multiplier to an increased value to hold the connectivity for some time until all packets are sent. And revert the changes after test case execution. How did you verify/test it? Executed qos test cases and verfiy the results. --- tests/common/devices/eos.py | 18 ++++++++++++++ tests/qos/qos_sai_base.py | 47 +++++++++++++++++++++++++++++++++++++ tests/qos/test_qos_sai.py | 12 +++++----- 3 files changed, 71 insertions(+), 6 deletions(-) diff --git a/tests/common/devices/eos.py b/tests/common/devices/eos.py index 35f28ab3e85..e2ce0bb06dc 100644 --- a/tests/common/devices/eos.py +++ b/tests/common/devices/eos.py @@ -556,3 +556,21 @@ def no_isis_metric(self, interface): lines=['no isis metric'], parents=['interface {}'.format(interface)]) return not self._has_cli_cmd_failed(out) + + def set_interface_lacp_time_multiplier(self, interface_name, multiplier): + out = self.eos_config( + lines=['lacp timer multiplier %d' % multiplier], + parents='interface %s' % interface_name) + + if out['failed'] is True or out['changed'] is False: + logging.warning("Unable to set interface [%s] lacp timer multiplier to [%d]" % (interface_name, multiplier)) + else: + logging.info("Set interface [%s] lacp timer to [%d]" % (interface_name, multiplier)) + return out + + def no_lacp_time_multiplier(self, interface_name): + out = self.eos_config( + lines=['no lacp timer multiplier'], + parents=['interface {}'.format(interface_name)]) + logging.info('Reset lacp timer to default for interface [%s]' % interface_name) + return out diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 574dbc3c2a9..d5ba38e9218 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -27,6 +27,7 @@ from tests.common.system_utils import docker # noqa F401 from 
tests.common.errors import RunAnsibleModuleFail from tests.common import config_reload +from tests.common.devices.eos import EosHost logger = logging.getLogger(__name__) @@ -2577,3 +2578,49 @@ def isLonglink(self, dut_host): if cable_length >= 120000: return True return False + + @pytest.fixture(scope="function", autouse=False) + def change_lag_lacp_timer(self, duthosts, get_src_dst_asic_and_duts, tbinfo, nbrhosts, dutConfig, dutTestParams, + request): + if request.config.getoption("--neighbor_type") == "sonic": + yield + return + + if ('platform_asic' in dutTestParams["basicParams"] and + dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): + src_dut = get_src_dst_asic_and_duts['src_dut'] + dst_dut = get_src_dst_asic_and_duts['dst_dut'] + if src_dut.sonichost.is_multi_asic and dst_dut.sonichost.is_multi_asic: + dst_mgfacts = dst_dut.get_extended_minigraph_facts(tbinfo) + dst_port_id = dutConfig['testPorts']['dst_port_id'] + dst_interface = dutConfig['dutInterfaces'][dst_port_id] + lag_name = '' + for port_ch, port_intf in dst_mgfacts['minigraph_portchannels'].items(): + if dst_interface in port_intf['members']: + lag_name = port_ch + break + if lag_name == '': + yield + return + lag_facts = dst_dut.lag_facts(host=dst_dut.hostname)['ansible_facts']['lag_facts'] + po_interfaces = lag_facts['lags'][lag_name]['po_config']['ports'] + vm_neighbors = dst_mgfacts['minigraph_neighbors'] + neighbor_lag_intfs = [vm_neighbors[po_intf]['port'] for po_intf in po_interfaces] + neigh_intf = next(iter(po_interfaces.keys())) + peer_device = vm_neighbors[neigh_intf]['name'] + vm_host = nbrhosts[peer_device]['host'] + num = 600 + for neighbor_lag_member in neighbor_lag_intfs: + logger.info( + "Changing lacp timer multiplier to 600 for %s in %s" % (neighbor_lag_member, peer_device)) + if isinstance(vm_host, EosHost): + vm_host.set_interface_lacp_time_multiplier(neighbor_lag_member, num) + + yield + if ('platform_asic' in dutTestParams["basicParams"] and + 
dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): + if src_dut.sonichost.is_multi_asic and dst_dut.sonichost.is_multi_asic: + for neighbor_lag_member in neighbor_lag_intfs: + logger.info( + "Changing lacp timer multiplier to default for %s in %s" % (neighbor_lag_member, peer_device)) + vm_host.no_lacp_time_multiplier(neighbor_lag_member) diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 3463fc09800..9c3ff343493 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -69,7 +69,7 @@ def ignore_expected_loganalyzer_exception(get_src_dst_asic_and_duts, loganalyzer # The following error log is related to the bug of https://github.com/sonic-net/sonic-buildimage/issues/13265 ".*ERR lldp[0-9]*#lldpmgrd.*Command failed.*lldpcli.*configure.*ports.*unable to connect to socket.*", ".*ERR lldp[0-9]*#lldpmgrd.*Command failed.*lldpcli.*configure.*ports.*lldp.*unknown command from argument" - ".*configure.*command was failed.*times, disabling retry.*" + ".*configure.*command was failed.*times, disabling retry.*", # Error related to syncd socket-timeout intermittenly ".*ERR syncd[0-9]*#dsserve: _ds2tty broken pipe.*" ] @@ -325,7 +325,7 @@ def testParameter( def testQosSaiPfcXoffLimit( self, xoffProfile, duthosts, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, dutQosConfig, - ingressLosslessProfile, egressLosslessProfile + ingressLosslessProfile, egressLosslessProfile, change_lag_lacp_timer ): # NOTE: this test will be skipped for t2 cisco 8800 if it's not xoff_1 or xoff_2 """ @@ -1147,7 +1147,7 @@ def testQosSaiBufferPoolWatermark( def testQosSaiLossyQueue( self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, - ingressLossyProfile, skip_src_dst_different_asic + ingressLossyProfile, skip_src_dst_different_asic, change_lag_lacp_timer ): """ Test QoS SAI Lossy queue, shared buffer dynamic allocation @@ -1591,7 +1591,7 @@ def testQosSaiDwrr( @pytest.mark.parametrize("pgProfile", 
["wm_pg_shared_lossless", "wm_pg_shared_lossy"]) def testQosSaiPgSharedWatermark( self, pgProfile, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, - resetWatermark, _skip_watermark_multi_DUT, skip_src_dst_different_asic + resetWatermark, _skip_watermark_multi_DUT, skip_src_dst_different_asic, change_lag_lacp_timer ): """ Test QoS SAI PG shared watermark test for lossless/lossy traffic @@ -1683,7 +1683,7 @@ def testQosSaiPgSharedWatermark( def testQosSaiPgHeadroomWatermark( self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, resetWatermark, - ): + change_lag_lacp_timer): """ Test QoS SAI PG headroom watermark test @@ -1793,7 +1793,7 @@ def testQosSaiPGDrop( @pytest.mark.parametrize("queueProfile", ["wm_q_shared_lossless", "wm_q_shared_lossy"]) def testQosSaiQSharedWatermark( self, get_src_dst_asic_and_duts, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, - resetWatermark, _skip_watermark_multi_DUT, skip_pacific_dst_asic + resetWatermark, _skip_watermark_multi_DUT, skip_pacific_dst_asic, change_lag_lacp_timer ): """ Test QoS SAI Queue shared watermark test for lossless/lossy traffic From 150435ff85f63c9a8b3d26384870b67017784829 Mon Sep 17 00:00:00 2001 From: HP Date: Mon, 25 Nov 2024 20:19:59 -0800 Subject: [PATCH 165/175] Ignore SAI switch register read and write not handled logs (#15737) Description of PR This PR relaxes the loganalyzer ignore rules to ignore all "SAI_SWITCH_ATTR_REGISTER_WRITE is not handled" and "SAI_SWITCH_ATTR_REGISTER_READ is not handled" errors. Summary: Fixes #15736 What is the motivation for this PR? To help ignore loganalyzer error messages that lead to testcases failing in sonic-mgmt. How did you do it? Modify the loganalyzer regex to not check for GBSAI How did you verify/test it? Ran the test to verify it ignores these errors. 
--- .../files/tools/loganalyzer/loganalyzer_common_ignore.txt | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index 1dfafae8765..fe199fd6b65 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -251,9 +251,8 @@ r, ".* ERR syncd#syncd.* SAI_API_SWITCH:sai_bulk_object_get_stats.* get bulk que r, ".* ERR .*-E-PVT-0- get_temperature: sensor=GIBRALTAR_HBM_SENSOR_0 is not ready.*" r, ".* ERR .*-E-PVT-0- get_temperature: sensor=GIBRALTAR_HBM_SENSOR_1 is not ready.*" r, ".* ERR CCmisApi: system_service_Map_base::at.*" -r, ".* ERR gbsyncd\d*#GBSAI.*pointer for SAI_SWITCH_ATTR_REGISTER_READ is not handled, FIXME.*" -r, ".* ERR gbsyncd\d*#GBSAI.*pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME.*" -r, ".* ERR gbsyncd\d*#GBSAI[\d*] updateNotifications: pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME!" +r, ".* ERR gbsyncd\d*.*pointer for SAI_SWITCH_ATTR_REGISTER_READ is not handled, FIXME.*" +r, ".* ERR gbsyncd\d*.*pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME.*" r, ".* ERR kernel:.*No associated hostinterface to 6 port.*" r, ".* ERR lldp#lldpmgrd\[\d*\]: Port init timeout reached.*" r, ".* ERR swss\d*#orchagent.*pointer for SAI_SWITCH_ATTR_REGISTER_READ is not handled, FIXME.*" From f61e4fe8e31218e4ab7c3c780ccaa304b2b39ed6 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:14:45 +0800 Subject: [PATCH 166/175] [dualtor][test_bgp_session] Skip reboot test type on dualtor (#15729) What is the motivation for this PR? 
Skip the warm reboot test type, as it will leave the DUT in an error state, and causes failures of following cases: bgp/test_bgp_session.py::test_bgp_session_interface_down[interface-bgp_docker] PASSED [ 16%] bgp/test_bgp_session.py::test_bgp_session_interface_down[interface-swss_docker] PASSED [ 33%] bgp/test_bgp_session.py::test_bgp_session_interface_down[interface-reboot] FAILED [ 50%] bgp/test_bgp_session.py::test_bgp_session_interface_down[neighbor-bgp_docker] FAILED [ 66%] bgp/test_bgp_session.py::test_bgp_session_interface_down[neighbor-swss_docker] FAILED [ 83%] bgp/test_bgp_session.py::test_bgp_session_interface_down[neighbor-reboot] FAILED [100%] Signed-off-by: Longxiang Lyu lolv@microsoft.com How did you do it? If test type is reboot (warm reboot), skip on dualtor. --- tests/bgp/test_bgp_session.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/bgp/test_bgp_session.py b/tests/bgp/test_bgp_session.py index 75314f4a1a0..10a41a2343d 100644 --- a/tests/bgp/test_bgp_session.py +++ b/tests/bgp/test_bgp_session.py @@ -4,6 +4,7 @@ from tests.common.platform.device_utils import fanout_switch_port_lookup from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.assertions import pytest_require from tests.common.reboot import reboot logger = logging.getLogger(__name__) @@ -101,13 +102,19 @@ def verify_bgp_session_down(duthost, bgp_neighbor): @pytest.mark.parametrize("failure_type", ["interface", "neighbor"]) @pytest.mark.disable_loganalyzer def test_bgp_session_interface_down(duthosts, rand_one_dut_hostname, fanouthosts, localhost, - nbrhosts, setup, test_type, failure_type): + nbrhosts, setup, test_type, failure_type, tbinfo): ''' 1: check all bgp sessions are up 2: inject failure, shutdown fanout physical interface or neighbor port or neighbor session 4: do the test, reset bgp or swss or do the reboot 5: Verify all bgp sessions are up ''' + # Skip the test on 
dualtor with reboot test type + pytest_require( + ("dualtor" not in tbinfo["topo"]["name"] or test_type != "reboot"), + "warm reboot is not supported on dualtor" + ) + duthost = duthosts[rand_one_dut_hostname] # Skip the test on Virtual Switch due to fanout switch dependency and warm reboot From f2f171cc0bbe24fcfdba90bc5d0e51484a4e2674 Mon Sep 17 00:00:00 2001 From: Eddie Ruan <119699263+eddieruan-alibaba@users.noreply.github.com> Date: Tue, 26 Nov 2024 00:22:42 -0800 Subject: [PATCH 167/175] SRv6 Test Cases on 7 node testbed Infra changes part (#15349) This part contains infra changes only for enabling 7 node testbed's traffic test cases. The test case would be added in another PR What is the motivation for this PR? Need to run ptf traffic test on 7 node testbed. How did you do it? Add 5 more test cases, including ptf traffic test and link flapping test cases. How did you verify/test it? Via daily jenkins run Any platform specific information? Only on 7 node vsonic testbed. Co-authored-by: wenwang <2437730491@qq.com> --- ansible/roles/test/files/ptftests/remote.py | 14 +++++++++++- ansible/roles/vm_set/library/vm_topology.py | 12 ++++++++-- tests/common/plugins/ptfadapter/__init__.py | 25 +++++++++++++++++++-- 3 files changed, 46 insertions(+), 5 deletions(-) diff --git a/ansible/roles/test/files/ptftests/remote.py b/ansible/roles/test/files/ptftests/remote.py index e0941e145d7..abae548dec9 100644 --- a/ansible/roles/test/files/ptftests/remote.py +++ b/ansible/roles/test/files/ptftests/remote.py @@ -8,6 +8,7 @@ ETH_PFX = 'eth' +BACKPLANE = 'backplane' SUB_INTF_SEP = '.' 
@@ -24,7 +25,7 @@ def get_ifaces(): iface = line.split(':')[0].strip() # Skip not FP interfaces and vlan interface, like eth1.20 - if ETH_PFX not in iface: + if ETH_PFX not in iface and BACKPLANE != iface: continue ifaces.append(iface) @@ -45,14 +46,25 @@ def build_ifaces_map(ifaces): sub_ifaces = [] iface_map = {} + used_index = set() + backplane_exist = False for iface in ifaces: iface_suffix = iface.lstrip(ETH_PFX) if SUB_INTF_SEP in iface_suffix: iface_index = int(iface_suffix.split(SUB_INTF_SEP)[0]) sub_ifaces.append((iface_index, iface)) + elif iface == BACKPLANE: + backplane_exist = True else: iface_index = int(iface_suffix) iface_map[(0, iface_index)] = iface + used_index.add(iface_index) + + count = 1 + while count in used_index: + count = count + 1 + if backplane_exist: + iface_map[(0, count)] = "backplane" if ptf_port_mapping_mode == "use_sub_interface": # override those interfaces that has sub interfaces diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py index 7c35917144b..bd0d629a3a4 100644 --- a/ansible/roles/vm_set/library/vm_topology.py +++ b/ansible/roles/vm_set/library/vm_topology.py @@ -286,6 +286,7 @@ def init(self, vm_set_name, vm_base, duts_fp_ports, duts_name, ptf_exists=True, self.duts_fp_ports = duts_fp_ports self.injected_fp_ports = self.extract_vm_vlans() + self.injected_VM_ports = self.extract_vm_ovs() self.bp_bridge = ROOT_BACK_BR_TEMPLATE % self.vm_set_name @@ -386,6 +387,13 @@ def extract_vm_vlans(self): return vlans + def extract_vm_ovs(self): + vlans = {} + for _, attr in self.OVS_LINKs.items(): + VM = self.vm_names[self.vm_base_index + attr['start_vm_offset']] + vlans[VM] = attr['vlans'][:] + return vlans + def add_network_namespace(self): """Create a network namespace.""" self.delete_network_namespace() @@ -1153,8 +1161,8 @@ def bind_ovs_ports(self, br_name, dut_iface, injected_iface, vm_iface, disconnec (br_name, dut_iface_id, vm_iface_id, injected_iface_id)) 
VMTopology.cmd("ovs-ofctl add-flow %s table=0,priority=5,ip,in_port=%s,action=output:%s" % (br_name, dut_iface_id, injected_iface_id)) - VMTopology.cmd("ovs-ofctl add-flow %s table=0,priority=5,ipv6,in_port=%s,action=output:%s" % - (br_name, dut_iface_id, injected_iface_id)) + VMTopology.cmd("ovs-ofctl add-flow %s table=0,priority=5,ipv6,in_port=%s,action=output:%s,%s" % + (br_name, dut_iface_id, vm_iface_id, injected_iface_id)) VMTopology.cmd("ovs-ofctl add-flow %s table=0,priority=3,in_port=%s,action=output:%s,%s" % (br_name, dut_iface_id, vm_iface_id, injected_iface_id)) VMTopology.cmd("ovs-ofctl add-flow %s table=0,priority=10,ip,in_port=%s,nw_proto=89,action=output:%s,%s" % diff --git a/tests/common/plugins/ptfadapter/__init__.py b/tests/common/plugins/ptfadapter/__init__.py index d87c5fbf222..5c0f618e339 100644 --- a/tests/common/plugins/ptfadapter/__init__.py +++ b/tests/common/plugins/ptfadapter/__init__.py @@ -14,6 +14,7 @@ DEFAULT_DEVICE_NUM = 0 ETH_PFX = 'eth' ETHERNET_PFX = "Ethernet" +BACKPLANE = 'backplane' MAX_RETRY_TIME = 3 @@ -65,7 +66,7 @@ def get_ifaces(netdev_output): iface = line.split(':')[0].strip() # Skip not FP interfaces - if ETH_PFX not in iface and ETHERNET_PFX not in iface: + if ETH_PFX not in iface and ETHERNET_PFX not in iface and BACKPLANE != iface: continue ifaces.append(iface) @@ -77,14 +78,25 @@ def get_ifaces_map(ifaces, ptf_port_mapping_mode): """Get interface map.""" sub_ifaces = [] iface_map = {} + used_index = set() + backplane_exist = False for iface in ifaces: iface_suffix = iface.lstrip(ETH_PFX) if "." 
in iface_suffix: iface_index = int(iface_suffix.split(".")[0]) sub_ifaces.append((iface_index, iface)) + elif iface == BACKPLANE: + backplane_exist = True else: iface_index = int(iface_suffix) iface_map[iface_index] = iface + used_index.add(iface_index) + + count = 1 + while count in used_index: + count = count + 1 + if backplane_exist: + iface_map[count] = "backplane" if ptf_port_mapping_mode == "use_sub_interface": # override those interfaces that has sub interface @@ -148,6 +160,14 @@ def start_ptf_nn_agent(): ptf_nn_agent_port = start_ptf_nn_agent() assert ptf_nn_agent_port is not None + def check_if_use_minigraph_from_tbinfo(tbinfo): + if 'properties' in tbinfo['topo'] and "init_cfg_profile" in tbinfo['topo']['properties']: + # + # Since init_cfg_profile is used, this topology would not use minigraph + # + return False + return True + with PtfTestAdapter(tbinfo['ptf_ip'], ptf_nn_agent_port, 0, list(ifaces_map.keys()), ptfhost) as adapter: if not request.config.option.keep_payload: override_ptf_functions() @@ -155,7 +175,8 @@ def start_ptf_nn_agent(): adapter.payload_pattern = node_id + " " adapter.duthost = duthost - adapter.mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + if check_if_use_minigraph_from_tbinfo(tbinfo): + adapter.mg_facts = duthost.get_extended_minigraph_facts(tbinfo) yield adapter From 64e5cad18af2b520ca800df0c903867e939e2d3c Mon Sep 17 00:00:00 2001 From: Eddie Ruan <119699263+eddieruan-alibaba@users.noreply.github.com> Date: Tue, 26 Nov 2024 00:27:10 -0800 Subject: [PATCH 168/175] Add SRv6 test cases on 7 nodes testbed (#15723) This PR is to add some SRv6 test cases listed in https://github.com/sonic-net/sonic-mgmt/blob/master/docs/testplan/srv6/SRv6-phoenixwing-ptf-testplan.md. These test cases are running on the 7 node testbed. The infra changes are in #15349 What is the motivation for this PR? 
Add a couple of SRv6 test cases listed in
+# +def run_command_with_return(cmd, force=False): + if get_run_inside_docker(): + # add host access + hostip, user = get_hostip_and_user() + cmd1 = "ssh -q -o \"UserKnownHostsFile=/dev/null\" -o \"StrictHostKeyChecking=no\" " + cmd2 = "{}@{} \"{}\"".format(user, hostip, cmd) + cmd = cmd1 + cmd2 + process = subprocess.Popen( + cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True + ) + output, stderr = process.communicate() + if stderr != "" and stderr is not None: + # It is an error, use force print + debug_print("{} : get error {}".format(cmd, stderr), force=True) + + debug_print("cmd : {}, stderr : {}, output : {}".format(cmd, stderr, output), force) + return output, stderr + + +# +# Goal is to run the following command to set up tcpdump +# For example +# ssh ubuntu@172.17.0.1 "nohup tcpdump -i VM0100-t0 -w /tmp/Vm0100-t0.pcap > /tmp/tcpdump.log 2>&1 &" +# +def enable_tcpdump(intf_list, file_loc, prefix, use_docker=False, set_debug=False): + # Enable flags baased on input flags + set_run_inside_docker(use_docker) + set_debug_flag(set_debug) + for intf in intf_list: + cmd = ( + "tcpdump -i {} -w {}/{}_{}.pcap > /tmp/{}_{}.log 2>&1 &" + .format(intf, file_loc, prefix, intf, prefix, intf) + ) + if get_run_inside_docker(): + cmd = "nohup {}".format(cmd) + debug_print("Run {}".format(cmd), force=True) + run_command_with_return(cmd) + run_command_with_return("ps aux | grep tcpdump", force=True) + # Disable flags + set_debug_flag(False) + set_run_inside_docker(False) + + +# +# Remove all existing tcpdump sessions +# +def disable_tcpdump(use_docker=False, set_debug=False): + set_run_inside_docker(use_docker) + run_command_with_return("pkill tcpdump") + set_run_inside_docker(False) diff --git a/tests/srv6/srv6_utils.py b/tests/srv6/srv6_utils.py index b0219ffe18e..a9c1c319176 100755 --- a/tests/srv6/srv6_utils.py +++ b/tests/srv6/srv6_utils.py @@ -1,9 +1,18 @@ import logging +import time import requests +import ptf.packet as scapy 
+import ptf.testutils as testutils + from tests.common.helpers.assertions import pytest_assert logger = logging.getLogger(__name__) +# +# log directory inside each vsonic. vsonic starts with admin as user. +# +test_log_dir = "/home/admin/testlogs/" + # # Helper func for print a set of lines @@ -35,7 +44,7 @@ def change_route(operation, ptfip, neighbor, route, nexthop, port): # Skip some BGP neighbor check # def skip_bgp_neighbor_check(neighbor): - skip_addresses = ['2064:100::1d', '2064:200::1e', '2064:300::1f'] + skip_addresses = [] for addr in skip_addresses: if neighbor == addr: return True @@ -101,3 +110,178 @@ def find_node_interfaces(nbrhost): found = found + 1 return found, hwsku + + +# +# Send receive packets +# +def runSendReceive(pkt, src_port, exp_pkt, dst_ports, pkt_expected, ptfadapter): + """ + @summary Send packet and verify it is received/not received on the expected ports + @param pkt: The packet that will be injected into src_port + @param src_ports: The port into which the pkt will be injected + @param exp_pkt: The packet that will be received on one of the dst_ports + @param dst_ports: The ports on which the exp_pkt may be received + @param pkt_expected: Indicated whether it is expected to receive the exp_pkt on one of the dst_ports + @param ptfadapter: The ptfadapter fixture + """ + # Send the packet and poll on destination ports + testutils.send(ptfadapter, src_port, pkt, 1) + logger.debug("Sent packet: " + pkt.summary()) + (index, rcv_pkt) = testutils.verify_packet_any_port(ptfadapter, exp_pkt, dst_ports) + received = False + if rcv_pkt: + received = True + pytest_assert(received is True) + logger.debug('index=%s, received=%s' % (str(index), str(received))) + if received: + logger.debug("Received packet: " + scapy.Ether(rcv_pkt).summary()) + if pkt_expected: + logger.debug('Expected packet on dst_ports') + passed = True if received else False + logger.debug('Received: ' + str(received)) + else: + logger.debug('No packet expected on 
dst_ports') + passed = False if received else True + logger.debug('Received: ' + str(received)) + logger.debug('Passed: ' + str(passed)) + return passed + + +# +# Helper func to check if a list of IPs go via a given set of next hop +# +def check_routes_func(nbrhost, ips, nexthops, vrf="", is_v6=False): + # Check remote learnt dual homing routes + vrf_str = "" + if vrf != "": + vrf_str = "vrf {}".format(vrf) + ip_str = "ip" + if is_v6: + ip_str = "ipv6" + for ip in ips: + cmd = "show {} route {} {} nexthop-group".format(ip_str, vrf_str, ip) + res = nbrhost.command(cmd)["stdout_lines"] + print_lines(res) + found = 0 + for nexthop in nexthops: + for line in res: + if nexthop in line: + found = found + 1 + if len(nexthops) != found: + return False + return True + + +# +# check if a list of IPs go via a given set of next hop +# +def check_routes(nbrhost, ips, nexthops, vrf="", is_v6=False): + # Add retry for debugging purpose + count = 0 + ret = False + + # + # Sleep 10 sec before retrying + # + sleep_duration_for_retry = 10 + + # retry 3 times before claiming failure + while count < 3 and not ret: + ret = check_routes_func(nbrhost, ips, nexthops, vrf, is_v6) + if not ret: + count = count + 1 + # sleep make sure all forwarding structures are settled down. 
+ time.sleep(sleep_duration_for_retry) + logger.info("Sleep {} seconds to retry round {}".format(sleep_duration_for_retry, count)) + + pytest_assert(ret) + + +# +# Record fwding chain to a file +# +def recording_fwding_chain(nbrhost, fname, comments): + + filename = "{}{}".format(test_log_dir, fname) + + cmd = "mkdir -p {}".format(test_log_dir) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "sudo touch /etc/sonic/frr/vtysh.conf" + nbrhost.shell(cmd, module_ignore_errors=True) + + cmd = "date >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "echo ' {}' >> {} ".format(comments, filename) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "vtysh -c 'show bgp summary' >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "vtysh -c 'show ip route vrf Vrf1 192.100.1.0 nexthop-group' >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "vtysh -c 'show ipv6 route fd00:201:201:fff1:11:: nexthop-group' >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "vtysh -c 'show ipv6 route fd00:202:202:fff2:22:: nexthop-group' >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + + cmd = "echo '' >> {} ".format(filename) + nbrhost.shell(cmd, module_ignore_errors=True) + + +# +# Debug commands for FRR zebra +# +debug_cmds = [ + 'debug zebra events', + 'debug zebra rib', + 'debug zebra rib detailed', + 'debug zebra nht', + 'debug zebra nht detailed', + 'debug zebra dplane', + 'debug zebra nexthop', + 'debug zebra nexthop detail', + 'debug zebra packet', + 'debug zebra packet detail' +] + + +# +# Turn on/off FRR debug to a file +# +def turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, filename, vm, is_on=True): + nbrhost = nbrhosts[vm]['host'] + # save frr log to a file + pfxstr = " " + if not is_on: + pfxstr = " no " + + cmd = "vtysh -c 'configure terminal' -c '{} log file {}'".format(pfxstr, filename) + 
nbrhost.command(cmd) + + # + # Change frr debug flags + # + for dcmd in debug_cmds: + cmd = "vtysh -c '" + pfxstr + dcmd + "'" + nbrhost.command(cmd) + + # + # Check debug flags + # + cmd = "vtysh -c 'show debug'" + nbrhost.shell(cmd, module_ignore_errors=True) + # + # Check log file + # + cmd = "vtysh -c 'show run' | grep log" + nbrhost.shell(cmd, module_ignore_errors=True) + + +# +# Collect file from bgp docker +# +def collect_frr_debugfile(duthosts, rand_one_dut_hostname, nbrhosts, filename, vm): + nbrhost = nbrhosts[vm]['host'] + cmd = "mkdir -p {}".format(test_log_dir) + nbrhost.shell(cmd, module_ignore_errors=True) + cmd = "docker cp bgp:{} {}".format(filename, test_log_dir) + nbrhost.shell(cmd, module_ignore_errors=True) diff --git a/tests/srv6/test_srv6_basic_sanity.py b/tests/srv6/test_srv6_basic_sanity.py index ca5e7a98c0e..3360babc70e 100644 --- a/tests/srv6/test_srv6_basic_sanity.py +++ b/tests/srv6/test_srv6_basic_sanity.py @@ -1,7 +1,10 @@ import time import logging import pytest +import ptf.packet as scapy +from ptf.testutils import simple_tcp_packet +from ptf.mask import Mask from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -9,6 +12,14 @@ from srv6_utils import find_node_interfaces from srv6_utils import check_bgp_neighbors from srv6_utils import check_bgp_neighbors_func +from srv6_utils import runSendReceive +from srv6_utils import check_routes +from srv6_utils import recording_fwding_chain +from srv6_utils import turn_on_off_frr_debug +from srv6_utils import collect_frr_debugfile + +from common_utils import enable_tcpdump +from common_utils import disable_tcpdump logger = logging.getLogger(__name__) @@ -26,9 +37,45 @@ test_vm_names = ["PE1", "PE2", "PE3", "P2", "P3", "P4"] +# +# Sender PE3's MAC +# +sender_mac = "52:54:00:df:1c:5e" + +# +# The port used by ptf to connect with backplane. This number is different from 3 ndoe case. 
+# +ptf_port_for_backplane = 18 + # The number of routes published by each CE num_ce_routes = 10 +# +# Routes learnt from pe1 and pe2 +# +route_prefix_for_pe1_and_pe2 = "192.100.0" + +# +# Routes learnt from pe3 +# +route_prefix_for_pe3 = "192.200.0" + +# +# This 10 sec sleep is used for make sure software programming is finished +# It has enough buffer zone. +# +sleep_duration = 10 + +# +# BGP neighbor up waiting time, waiting up to 180 sec +# +bgp_neighbor_up_wait_time = 180 + +# +# BGP neighbor down waiting time, waiting up to 30 sec +# +bgp_neighbor_down_wait_time = 30 + # # Initialize the testbed @@ -44,6 +91,7 @@ def setup_config(duthosts, rand_one_dut_hostname, nbrhosts, ptfhost): # Publish to PE2 neighbor2 = "10.10.246.30" route_prefix_for_pe1_and_pe2 = "192.100.0" + for x in range(1, num_ce_routes+1): route = "{}.{}/32".format(route_prefix_for_pe1_and_pe2, x) announce_route(ptfip, neighbor, route, nexthop, port_num[0]) @@ -51,7 +99,6 @@ def setup_config(duthosts, rand_one_dut_hostname, nbrhosts, ptfhost): # Publish to PE3 neighbor = "10.10.246.31" - route_prefix_for_pe3 = "192.200.0" for x in range(1, num_ce_routes+1): route = "{}.{}/32".format(route_prefix_for_pe3, x) announce_route(ptfip, neighbor, route, nexthop, port_num[2]) @@ -126,3 +173,308 @@ def test_check_bgp_neighbors(duthosts, rand_one_dut_hostname, nbrhosts): # From P4 nbrhost = nbrhosts["P4"]['host'] check_bgp_neighbors(nbrhost, ['fc01::86', 'fc04::2', 'fc07::2', 'fc06::1']) + + +# +# Test Case: Check VPN routes both local learnt and remote learnt and core routes +# +def test_check_routes(duthosts, rand_one_dut_hostname, nbrhosts): + global_route = "" + is_v6 = True + + # From PE3 + nbrhost = nbrhosts["PE3"]['host'] + logger.info("Check learnt vpn routes") + # check remote learnt VPN routes via two PE1 and PE2 + dut1_ips = [] + for x in range(1, num_ce_routes+1): + ip = "{}.{}/32".format(route_prefix_for_pe1_and_pe2, x) + dut1_ips.append(ip) + check_routes(nbrhost, dut1_ips, ["2064:100::1d", 
"2064:200::1e"], "Vrf1") + + # check local learnt VPN routes via local PE + dut2_ips = [] + for x in range(1, num_ce_routes+1): + ip = "{}.{}/32".format(route_prefix_for_pe3, x) + dut2_ips.append(ip) + check_routes(nbrhost, dut2_ips, ["10.10.246.254"], "Vrf1") + # Check core routes + check_routes( + nbrhost, ["fd00:201:201:fff1:11::", "fd00:202:202:fff2:22::"], + ["fc08::2", "fc06::2"], global_route, is_v6 + ) + + +# +# Test Case : Traffic check in Normal Case +# +def test_traffic_check(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, ptfadapter): + # + # Create a packet sending to 192.100.0.1 + # + # establish_and_configure_bfd(nbrhosts) + tcp_pkt0 = simple_tcp_packet( + ip_src="192.200.0.1", + ip_dst="192.100.0.1", + tcp_sport=8888, + tcp_dport=6666, + ip_ttl=64 + ) + pkt = tcp_pkt0.copy() + pkt['Ether'].dst = sender_mac + + exp_pkt = tcp_pkt0.copy() + exp_pkt['IP'].ttl -= 4 + masked2recv = Mask(exp_pkt) + masked2recv.set_do_not_care_scapy(scapy.Ether, "dst") + masked2recv.set_do_not_care_scapy(scapy.Ether, "src") + + # Enable tcpdump for debugging purpose, file_loc is host file location + intf_list = ["VM0102-t1", "VM0102-t3"] + file_loc = "~/sonic-mgmt/tests/logs/" + prefix = "test_traffic_check" + enable_tcpdump(intf_list, file_loc, prefix, True, True) + + # Add retry for debugging purpose + count = 0 + done = False + while count < 10 and done is False: + try: + runSendReceive(pkt, ptf_port_for_backplane, masked2recv, [ptf_port_for_backplane], True, ptfadapter) + logger.info("Done with traffic run") + done = True + except Exception as e: + count = count + 1 + logger.info("Retry round {}, Excetpion {}".format(count, e)) + # sleep make sure all forwarding structures are settled down. 
+ sleep_duration_for_retry = 60 + time.sleep(sleep_duration_for_retry) + logger.info( + "Sleep {} seconds to make sure all forwarding structures are settled down" + .format(sleep_duration_for_retry) + ) + + # Disable tcpdump + disable_tcpdump(True) + + logger.info("Done {} count {}".format(done, count)) + if not done: + raise Exception("Traffic test failed") + + +# +# Test Case : Local Link flap test with zebra debug log collecting +# +def test_traffic_check_local_link_fail_case(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, ptfadapter): + filename = "zebra_case_1_locallink_down.txt" + docker_filename = "/tmp/{}".format(filename) + vm = "PE3" + pe3 = nbrhosts[vm]['host'] + p2 = nbrhosts["P2"]['host'] + + logname = "zebra_case_1_locallink_down_running_log.txt" + # Recording + recording_fwding_chain(pe3, logname, "Before starting local link fail case") + # + # Turn on frr debug + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, True) + # + # shut down the link between PE3 and P2 + # + cmd = "sudo ifconfig Ethernet4 down" + pe3.command(cmd) + cmd = "sudo ifconfig Ethernet12 down" + p2.command(cmd) + time.sleep(sleep_duration) + # expect remaining BGP session are up on PE3 + ret1 = wait_until( + bgp_neighbor_down_wait_time, + 10, 0, check_bgp_neighbors_func, + pe3, ['2064:100::1d', '2064:200::1e', 'fc06::2']) + + # Recording + recording_fwding_chain(pe3, logname, "After local link down") + + # + # Recover local links + # + cmd = "sudo ifconfig Ethernet4 up" + pe3.command(cmd) + cmd = "sudo ifconfig Ethernet12 up" + p2.command(cmd) + time.sleep(sleep_duration) + + # Recording + recording_fwding_chain(pe3, logname, "After the local link gets recovered") + + # + # Turn off frr debug and collect debug log + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, False) + collect_frr_debugfile(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm) + + # expect remaining BGP 
session are up on PE3 + pytest_assert(ret1, "wait for PE3 BGP neighbors to settle down") + # expect All BGP session are up on PE3 + pytest_assert(wait_until( + bgp_neighbor_up_wait_time, + 10, 0, + check_bgp_neighbors_func, pe3, + ['2064:100::1d', '2064:200::1e', 'fc08::2', 'fc06::2']), + "wait for PE3 BGP neighbors up") + + +# +# Test Case : remote IGP Link flap test with zebra debug log collecting +# +def test_traffic_check_remote_igp_fail_case(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, ptfadapter): + filename = "zebra_case_2_remotelink_down.txt" + docker_filename = "/tmp/{}".format(filename) + vm = "PE3" + pe3 = nbrhosts[vm]['host'] + + logname = "zebra_case_2_remotelink_down_running_log.txt" + # Recording + recording_fwding_chain(pe3, logname, "Before starting remote link fail case") + # + # Turn on frr debug + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, True) + # + # shut down the link between P3 and P1, P2, P4 + # + p1 = duthosts[rand_one_dut_hostname] + p2 = nbrhosts["P2"]['host'] + p3 = nbrhosts["P3"]['host'] + p4 = nbrhosts["P4"]['host'] + + cmd = "sudo ifconfig Ethernet124 down" + p1.command(cmd) + cmd = "sudo ifconfig Ethernet4 down" + p2.command(cmd) + cmd = "sudo ifconfig Ethernet4 down" + p4.command(cmd) + + cmd = "sudo ifconfig Ethernet0 down" + p3.command(cmd) + cmd = "sudo ifconfig Ethernet12 down" + p3.command(cmd) + cmd = "sudo ifconfig Ethernet16 down" + p3.command(cmd) + + time.sleep(sleep_duration) + # expect no BGP session change on PE3 + ret1 = wait_until( + 5, 1, 0, check_bgp_neighbors_func, + pe3, ['2064:100::1d', '2064:200::1e', 'fc08::2', 'fc06::2'] + ) + + # Recording + recording_fwding_chain(pe3, logname, "After the remote IGP link is down") + # + # Recover back + # + cmd = "sudo ifconfig Ethernet124 up" + p1.command(cmd) + cmd = "sudo ifconfig Ethernet4 up" + p2.command(cmd) + cmd = "sudo ifconfig Ethernet4 up" + p4.command(cmd) + + cmd = "sudo ifconfig Ethernet0 up" + 
p3.command(cmd) + cmd = "sudo ifconfig Ethernet12 up" + p3.command(cmd) + cmd = "sudo ifconfig Ethernet16 up" + p3.command(cmd) + time.sleep(sleep_duration) + + # Recording + recording_fwding_chain(pe3, logname, "After the remote IGP link gets recovered") + # + # Turn off frr debug and collect debug log + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, False) + collect_frr_debugfile(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm) + + # expect no BGP session change on PE3 + pytest_assert(ret1, "no change in BGP sessions") + + # expect no BGP session change on PE3 + pytest_assert(wait_until( + 5, 1, 0, + check_bgp_neighbors_func, pe3, + ['2064:100::1d', '2064:200::1e', 'fc08::2', 'fc06::2']), "wait for PE3 BGP neighbors up") + + +# +# Test Case : BGP remote PE failure with zebra debug log collecting +# +def test_traffic_check_remote_bgp_fail_case(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, ptfadapter): + filename = "zebra_case_3_remote_peer_down.txt" + docker_filename = "/tmp/{}".format(filename) + vm = "PE3" + pe3 = nbrhosts[vm]['host'] + + logname = "zebra_case_3_remote_peer_down_running_log.txt" + # Recording + recording_fwding_chain(pe3, logname, "Before starting remote PE failure case") + # + # Turn on frr debug + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, True) + # + # shut down the link between PE1 and P1, P3 + # + p1 = duthosts[rand_one_dut_hostname] + pe1 = nbrhosts["PE1"]['host'] + p3 = nbrhosts["P3"]['host'] + + cmd = "sudo ifconfig Ethernet112 down" + p1.command(cmd) + cmd = "sudo ifconfig Ethernet4 down" + p3.command(cmd) + cmd = "sudo ifconfig Ethernet0 down" + pe1.command(cmd) + cmd = "sudo ifconfig Ethernet4 down" + pe1.command(cmd) + time.sleep(sleep_duration) + # expect BGP session change on PE3 + ret1 = wait_until( + bgp_neighbor_down_wait_time, 10, 0, + check_bgp_neighbors_func, pe3, + ['2064:100::1d', '2064:200::1e', 'fc08::2', 
'fc06::2']) + + # Recording + recording_fwding_chain(pe3, logname, "After shutting down the remote BGP peer") + # + # Recover back + # + cmd = "sudo ifconfig Ethernet112 up" + p1.command(cmd) + cmd = "sudo ifconfig Ethernet4 up" + p3.command(cmd) + cmd = "sudo ifconfig Ethernet0 up" + pe1.command(cmd) + cmd = "sudo ifconfig Ethernet4 up" + pe1.command(cmd) + time.sleep(sleep_duration) + + # Recording + recording_fwding_chain(pe3, logname, "After recovering the remote BGP peer") + + # + # Turn off frr debug and collect debug log + # + turn_on_off_frr_debug(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm, False) + collect_frr_debugfile(duthosts, rand_one_dut_hostname, nbrhosts, docker_filename, vm) + + # expect BGP session change on PE3 + pytest_assert(ret1, "Remote BGP PE down") + # expect no BGP session change on PE3 + pytest_assert(wait_until( + bgp_neighbor_up_wait_time, 10, 0, + check_bgp_neighbors_func, pe3, + ['2064:100::1d', '2064:200::1e', 'fc08::2', 'fc06::2']), + "wait for PE3 BGP neighbors up") From 037f80a7e95988af7f87fcb372dc0d17d4d748da Mon Sep 17 00:00:00 2001 From: sreejithsreekumaran <60534136+sreejithsreekumaran@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:22:11 +0000 Subject: [PATCH 169/175] sonic-mgmt / IXIA : [cisco] T2 test to verify ECN Counter operation pre and post port state toggle (#15586) [cisco] sonic-mgmt / IXIA : T2 test to verify ECN Counter operation pre and post port state toggle --- .../multidut/ecn/files/multidut_helper.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py index f1779bb2461..33c078ffb2e 100644 --- a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py +++ b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py @@ -6,7 +6,7 @@ snappi_api # noqa: F401 from tests.common.snappi_tests.snappi_helpers import get_dut_port_id from 
tests.common.snappi_tests.common_helpers import pfc_class_enable_vector, config_wred, \ - enable_ecn, config_ingress_lossless_buffer_alpha, stop_pfcwd, disable_packet_aging, \ + enable_ecn, config_ingress_lossless_buffer_alpha, stop_pfcwd, disable_packet_aging,\ config_capture_pkt, traffic_flow_mode, calc_pfc_pause_flow_rate # noqa: F401 from tests.common.snappi_tests.read_pcap import get_ipv4_pkts from tests.common.snappi_tests.snappi_test_params import SnappiTestParams @@ -375,18 +375,6 @@ def run_ecn_marking_port_toggle_test( rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] egress_duthost = rx_port['duthost'] - tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] - ingress_duthost = tx_port['duthost'] - - pytest_assert(testbed_config is not None, 'Failed to get L2/3 testbed config') - - logger.info("Stopping PFC watchdog") - stop_pfcwd(egress_duthost, rx_port['asic_value']) - stop_pfcwd(ingress_duthost, tx_port['asic_value']) - logger.info("Disabling packet aging if necessary") - disable_packet_aging(egress_duthost) - disable_packet_aging(ingress_duthost) - duthost = egress_duthost init_ctr_3 = get_npu_voq_queue_counters(duthost, dut_port, test_prio_list[0]) From 432554f1ebe42492e5474c505e7d7a057fcfd1c2 Mon Sep 17 00:00:00 2001 From: Ashwin Srinivasan <93744978+assrinivasan@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:32:45 -0800 Subject: [PATCH 170/175] Skip chassis watchdog API test for unsupported S6000 platform (#15440) * Skipping chassi watchdog test for unsupported S6000 HWSKU * Skipping all API watchdog tests on S6000 platform due to unsupported API --- .../conditional_mark/tests_mark_conditions_platform_tests.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml index 87b35da50b4..0ff53f56d52 100644 --- 
a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml @@ -68,6 +68,7 @@ platform_tests/api/test_chassis.py::TestChassisApi::test_get_watchdog: conditions: - "asic_type in ['barefoot'] and hwsku in ['newport']" - "'sw_to3200k' in hwsku" + - "'Force10-S6000' in hwsku" platform_tests/api/test_chassis.py::TestChassisApi::test_status_led: skip: @@ -662,7 +663,7 @@ platform_tests/api/test_watchdog.py: conditions_logical_operator: or conditions: - "asic_type in ['barefoot'] and hwsku in ['newport', 'montara'] or ('sw_to3200k' in hwsku)" - - "platform in ['x86_64-nokia_ixr7250e_sup-r0', 'x86_64-nokia_ixr7250e_36x400g-r0']" + - "platform in ['x86_64-nokia_ixr7250e_sup-r0', 'x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-dell_s6000_s1220-r0']" ####################################### ##### broadcom ##### From 09f70c2335843019d8d3c933c2ff69bacb86c871 Mon Sep 17 00:00:00 2001 From: rraghav-cisco <58446052+rraghav-cisco@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:44:10 -0800 Subject: [PATCH 171/175] T2-snappi: Split udp stream to 6 ports for lossy, but only one for lossless. (#15698) Description of PR Summary: The test: test_pfc_pause_single_lossy_prio is resulting in flaky results. On inspecting the failed state, we find that the cisco-8000 backplane load-balancing is the cause for flakiness. It needs six streams to have higher chance of traffic being equally distributed in the backplane, and the traffic can be sent without being dropped. Approach What is the motivation for this PR? Flakiness of test_pfc_pause_single_lossy_prio test. How did you do it? Changed to 6 udp streams for lossy traffic. 
co-authorized by: jianquanye@microsoft.com --- tests/snappi_tests/multidut/pfc/files/multidut_helper.py | 7 +++++++ .../pfc/test_multidut_pfc_pause_lossy_with_snappi.py | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/tests/snappi_tests/multidut/pfc/files/multidut_helper.py b/tests/snappi_tests/multidut/pfc/files/multidut_helper.py index fbd84fbf08a..52217176899 100644 --- a/tests/snappi_tests/multidut/pfc/files/multidut_helper.py +++ b/tests/snappi_tests/multidut/pfc/files/multidut_helper.py @@ -49,6 +49,7 @@ def run_pfc_test(api, bg_prio_list, prio_dscp_map, test_traffic_pause, + test_flow_is_lossless=True, snappi_extra_params=None): """ Run a multidut PFC test @@ -195,10 +196,16 @@ def run_pfc_test(api, snappi_extra_params.traffic_flow_config.pause_flow_config["flow_traffic_type"] = \ traffic_flow_mode.FIXED_DURATION + no_of_streams = 1 + if egress_duthost.fatcs['asic_type'] == "cisco-8000": + if not test_flow_is_lossless: + no_of_streams = 6 + # Generate test flow config generate_test_flows(testbed_config=testbed_config, test_flow_prio_list=test_prio_list, prio_dscp_map=prio_dscp_map, + number_of_streams=no_of_streams, snappi_extra_params=snappi_extra_params) if snappi_extra_params.gen_background_traffic: diff --git a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py index e44c5a86de1..22499aaaafe 100644 --- a/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py +++ b/tests/snappi_tests/multidut/pfc/test_multidut_pfc_pause_lossy_with_snappi.py @@ -78,6 +78,7 @@ def test_pfc_pause_single_lossy_prio(snappi_api, # noqa: F811 bg_prio_list=bg_prio_list, prio_dscp_map=prio_dscp_map, test_traffic_pause=False, + test_flow_is_lossless=False, snappi_extra_params=snappi_extra_params) @@ -127,6 +128,7 @@ def test_pfc_pause_multi_lossy_prio(snappi_api, # noqa: F811 bg_prio_list=bg_prio_list, prio_dscp_map=prio_dscp_map, 
test_traffic_pause=False, + test_flow_is_lossless=False, snappi_extra_params=snappi_extra_params) @@ -187,6 +189,7 @@ def test_pfc_pause_single_lossy_prio_reboot(snappi_api, # noqa: F811 bg_prio_list=bg_prio_list, prio_dscp_map=prio_dscp_map, test_traffic_pause=False, + test_flow_is_lossless=False, snappi_extra_params=snappi_extra_params) @@ -242,4 +245,5 @@ def test_pfc_pause_multi_lossy_prio_reboot(snappi_api, # noqa: F811 bg_prio_list=bg_prio_list, prio_dscp_map=prio_dscp_map, test_traffic_pause=False, + test_flow_is_lossless=False, snappi_extra_params=snappi_extra_params) From 29922f75b9dec3228abaaff49c5ecb5470e22d3f Mon Sep 17 00:00:00 2001 From: Chris <156943338+ccroy-arista@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:00:23 -0800 Subject: [PATCH 172/175] sonic-mgmt: rename qsp 128x400g hwsku (#15687) The Arista-7060X6-64PE-128x400G HWSKU folder in sonic-buildimage has been renamed to end in '-O128S2' instead; this change updates the corresponding references in sonic-mgmt. --- ansible/group_vars/sonic/variables | 2 +- ansible/module_utils/port_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 2d2ec3f80d3..d083754eeee 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -15,7 +15,7 @@ broadcom_th_hwskus: ['Force10-S6100', 'Arista-7060CX-32S-C32', 'Arista-7060CX-32 broadcom_th2_hwskus: ['Arista-7260CX3-D108C8', 'Arista-7260CX3-C64', 'Arista-7260CX3-Q64'] broadcom_th3_hwskus: ['DellEMC-Z9332f-M-O16C64', 'DellEMC-Z9332f-O32'] broadcom_th4_hwskus: ['Arista-7060DX5-32', 'Arista-7060DX5-64S'] -broadcom_th5_hwskus: ['Arista-7060X6-64DE', 'Arista-7060X6-64DE-64x400G', 'Arista-7060X6-64DE-256x200G', 'Arista-7060X6-64PE', 'Arista-7060X6-64PE-64x400G', 'Arista-7060X6-64PE-128x400G', 'Arista-7060X6-64PE-256x200G', 'Arista-7060X6-64PE-C256S2'] +broadcom_th5_hwskus: ['Arista-7060X6-64DE', 'Arista-7060X6-64DE-64x400G', 
'Arista-7060X6-64DE-256x200G', 'Arista-7060X6-64PE', 'Arista-7060X6-64PE-64x400G', 'Arista-7060X6-64PE-O128S2', 'Arista-7060X6-64PE-256x200G', 'Arista-7060X6-64PE-C256S2'] broadcom_j2c+_hwskus: ['Nokia-IXR7250E-36x100G', 'Nokia-IXR7250E-36x400G', 'Arista-7800R3A-36DM2-C36', 'Arista-7800R3A-36DM2-D36', 'Arista-7800R3AK-36DM2-C36', 'Arista-7800R3AK-36DM2-D36'] broadcom_jr2_hwskus: ['Arista-7800R3-48CQ2-C48', 'Arista-7800R3-48CQM2-C48'] diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index 93103d8195d..d091dad152f 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -112,7 +112,7 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 8) port_alias_to_name_map["Ethernet65"] = "Ethernet512" port_alias_to_name_map["Ethernet66"] = "Ethernet513" - elif hwsku == "Arista-7060X6-64PE-128x400G": + elif hwsku == "Arista-7060X6-64PE-O128S2": for i in range(1, 65): for j in [1, 5]: port_alias_to_name_map["Ethernet%d/%d" % (i, j)] = "Ethernet%d" % ((i - 1) * 8 + j - 1) From 70843858031535587a9d329471e9ab60fe91b743 Mon Sep 17 00:00:00 2001 From: Jibin Bao Date: Wed, 27 Nov 2024 10:16:29 +0800 Subject: [PATCH 173/175] [Dynamic buffer] Fix enable-dynamic-buffer.py issue (#15374) [Dynamic buffer] Fix enable-dynamic-buffer.py issue --- tests/common/helpers/enable-dynamic-buffer.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/common/helpers/enable-dynamic-buffer.py b/tests/common/helpers/enable-dynamic-buffer.py index 0f122f2cd2e..ebf28bfef99 100755 --- a/tests/common/helpers/enable-dynamic-buffer.py +++ b/tests/common/helpers/enable-dynamic-buffer.py @@ -2,6 +2,7 @@ import subprocess import re +import time from sonic_py_common.logger import Logger from swsscommon.swsscommon import ConfigDBConnector @@ -119,6 +120,10 @@ def stop_traditional_buffer_model(config_db): # Stop the buffermgrd # We don't stop the 
buffermgrd at the beginning # because we need it to remove tables from APPL_DB while their counter part are removed from CONFIG_DB + + # Before stopping buffermgrd, need to make sure buffermgrd is running, + # otherwise it might cause some side-effect timing issue + check_buffermgrd_is_running() command = 'docker exec swss supervisorctl stop buffermgrd' proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) _, err = proc.communicate() @@ -131,6 +136,28 @@ def stop_traditional_buffer_model(config_db): return lossless_pgs +def check_buffermgrd_is_running(): + cmd_get_buffermgrd_status = "docker exec swss supervisorctl status buffermgrd" + max_try_times = 10 + try_times = 0 + while try_times < max_try_times: + try_times += 1 + proc = subprocess.Popen(cmd_get_buffermgrd_status, shell=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + if err: + logger.log_notice("try_times:{}. Failed to check buffermgrd status: {}".format(try_times, err)) + else: + if "RUNNING" in output.decode('utf-8'): + logger.log_notice("Daemon buffermgrd is running") + return True + else: + logger.log_notice("try_times:{}. Daemon buffermgrd is not running".format(try_times)) + time.sleep(2) + + logger.log_notice("Daemon buffermgrd is not running, after checking {} times".format(max_try_times)) + exit(1) + + def start_dynamic_buffer_model(config_db, lossless_pgs, metadata): """ Start the dynamic buffer model From 2194bfb1cb681f52f4c8f8b85d4c05d9d2e0fafe Mon Sep 17 00:00:00 2001 From: xwjiang-ms <96218837+xwjiang-ms@users.noreply.github.com> Date: Wed, 27 Nov 2024 11:23:46 +0800 Subject: [PATCH 174/175] Revert "sonic-mgmt: Assert if Arista Hwsku is not found in port_utils (#15287)" (#15747) This reverts commit b02d8e9b6b6e33a202704fa689062760fa860cb4. 
--- ansible/module_utils/port_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index d091dad152f..ff5558430f8 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -437,9 +437,9 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): # this device simulates 32 ports, with 4 as the step for port naming. for i in range(0, 32, 4): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i - elif "Arista" in hwsku and "FM" not in hwsku: - assert False, "Please add hwsku %s to port_alias_to_name_map" % hwsku else: + if "Arista-7800" in hwsku: + assert False, "Please add port_alias_to_name_map for new modular SKU %s." % hwsku for i in range(0, 128, 4): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i From 86bfa94094ddefd6ab84054c173f03a87e6662d7 Mon Sep 17 00:00:00 2001 From: liamkearney-msft Date: Wed, 27 Nov 2024 13:35:07 +1000 Subject: [PATCH 175/175] [ipfwd/test_nhop_group]: Support multi-asic in interface flap test (#15486) Arp eviction commands need to respect the asic namespace when being applied to multi-asic devices, as the procfs entries are for each individual asic. Signed-off-by: Liam Kearney --- tests/ipfwd/test_nhop_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index 0a801b12d1f..8f94b836fdb 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -879,7 +879,7 @@ def test_nhop_group_interface_flap(duthosts, enum_rand_one_per_hwsku_frontend_ho # Enable kernel flag to not evict ARP entries when the interface goes down # and shut the fanout switch ports. 
- duthost.shell(arp_noevict_cmd % gather_facts['src_router_intf_name']) + asic.command(arp_noevict_cmd % gather_facts['src_router_intf_name']) for i in range(0, len(gather_facts['src_port'])): fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, duthost.hostname, gather_facts['src_port'][i]) @@ -926,6 +926,6 @@ def test_nhop_group_interface_flap(duthosts, enum_rand_one_per_hwsku_frontend_ho logger.info("portstats: %s", result['stdout']) finally: - duthost.shell(arp_evict_cmd % gather_facts['src_router_intf_name']) + asic.command(arp_evict_cmd % gather_facts['src_router_intf_name']) nhop.delete_routes() arplist.clean_up()