diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index a7d7afa83ee..8909c9fea43 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -2,6 +2,7 @@ t0: - acl/custom_acl_table/test_custom_acl_table.py - acl/null_route/test_null_route_helper.py - acl/test_acl.py + - acl/test_acl_outer_vlan.py - acl/test_stress_acl.py - arp/test_arp_extended.py - arp/test_neighbor_mac.py @@ -29,6 +30,7 @@ t0: - container_hardening/test_container_hardening.py - database/test_db_config.py - database/test_db_scripts.py + - decap/test_decap.py - dhcp_relay/test_dhcp_pkt_recv.py - dhcp_relay/test_dhcp_relay.py - dhcp_relay/test_dhcpv6_relay.py @@ -36,6 +38,10 @@ t0: - dns/static_dns/test_static_dns.py - dns/test_dns_resolv_conf.py - dualtor/test_orch_stress.py + - dualtor/test_orchagent_active_tor_downstream.py + - dualtor/test_orchagent_mac_move.py + - dualtor/test_orchagent_standby_tor_downstream.py + - dualtor/test_standby_tor_upstream_mux_toggle.py - dualtor_mgmt/test_server_failure.py - dualtor_mgmt/test_toggle_mux.py - dut_console/test_console_baud_rate.py @@ -93,6 +99,7 @@ t0: - pc/test_lag_2.py - pc/test_po_cleanup.py - pc/test_po_update.py + - pfcwd/test_pfc_config.py - platform_tests/broadcom/test_ser.py - platform_tests/counterpoll/test_counterpoll_watermark.py - platform_tests/fwutil/test_fwutil.py @@ -128,7 +135,6 @@ t0: - route/test_forced_mgmt_route.py - route/test_route_consistency.py - route/test_route_flap.py - - route/test_route_flow_counter.py - route/test_route_perf.py - route/test_static_route.py - scp/test_scp_copy.py @@ -163,7 +169,8 @@ t0: - tacacs/test_ro_disk.py - tacacs/test_ro_user.py - tacacs/test_rw_user.py - - telemetry/test_events.py + # Temporarily skip test_events due to test issue with high failure rate + # - telemetry/test_events.py - telemetry/test_telemetry.py - telemetry/test_telemetry_cert_rotation.py - test_features.py @@ -176,6 +183,14 @@ t0: - vlan/test_vlan.py - 
vlan/test_vlan_ping.py - vxlan/test_vnet_route_leak.py + - bgp/test_bgp_peer_shutdown.py + - clock/test_clock.py + - generic_config_updater/test_pfcwd_status.py + - pfcwd/test_pfc_config.py + - platform_tests/link_flap/test_link_flap.py + - platform_tests/test_memory_exhaustion.py + - generic_config_updater/test_pg_headroom_update.py + t0-2vlans: - dhcp_relay/test_dhcp_relay.py @@ -195,6 +210,9 @@ t0-sonic: dualtor: - arp/test_arp_extended.py + - dualtor_mgmt/test_grpc_periodical_sync.py + - dualtor_mgmt/test_server_failure.py + - dualtor_mgmt/test_toggle_mux.py t1-lag: - acl/test_acl.py @@ -211,6 +229,7 @@ t1-lag: - bgp/test_traffic_shift.py - configlet/test_add_rack.py - container_checker/test_container_checker.py + - decap/test_decap.py - dhcp_relay/test_dhcp_pkt_fwd.py - everflow/test_everflow_ipv6.py - everflow/test_everflow_per_interface.py @@ -222,6 +241,7 @@ t1-lag: - http/test_http_copy.py - iface_namingmode/test_iface_namingmode.py - ip/test_ip_packet.py + - ip/test_mgmt_ipv6_only.py - ipfwd/test_dip_sip.py - ipfwd/test_mtu.py - lldp/test_lldp.py @@ -229,10 +249,14 @@ t1-lag: - override_config_table/test_override_config_table.py - pc/test_lag_2.py - pc/test_po_update.py + - pfcwd/test_pfc_config.py - platform_tests/test_cpu_memory_usage.py - process_monitoring/test_critical_process_monitoring.py - qos/test_buffer.py + - radv/test_radv_restart.py - route/test_default_route.py + - route/test_route_consistency.py + - route/test_route_flap.py - route/test_route_perf.py - scp/test_scp_copy.py - snmp/test_snmp_cpu.py @@ -295,24 +319,15 @@ dpu: onboarding_t0: # We will add a batch of T0 control plane cases and fix the failed cases later - - bgp/test_bgp_peer_shutdown.py - - cacl/test_ebtables_application.py - - clock/test_clock.py - generic_config_updater/test_dynamic_acl.py - - generic_config_updater/test_pfcwd_status.py - - generic_config_updater/test_pg_headroom_update.py - - mvrf/test_mgmtvrf.py - pc/test_lag_member.py - - pfcwd/test_pfc_config.py - 
pfcwd/test_pfcwd_all_port_storm.py - pfcwd/test_pfcwd_function.py - pfcwd/test_pfcwd_timer_accuracy.py - pfcwd/test_pfcwd_warm_reboot.py - platform_tests/cli/test_show_platform.py - - platform_tests/link_flap/test_link_flap.py # - platform_tests/test_advanced_reboot.py - platform_tests/test_cont_warm_reboot.py - - platform_tests/test_memory_exhaustion.py - platform_tests/test_reboot.py - platform_tests/test_reload_config.py - snmp/test_snmp_link_local.py @@ -320,22 +335,27 @@ onboarding_t0: - snmp/test_snmp_queue_counters.py - sub_port_interfaces/test_show_subinterface.py - sub_port_interfaces/test_sub_port_interfaces.py - - system_health/test_system_health.py + - sub_port_interfaces/test_sub_port_l2_forwarding.py - test_pktgen.py - - acl/test_acl_outer_vlan.py - arp/test_unknown_mac.py - - decap/test_decap.py + - hash/test_generic_hash.py + - gnmi/test_gnmi_countersdb.py + onboarding_t1: - - decap/test_decap.py - generic_config_updater/test_cacl.py + - hash/test_generic_hash.py + - gnmi/test_gnmi_countersdb.py onboarding_dualtor: - - dualtor_mgmt/test_dualtor_bgp_update_delay.py - - dualtor_mgmt/test_grpc_periodical_sync.py + - dualtor/test_ipinip.py + - dualtor/test_orchagent_slb.py + - dualtor/test_switchover_failure.py + - dualtor/test_tor_ecn.py + - dualtor/test_tunnel_memory_leak.py - dualtor_mgmt/test_ingress_drop.py - - dualtor_mgmt/test_server_failure.py - - dualtor_mgmt/test_toggle_mux.py + - dualtor_mgmt/test_dualtor_bgp_update_delay.py + specific_param: t0-sonic: diff --git a/.azure-pipelines/pr_test_skip_scripts.yaml b/.azure-pipelines/pr_test_skip_scripts.yaml index 7ec9c98c5bc..ca93c4bc7b4 100644 --- a/.azure-pipelines/pr_test_skip_scripts.yaml +++ b/.azure-pipelines/pr_test_skip_scripts.yaml @@ -7,6 +7,11 @@ t0: - k8s/test_config_reload.py - k8s/test_disable_flag.py - k8s/test_join_available_master.py + # Mclag test only support on t0-mclag platform which is not in PR test + - mclag/test_mclag_l3.py + # Nat feature is default disabled on both KVM 
and physical platforms + - nat/test_dynamic_nat.py + - nat/test_static_nat.py # Neighbor type must be sonic - ospf/test_ospf.py - ospf/test_ospf_bfd.py @@ -41,9 +46,20 @@ t0: - platform_tests/mellanox/test_reboot_cause.py # This script only supported on Mellanox - restapi/test_restapi.py + # Route flow counter is not supported on vs platform + - route/test_route_flow_counter.py + # Sflow feature is default disabled on vs platform + - sflow/test_sflow.py - snmp/test_snmp_phy_entity.py + # Remove from PR test in https://github.com/sonic-net/sonic-mgmt/pull/6073 + - cacl/test_ebtables_application.py + # There is no table SYSTEM_HEALTH_INFO in STATE_DB on kvm testbed + # The tests in this script are all related to the above table + - system_health/test_system_health.py + # This script is also skipped in nightly test + - mvrf/test_mgmtvrf.py -t1: +t1-lag: # KVM do not support bfd test - bfd/test_bfd.py # KVM do not support drop reason in testcase, and testcase would set drop reason in setup stage, can't do more test @@ -85,9 +101,16 @@ t1: - platform_tests/mellanox/test_hw_management_service.py - platform_tests/mellanox/test_psu_power_threshold.py - platform_tests/mellanox/test_reboot_cause.py + # Route flow counter is not supported on vs platform + - route/test_route_flow_counter.py - snmp/test_snmp_phy_entity.py - # Not supported port type - - sub_port_interfaces/test_sub_port_l2_forwarding.py + # Remove from PR test in https://github.com/sonic-net/sonic-mgmt/pull/6073 + - cacl/test_ebtables_application.py + # There is no table SYSTEM_HEALTH_INFO in STATE_DB on kvm testbed + # The tests in this script are all related to the above table + - system_health/test_system_health.py + # This script is also skipped in nightly test + - mvrf/test_mgmtvrf.py t2: # KVM do not support bfd test @@ -129,6 +152,13 @@ t2: - platform_tests/mellanox/test_psu_power_threshold.py - platform_tests/mellanox/test_reboot_cause.py - snmp/test_snmp_phy_entity.py + # Remove from PR 
test in https://github.com/sonic-net/sonic-mgmt/pull/6073 + - cacl/test_ebtables_application.py + # There is no table SYSTEM_HEALTH_INFO in STATE_DB on kvm testbed + # The tests in this script are all related to the above table + - system_health/test_system_health.py + # This script is also skipped in nightly test + - mvrf/test_mgmtvrf.py dualtor: # This test is only for Nvidia platforms. diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index 1a50c5d040c..38a5811bfac 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -241,7 +241,9 @@ steps: displayName: "Trigger test" env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + SYSTEM_ACCESS_TOKEN: $(System.AccessToken) - task: AzureCLI@2 inputs: @@ -276,7 +278,9 @@ steps: displayName: "Lock testbed" env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + SYSTEM_ACCESS_TOKEN: $(System.AccessToken) - task: AzureCLI@2 inputs: @@ -312,7 +316,9 @@ steps: displayName: "Prepare testbed" env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + SYSTEM_ACCESS_TOKEN: $(System.AccessToken) - task: AzureCLI@2 inputs: @@ -348,7 +354,9 @@ steps: displayName: "Run test" timeoutInMinutes: ${{ parameters.MAX_RUN_TEST_MINUTES }} env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + 
SYSTEM_ACCESS_TOKEN: $(System.AccessToken) - ${{ if eq(parameters.DUMP_KVM_IF_FAIL, 'True') }}: - task: AzureCLI@2 @@ -375,7 +383,9 @@ steps: condition: succeededOrFailed() displayName: "KVM dump" env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + SYSTEM_ACCESS_TOKEN: $(System.AccessToken) - task: AzureCLI@2 inputs: @@ -393,4 +403,6 @@ steps: condition: always() displayName: "Finalize running test plan" env: - ELASTICTEST_MSAL_CLIENT_SECRET: $(ELASTICTEST_MSAL_CLIENT_SECRET) + SONIC_AUTOMATION_SERVICE_PRINCIPAL: $(SONIC_AUTOMATION_SERVICE_PRINCIPAL) + ELASTICTEST_MSAL_TENANT_ID: $(ELASTICTEST_MSAL_TENANT_ID) + SYSTEM_ACCESS_TOKEN: $(System.AccessToken) diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index 20ea07f6079..e74899c30ec 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -131,18 +131,6 @@ def __init__(self): super(FinishStatus, self).__init__(TestPlanStatus.FINISHED) -# def get_scope(elastictest_url): -# scope = "api://sonic-testbed-tools-dev/.default" -# if elastictest_url in [ -# "http://sonic-testbed2-scheduler-backend.azurewebsites.net", -# "https://sonic-testbed2-scheduler-backend.azurewebsites.net", -# "http://sonic-elastictest-prod-scheduler-backend-webapp.azurewebsites.net", -# "https://sonic-elastictest-prod-scheduler-backend-webapp.azurewebsites.net" -# ]: -# scope = "api://sonic-testbed-tools-prod/.default" -# return scope - - def parse_list_from_str(s): # Since Azure Pipeline doesn't support to receive an empty parameter, # We use ' ' as a magic code for empty parameter. 
@@ -159,6 +147,7 @@ def parse_list_from_str(s): class TestPlanManager(object): def __init__(self, url, frontend_url, client_id=None): + self.last_login_time = datetime.now() self.url = url self.frontend_url = frontend_url self.client_id = client_id @@ -181,7 +170,34 @@ def cmd(self, cmds): return stdout, stderr, return_code + def az_run(self, cmd): + stdout, stderr, retcode = self.cmd(cmd.split()) + if retcode != 0: + raise Exception(f'Command {cmd} execution failed, rc={retcode}, error={stderr}') + return stdout, stderr, retcode + def get_token(self): + + # Success of "az account get-access-token" depends on "az login". However, the "az login" session may expire + # after 24 hours. So we re-login every 12 hours to ensure success of "az account get-access-token". + if datetime.now() - self.last_login_time > timedelta(hours=12): + cmd = "az login --service-principal -u {} --tenant {} --allow-no-subscriptions --federated-token {}"\ + .format( + os.environ.get("SONIC_AUTOMATION_SERVICE_PRINCIPAL"), + os.environ.get("ELASTICTEST_MSAL_TENANT_ID"), + os.environ.get("SYSTEM_ACCESS_TOKEN") + ) + attempt = 0 + while (attempt < MAX_GET_TOKEN_RETRY_TIMES): + try: + stdout, _, _ = self.az_run(cmd) + self.last_login_time = datetime.now() + break + except Exception as exception: + attempt += 1 + print("Failed to login with exception: {}. Retry {} times to login." 
+ .format(repr(exception), MAX_GET_TOKEN_RETRY_TIMES - attempt)) + token_is_valid = \ self._token_expires_on is not None and \ (self._token_expires_on - datetime.now()) > timedelta(hours=TOKEN_EXPIRE_HOURS) @@ -192,10 +208,8 @@ def get_token(self): cmd = 'az account get-access-token --resource {}'.format(self.client_id) attempt = 0 while (attempt < MAX_GET_TOKEN_RETRY_TIMES): - stdout, stderr, return_code = self.cmd(cmd.split()) try: - if return_code != 0: - raise Exception("Failed to get token: rc: {}, error: {}".format(return_code, stderr)) + stdout, _, _ = self.az_run(cmd) token = json.loads(stdout.decode("utf-8")) self._token = token.get("accessToken", None) @@ -214,30 +228,6 @@ def get_token(self): raise Exception("Failed to get token after {} attempts".format(MAX_GET_TOKEN_RETRY_TIMES)) - # token_url = "https://login.microsoftonline.com/{}/oauth2/v2.0/token".format(self.tenant_id) - # headers = { - # "Content-Type": "application/x-www-form-urlencoded" - # } - - # payload = { - # "grant_type": "client_credentials", - # "client_id": self.client_id, - # "client_secret": self.client_secret, - # "scope": get_scope(self.url) - # } - # attempt = 0 - # while (attempt < MAX_GET_TOKEN_RETRY_TIMES): - # try: - # resp = requests.post(token_url, headers=headers, data=payload, timeout=10).json() - # self._token = resp["access_token"] - # self._token_generate_time = datetime.utcnow() - # return self._token - # except Exception as exception: - # attempt += 1 - # print("Get token failed with exception: {}. Retry {} times to get token." 
- # .format(repr(exception), MAX_GET_TOKEN_RETRY_TIMES - attempt)) - # raise Exception("Failed to get token after {} attempts".format(MAX_GET_TOKEN_RETRY_TIMES)) - def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params="", kvm_build_id="", min_worker=None, max_worker=None, pr_id="unknown", output=None, common_extra_params="", **kwargs): diff --git a/.azure-pipelines/testscripts_analyse/analyse_testscripts.py b/.azure-pipelines/testscripts_analyse/analyse_testscripts.py index 9fa4a222a46..2701a42b0f7 100644 --- a/.azure-pipelines/testscripts_analyse/analyse_testscripts.py +++ b/.azure-pipelines/testscripts_analyse/analyse_testscripts.py @@ -131,10 +131,13 @@ def get_PRChecker_scripts(): topology_type_pr_test_scripts = {} - for key, value in pr_test_scripts.items(): - if pr_test_skip_scripts.get(key, ""): - pr_test_scripts[key].extend(pr_test_skip_scripts[key]) + for key, value in pr_test_skip_scripts.items(): + if key in pr_test_scripts: + pr_test_scripts[key].extend(value) + else: + pr_test_scripts[key] = value + for key, value in pr_test_scripts.items(): topology_type = PR_TOPOLOGY_MAPPING.get(key, "") if topology_type: if topology_type_pr_test_scripts.get(topology_type, ""): diff --git a/ansible/library/announce_routes.py b/ansible/library/announce_routes.py index aed26bc9bda..3128110b7ea 100644 --- a/ansible/library/announce_routes.py +++ b/ansible/library/announce_routes.py @@ -10,9 +10,11 @@ import sys import socket import random +import logging import time from multiprocessing.pool import ThreadPool from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.debug_utils import config_module_logging if sys.version_info.major == 3: UNICODE_TYPE = str @@ -160,7 +162,8 @@ def change_routes(action, ptf_ip, port, routes): try: r = requests.post(url, data=data, timeout=360, proxies={"http": None, "https": None}) break - except ConnectionError as e: + except Exception as e: + logging.debug("Got exception {}, will try to 
connect again".format(e)) time.sleep(0.01 * (i+1)) if i == 4: raise e @@ -1037,10 +1040,14 @@ def main(): ptf_ip=dict(required=True, type='str'), action=dict(required=False, type='str', default='announce', choices=["announce", "withdraw"]), - path=dict(required=False, type='str', default='') + path=dict(required=False, type='str', default=''), + log_path=dict(required=False, type='str', default='') ), supports_check_mode=False) + if module.params['log_path']: + config_module_logging("announce_routes", log_path=module.params['log_path']) + topo_name = module.params['topo_name'] ptf_ip = module.params['ptf_ip'] action = module.params['action'] diff --git a/ansible/library/reduce_and_add_sonic_images.py b/ansible/library/reduce_and_add_sonic_images.py index 7d09b4354ec..d86661ec3f7 100644 --- a/ansible/library/reduce_and_add_sonic_images.py +++ b/ansible/library/reduce_and_add_sonic_images.py @@ -208,6 +208,11 @@ def install_new_sonic_image(module, new_image_url, save_as=None, required_space= ) return + skip_package_migrate_param = "" + _, output, _ = exec_command(module, cmd="sonic_installer install --help", ignore_error=True) + if "skip-package-migration" in output: + skip_package_migrate_param = "--skip-package-migration" + if save_as.startswith("/tmp/tmpfs"): log("Create a tmpfs partition to download image to install") exec_command(module, cmd="mkdir -p /tmp/tmpfs", ignore_error=True) @@ -222,7 +227,7 @@ def install_new_sonic_image(module, new_image_url, save_as=None, required_space= log("Running sonic_installer to install image at {}".format(save_as)) rc, out, err = exec_command( module, - cmd="sonic_installer install {} -y".format(save_as), + cmd="sonic_installer install {} {} -y".format(save_as, skip_package_migrate_param), msg="installing new image", ignore_error=True ) log("Done running sonic_installer to install image") @@ -241,8 +246,8 @@ def install_new_sonic_image(module, new_image_url, save_as=None, required_space= log("Running sonic_installer to install 
image at {}".format(save_as)) rc, out, err = exec_command( module, - cmd="sonic_installer install {} -y".format( - save_as), + cmd="sonic_installer install {} {} -y".format( + save_as, skip_package_migrate_param), msg="installing new image", ignore_error=True ) log("Always remove the downloaded temp image inside /host/ before proceeding") diff --git a/ansible/roles/fanout/library/port_config_gen.py b/ansible/roles/fanout/library/port_config_gen.py index 58408592169..b3bc373035c 100644 --- a/ansible/roles/fanout/library/port_config_gen.py +++ b/ansible/roles/fanout/library/port_config_gen.py @@ -151,13 +151,13 @@ def _read_from_port_config(filepath): with open(filepath) as fd: lines = fd.readlines() data_index = 0 - while lines[data_index].startswith("#"): + while not lines[data_index].strip() or lines[data_index].startswith("#"): data_index = data_index + 1 header = lines[data_index-1].strip("#\n ") keys = header.split() alias_index = keys.index("alias") for line in lines[data_index:]: - if not line: + if not line.strip() or line.startswith("#"): continue values = line.split() # port alias as the key diff --git a/docs/testbed/README.testbed.Setup.md b/docs/testbed/README.testbed.Setup.md index 7452407cb40..5db9aea904b 100644 --- a/docs/testbed/README.testbed.Setup.md +++ b/docs/testbed/README.testbed.Setup.md @@ -219,6 +219,7 @@ Once you are in the docker container, you need to modify the testbed configurati ``` ansible -m ping -i veos vm_host_1 ``` + - (Optional) The connectivity to the public internet is necessary during the setup, if the lab env of your organization requires http/https proxy server to reach out to the internet, you need to configure to use the proxy server. It will automatically be leveraged on required steps (e.g. Docker daemon config for image pulling, APT configuration for installing packages). 
You can configure it in [`ansible/group_vars/all/env.yml`](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/group_vars/all/env.yml) - VMs - Update /ansible/group_vars/vm_host/main.yml with the location of the veos files or veos file name if you downloaded a different version @@ -239,7 +240,7 @@ Once you are in the docker container, you need to modify the testbed configurati ceos_image: ceosimage:4.25.10M skip_ceos_image_downloading: true ``` - **NOTE: We are using local ceos image, hence the skip ceos image downloading should be set as true. + **NOTE**: We are using local ceos image, hence the skip ceos image downloading should be set as true. ## Deploy physical Fanout Switch VLAN diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md index 82d7b80c062..db101ca6176 100644 --- a/docs/testbed/README.testbed.VsSetup.md +++ b/docs/testbed/README.testbed.VsSetup.md @@ -96,11 +96,12 @@ All testbed configuration steps and tests are run from a `sonic-mgmt` docker con 1. Run the `setup-container.sh` in the root directory of the sonic-mgmt repository: -``` +```bash cd sonic-mgmt ./setup-container.sh -n -d /data ``` + 2. (Required for IPv6 test cases): Follow the steps [IPv6 for docker default bridge](https://docs.docker.com/config/daemon/ipv6/#use-ipv6-for-the-default-bridge-network) to enable IPv6 for container. For example, edit the Docker daemon configuration file located at `/etc/docker/daemon.json` with the following parameters to use ULA address if no special requirement. Then restart docker daemon by running `sudo systemctl restart docker` to take effect. ```json @@ -112,6 +113,7 @@ cd sonic-mgmt } ``` + 3. From now on, **all steps are running inside the sonic-mgmt docker**, unless otherwise specified. @@ -321,6 +323,8 @@ cd /data/sonic-mgmt/ansible ## Deploy minigraph on the DUT Once the topology has been created, we need to give the DUT an initial configuration. 
+(Optional) The connectivity to the public internet is necessary during the setup, if the lab env of your organization requires http/https proxy server to reach out to the internet, you need to configure to use the proxy server. It will automatically be leveraged on required steps (e.g. Docker daemon config for image pulling, APT configuration for installing packages). You can configure it in [`ansible/group_vars/all/env.yml`](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/group_vars/all/env.yml) + 1. Deploy the `minigraph.xml` to the DUT and save the configuration: ``` @@ -381,12 +385,12 @@ Peers 4, using 87264 KiB of memory Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.57 4 64600 3792 3792 0 0 0 00:29:24 6400 ARISTA01T1 -10.0.0.59 4 64600 3792 3795 0 0 0 00:29:24 6400 ARISTA02T1 -10.0.0.61 4 64600 3792 3792 0 0 0 00:29:24 6400 ARISTA03T1 -10.0.0.63 4 64600 3795 3796 0 0 0 00:29:24 6400 ARISTA04T1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.57 4 64600 3792 3792 0 0 0 00:29:24 6400 ARISTA01T1 +10.0.0.59 4 64600 3792 3795 0 0 0 00:29:24 6400 ARISTA02T1 +10.0.0.61 4 64600 3792 3792 0 0 0 00:29:24 6400 ARISTA03T1 +10.0.0.63 4 64600 3795 3796 0 0 0 00:29:24 6400 ARISTA04T1 Total number of neighbors 4 ``` @@ -404,12 +408,12 @@ Peers 4, using 83680 KiB of memory Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.57 4 64600 8 8 0 0 0 00:00:10 3 ARISTA01T1 -10.0.0.59 4 64600 0 0 0 0 0 00:00:10 3 ARISTA02T1 -10.0.0.61 4 64600 0 0 0 0 0 
00:00:11 3 ARISTA03T1 -10.0.0.63 4 64600 0 0 0 0 0 00:00:11 3 ARISTA04T1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.57 4 64600 8 8 0 0 0 00:00:10 3 ARISTA01T1 +10.0.0.59 4 64600 0 0 0 0 0 00:00:10 3 ARISTA02T1 +10.0.0.61 4 64600 0 0 0 0 0 00:00:11 3 ARISTA03T1 +10.0.0.63 4 64600 0 0 0 0 0 00:00:11 3 ARISTA04T1 ``` diff --git a/docs/testbed/README.testbed.WANSetup.md b/docs/testbed/README.testbed.WANSetup.md index 6f0e609dfa1..78a7d8b3b38 100644 --- a/docs/testbed/README.testbed.WANSetup.md +++ b/docs/testbed/README.testbed.WANSetup.md @@ -147,6 +147,7 @@ In order to configure the testbed on your host automatically, Ansible needs to b ## Deploy multiple devices topology Now we're finally ready to deploy the topology for our testbed! Run the following command: +(Optional) The connectivity to the public internet is necessary during the setup, if the lab env of your organization requires http/https proxy server to reach out to the internet, you need to configure to use the proxy server. It will automatically be leveraged on required steps (e.g. Docker daemon config for image pulling, APT configuration for installing packages). 
You can configure it in [`ansible/group_vars/all/env.yml`](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/group_vars/all/env.yml) ### cEOS ``` diff --git a/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md b/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md index ccfe7746d68..9acf1deae9a 100644 --- a/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md +++ b/docs/testbed/sai_quality/DeploySAITestTopologyWithSONiC-MGMT.md @@ -3,6 +3,11 @@ In this article, you will get to know how to use the sonic-mgmt docker to set up **Those commands need to be run within a sonic-mgmt docker, or you need to run them within a similar environment.** This section of the document described how to build a sonic-mgmt docker https://github.com/sonic-net/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md#setup-sonic-mgmt-docker + + + +(Optional) The connectivity to the public internet is necessary during the setup, if the lab env of your organization requires http/https proxy server to reach out to the internet, you need to configure to use the proxy server. It will automatically be leveraged on required steps (e.g. Docker daemon config for image pulling, APT configuration for installing packages). You can configure it in [`ansible/group_vars/all/env.yml`](https://github.com/sonic-net/sonic-mgmt/blob/master/ansible/group_vars/all/env.yml) + 1. install the sonic image in the DUT(device under test) for example ``` @@ -41,6 +46,7 @@ For example, we want to use the config `vms-sn2700-t1-lag`, then we need to chan **for the topo, if it ends with 64, then the topo should be ptf64, please change it according to the actual device port.** 4. 
deploy the new topology + ``` ./testbed-cli.sh -t testbed.yaml add-topo vms-sn2700-t1 password.txt ``` diff --git a/docs/testplan/PFC_Snappi_Additional_Testcases.md b/docs/testplan/PFC_Snappi_Additional_Testcases.md new file mode 100644 index 00000000000..f5cca953fe0 --- /dev/null +++ b/docs/testplan/PFC_Snappi_Additional_Testcases.md @@ -0,0 +1,65 @@ +This document describes the list of additional system testcases. + +- [Background](#background) +- [Setup](#setup) +- [Testcases](#testcases) + + +### Background + +Intent of these testcases is to test throughput at the interface speed, PFC, PFC-WD, ECN congestion, port-channels and MACSEC. These testcases will be executed for 100 and 400Gbps ports on single line-card single asic, single line-card multiple asic and multiple line-card. The packet-size will vary from 128 to 1400 bytes. + +Test cases are specifically meant for Multi-ASIC, DNX based chassis platform. + +Traffic pattern includes at least one lossless priority flow and with/without lossy background traffic. Some of the testcases use pause flows to simulate congestion. + +Additionally, the testcases also intend to capture the utilization, packet-drops and/or latency. + + +### Setup + +Example for 100Gbps: + + ________________ + | | + IXIA(100Gbps) <---------> | DUT-LC1 |<---------> IXIA(100Gbps) + IXIA(100Gbps) <---------> | ASIC0 |<---------> IXIA(100Gbps) + |_______________| + |_______________| + | | + IXIA(100Gbps) <---------> | DUT-LC1 | + IXIA(100Gbps) <---------> | ASIC1 | + |_______________| + + ________________ + | | + IXIA(100Gbps) <---------> | DUT-LC2 |<---------> IXIA(100Gbps) + | ASIC0 | + |_______________| + + +There will be similar setup on a different line-cards for 400Gbps in the same chassis. 
+ +### Testcases + +| Test case ID | Testcase Groups | Description | Topology | Traffic Pattern and Settings | LC combination | Pass-Fail Criteria | +|-------------:|:-------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| 01 | NON-CONGESTION(NORMAL) | **NO CONGESTION AT LINE RATE TEST:**
Aim of the test is to ensure that there are no packet drops with equal speed ingress and egress links and mix of lossless and lossy traffic. No packet drops for both lossless and lossy priority traffic. | - Single 100Gbps Ingress and egress
- Repeat with 400Gbps ingress and egress port
- Frame size: IMIX packet-profile. | - 90% of ingress interface speed - Lossless Priority 3 and 4
- 10% of ingress interface speed - Lossy Background Flow [Priority 0, 1, and 2] | - Single LC - Multiple ASIC
- Single LC - Single ASIC
- Multiple LC | 1. There should be NO packet drops on the DUT (Tool Tx packets == Tool Rx packets)
2. There should be no PFCs on tool Tx port or on the DUT.<br>
3. Measure latency of the packets traversing the DUT.
4. Expected throughput of linerate on the egress port of DUT. | +| 02 | NON-CONGESTION(MACSEC) | **NO CONGESTION LINE-RATE OVER MACSEC ENABLED LINK:**
Test case to ensure that there are no packet drops with equal speed ingress and egress links with MACSEC enabled and mix of lossless and lossy traffic. | - Single 100Gbps Ingress and single 100Gbps egress - MACSEC
- Repeat with Single 400Gbps ingress and Single 400Gbps egress - MACSEC.
- Repeat with IPv6
- Frame size: IMIX packet-profile. | - 90% - Lossless Priority 3 and 4
- 10% - Lossy Background Flow [Priority 0, 1, and 2] | - Single LC - Multiple ASIC
- Multiple LC | 1. There should be NO packet drops on the DUT (IXIA Tx packets == IXIA Rx packets)
2. There should be no PFCs on the IXIA Tx port or on the DUT.<br>
3. Measure latency of the packets traversing the DUT.
4. Expected throughput of linerate on the egress port of DUT. | +| 03 | CONGESTION | **DETECT CONGESTION WITH MISMATCHED INGRESS AND EGRESS:**
Aim of the test is to ensure that congestion is detected with an ingress 400Gbps link, a single 100Gbps egress link, and a mix of lossless and lossy traffic. No losses for lossless priority (Priority 3 and 4) traffic streams. | - Single 400Gbps Ingress and single 100Gbps egress<br>
- Repeat with IPv6
- 1024B packet-size | - 90% of 400Gbps - Lossless Priority 3 and 4
- 10% of 400Gbps - Lossy Background Flow [Priority 0, 1, and 2] | - Single LC - Multiple ASIC
- Multiple LC | 1. The single 100Gbps egress should see 100Gbps lossy+lossless traffic
2. The 40Gbps lossy traffic should continue to flow.<br>
3. The traffic transmitter port should be receiving PFCs for the lossless priority traffic.
4. On receiving the PFCs, the IXIA-ingress should reduce the lossless traffic to around 60Gbps.
5. No drop for lossy and lossless traffic. | +| 04 | CONGESTION | **DETECT CONGESTION WITH REAL-LIFE TRAFFIC PATTERN - 90% LOSSLESS and 10% LOSSY:**
Aim of the testcase is to ensure that congestion is detected with two ingress and single egresses of same speed and mix of lossless and lossy traffic. No losses for lossless priority traffic (Priority 3 and priority 4 streams). | - Two 100Gbps Ingress and single 100Gbps egress
- Repeat test with 2x400Gbps ingress and 1x400Gbps egress
- IMIX packet-profile
- Unequal traffic lossy and lossless priorities. | - 90% - Lossless Priority 3 and 4 per ingress LC
- 10% - Lossy Background Flow [Priority 0, 1, and 2] per ingress LC. | - Single LC - Multiple ASIC
- Single LC - Single ASIC
- Multiple LC | 1. No packet drops for Lossy Traffic.
2. PFCs for lossless traffic sent from DUT ingress to traffic transmitter. No packet drops for lossless traffic.<br>
3. Expected throughput of (Lossless + lossy traffic only) on egress interface line rate of DUT. | +| 05 | CONGESTION | **DETECT CONGESTION WITH EQUAL DISTRIBUTION OF LOSSLESS AND LOSSY TRAFFIC:**
Purpose of the testcase is to determine the amount of congestion faced by both lossless and lossy priority traffic. No losses for priority 3 and priority 4 traffic streams. | - Two 100Gbps Ingress and single 100Gbps egress<br>
- Repeat test with 2x400Gbps ingress and 1x400Gbps egress
- IMIX packet-profile
- Equal traffic for all priorities | - 24% - Priority 3 and 4 per ingress LC
- 36% Background flows [Priority 0, 1, and 2] per ingress LC
- Enabled PFCWD and credit-watchdog on ingress and egress. | - Single LC - Multiple ASIC
- Multiple LC | 1. Packet drops for Lossy Traffic.
2. PFCs for lossless traffic. No packet drops for lossless traffic.<br>
3. Expected throughput of egress interface line-rate (Lossless + lossy traffic only) on egress interface of DUT.
4. Expected close to equal throughput for all priorities. | +| 06 | CONGESTION (MACSEC) | **CONGESTION DETECTION WITH PAUSE FRAMES OVER MACSEC ENABLED LINK:**
Purpose of the test is to ensure that congestion is detected on DUT egress with MACSEC enabled links on ingress and egress and mix of lossless and lossy traffic. No drops for lossless priority traffic. | - Single 100Gbps Ingress and Egress - PFC over MACSEC.
- Repeat with 400Gbps Ingress and Egress
- 1024B packet-size | - 90% - Lossless Priority 3 and 4 per ingress LC
- 10% - Lossy Background Flow [Priority 0, 1, and 2]
- Disable PFCWD before the test
- Send PAUSE frames to egress interface of DUT. | - Single LC - Multiple ASIC
- Single LC - Single ASIC
- Multiple LC | 1. There should be NO packet drops on the DUT (IXIA Tx packets == IXIA Rx packets)
2. There should be PFCs on IXIA and DUT - Tx and Rx port.
3. No Lossless Priority traffic on IXIA Rx Interface.
4. Only 10% lossy priority traffic allowed from the DUT.
5. No loss for Lossless traffic. | +| 07 | CONGESTION (MACSEC) | **CONGESTION OVER MISMATCHED INGRESS AND EGRESS MACSEC ENABLED LINK:**
Test case to check if the DUT detects congestion due to mismatched MACSEC enabled link speeds and sends PFCs to rate-limit the lossless priority traffic. | - Single 400Gbps Ingress and single 100Gbps egress
- Repeat with IPv6
- 1024B packet-size. | - 90% - ingress line rate Priority 3 and 4
- 10% of ingress line-rate -- Background Flow[Priority 0, 1, and 2] | - Single LC - Multiple ASIC
- Multiple LC | 1. There should be NO packet drops on the DUT for both Lossless and lossy priority traffic.
2. DUT should send PFCs to the traffic generator to rate-limit the lossless priority traffic.
3. 100Gbps Egress port should receive Lossless Priority 3 and 4 traffic. | +| 08 | CONGESTION (PC) | **UNEQUAL SPEED INGRESS-EGRESS PORTCHANNEL CONGESTION TEST:**
Test case to check that DUT detects congestion due to mismatched ingress and egress port-channel speeds and send PFCs to the ingress to rate-limit the lossless priority traffic. No drops for the lossless priority traffic. | - Port-channel - N x 100Gbps Ingress - 400Gbps Egress with N > 4.
- 1024B packet-size. | - 90% of total ingress port-channel link- Priority 3 and 4
- 10% of ingress port-channel link- Background Flow[Prio0, 1, and 2] | - Single LC - Multiple ASIC
- Multiple LC | 1. The lossless Prio3-4 traffic should not experience drop.
2. Ensure that background flows experience drop.
3. DUT sends PFCs to traffic generator to rate-limit the lossless prio traffic. | +| 09 | CONGESTION (PC + MACSEC) | **CONGESTION DETECTION ON PORT FAILURE ON PORTCHANNEL WITH MACSEC ENABLED:**
Test case to verify that DUT detects congestion on egress port-channel link on port-failure with MACSEC enabled on port-channel. | - Minimum of Two x 100Gbps ingress and egress port with MACSEC and PFC-WD enabled.
- Min port set to 1 for this test
- Repeat with minimum 2x400Gbps ingress and egress ports as port-channel members.
- 1024B packet-size. | - 90% - Lossless Priority 3 and 4
- 10% - Lossy Background Flow[Prio0, 1, and 2]
- Shutdown one of the egress ports.
- Shutting down one of the egress ports causes congestion on the other egress port.<br>
- Bring back the egress port online again
Repeat in loop. | - Single LC - Multiple ASIC
- Multiple LC | 1. Line-rate should be 200Gbps without any PFCs or packet drops.<br>
2. When one of egress ports is shut, PFCs should be sent to the ingress to slow down the incoming traffic.
3. The other egress port should receive all the Lossless Priority 3 and 4 traffic.
4. Minuscule drop for both lossless and lossy traffic while traffic is shifting to the other egress port.<br>
5. When egress port is back-online, the traffic on egress ports should switch back with no loss or drops. | +| 10 | PFCWD | **PFCWD ENABLED DROP MODE TEST:**
Purpose of the test case is to ensure that lossless priorities are dropped when PFCWD is enabled and the egress port faces a PFC storm. DUT does not send PFCs out of the ingress port and there are no drops for lossy traffic. | - Two 100Gbps Ingress and single 100Gbps egress<br>
- Repeat test with 2x400Gbps ingress and 1x400Gbps egress
- Repeat with IPv6.<br>
- 1024B packet-size | - 30% - Lossless Priority 3 and 4 per ingress LC
- 10% Lossy Background Flow [Priority 0, 1, and 2] per ingress LC.
- Enable PFCWD with "action" set to DROP(default)
- Send continuous PFC for lossless Priorities for configured interval. | - Single LC - Multiple ASIC
- Single LC - Single ASIC
- Multiple LC | 1. No lossy background flows dropped on DUT when the PFC is sent to DUT egress. Egress port is in 'stormed' mode.
2. Lossless flows are dropped on DUT.
3. PFCWD drop counter should increment.
4. No PFCs are sent from DUT ingress to traffic transmitter.
5. After the PFC packets are stopped, the egress port is moves to "operational state" and storm-restored counter increments. | +| 11 | PFCWD | **PFCWD ENABLED FORWARD MODE TEST:**
Test case to ensure that both lossless and lossy priority traffic are forwarded when PFCWD is enabled in forward mode and egress ports faces PFC storm. DUT does not send PFCs out of ingress port and no drops for both lossy and lossless traffic. | - Two 100Gbps Ingress and single 100Gbps egress
- Repeat test with 2x400Gbps ingress and 1x400Gbps egress
- 1024B packet-size | - 30% - Lossless Priority 3 and 4 per ingress LC
- 10% Lossy Background Flow [Priority 0, 1, and 2] per ingress LC.
- Enable PFCWD with "action" set to FORWARD
- Send continuous PFC for lossless Priorities for configured interval. | - Single LC - Multiple ASIC
- Single LC - Single ASIC
- Multiple LC | 1. There should be no packet drops on DUT for lossy background flows.
2. The Priority 3 and 4 packets are forwarded to the Traffic Receiver.
3. After the PFC packets are stopped, the lossless PRIO traffic should continue without any drops.
4. No PFCs sent from DUT ingress to traffic transmitter.
5. PFCWD drop counters should not increment. | +| 12 | PFCWD (MACSEC) | **TEST PFCWD BEHAVIOR OVER MACSEC ENABLED LINK:**
Aim of the test case is to ensure that PFCWD drops the lossless priority traffic on detecting PFC storm on MACSEC enabled DUT and mix of lossless and lossy traffic. | - Single 100Gbps Ingress and single 100Gbps egress - PFCWD(Drop) over MACSEC
- Repeat with Single 400Gbps ingress and Single 400Gbps egress.
- 1024B packet-size. | - 90% - Lossless Priority 3 and 4
- 10% - Lossy Background Flow [Priority 0, 1, and 2]
- Enable PFCWD in default mode before the test.<br>
- Send pause to the egress interface of DUT from IXIA Rx. | - Single LC - Multiple ASIC<br>
- Multiple LC | 1. There should be packet drops on the DUT (IXIA Tx packets > IXIA Rx packets).
2. There should be PFCs on DUT egress but no PFC sent from DUT ingress to traffic transmitter.<br>
3. Only 10% of lossy traffic should flow.
4. PFCWD drop counter should increment
5. DUT Rx port should be in 'stormed' mode. | +| 13 | ECN | **APPROPRIATE ECN MARKING ON CONGESTION RESTORATION:**
Aim of the test case is to ensure that DUT marks the lossless priority traffic with appropriate ECN flags when congestion is detected and restored. | - Single 100Gbps ingress and single 100Gbps egress<br>
- Repeat with 400Gbps ingress and egress.
- 1024B packet-size
- ECN congestion and packet-marking test.
| - Disable PFCWD
- 90% - Lossless Priority 3 and 4
- 10% - Lossy Background Flow [Priority 0, 1, and 2]
- Send PFC storm to the egress implying ECN congestion.
- Stop the PFC storm.
- Monitor set of packets to ensure packets are ECN-marked. | - Single LC - Multiple ASIC
- Multiple LC | 1. PFC storm should stop the packets coming into the DUT ingress.<br>
2. No packets going out of egress port during storm.
3. The captured packet on egress, once storm is stopped should be ECN-marked. | + + + + + + diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index eb62c17a3f1..ef324487df8 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -17,8 +17,9 @@ from tests.common.helpers.assertions import pytest_require, pytest_assert from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError from tests.common.config_reload import config_reload -from tests.common.fixtures.ptfhost_utils import \ - copy_arp_responder_py, run_garp_service, change_mac_addresses, skip_traffic_test # noqa F401 +from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr # noqa F401 from tests.common.helpers.constants import DEFAULT_NAMESPACE diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py index 17ba6e84bce..ea8e07401f0 100644 --- a/tests/bgp/conftest.py +++ b/tests/bgp/conftest.py @@ -719,3 +719,10 @@ def is_quagga(duthosts, enum_rand_one_per_hwsku_frontend_hostname): @pytest.fixture(scope="module") def is_dualtor(tbinfo): return "dualtor" in tbinfo["topo"]["name"] + + +@pytest.fixture(scope="module") +def traffic_shift_community(duthost): + community = duthost.shell('sonic-cfggen -y /etc/sonic/constants.yml -v constants.bgp.traffic_shift_community')[ + 'stdout'] + return community diff --git a/tests/bgp/test_bgp_session_flap.py b/tests/bgp/test_bgp_session_flap.py index c8507f6e540..f41fafc6894 100644 --- a/tests/bgp/test_bgp_session_flap.py +++ b/tests/bgp/test_bgp_session_flap.py @@ -28,7 +28,7 @@ memSpike = 1.3 pytestmark = [ - pytest.mark.topology('t1') + pytest.mark.topology('t1', 't2') ] @@ -56,24 +56,27 @@ def 
get_cpu_stats(dut): @pytest.fixture(scope='module') -def setup(tbinfo, nbrhosts, duthosts, rand_one_dut_hostname, enum_rand_one_frontend_asic_index): - duthost = duthosts[rand_one_dut_hostname] +def setup(tbinfo, nbrhosts, duthosts, enum_frontend_dut_hostname, enum_rand_one_frontend_asic_index): + duthost = duthosts[enum_frontend_dut_hostname] asic_index = enum_rand_one_frontend_asic_index namespace = duthost.get_namespace_from_asic_id(asic_index) + bgp_facts = duthost.bgp_facts(instance_id=asic_index)['ansible_facts'] + neigh_keys = [] tor_neighbors = dict() - tor1 = natsorted(nbrhosts.keys())[0] - - skip_hosts = duthost.get_asic_namespace_list() - - bgp_facts = duthost.bgp_facts(instance_id=enum_rand_one_frontend_asic_index)['ansible_facts'] neigh_asn = dict() for k, v in bgp_facts['bgp_neighbors'].items(): - if v['description'].lower() not in skip_hosts: + if 'asic' not in v['description'].lower(): + neigh_keys.append(v['description']) neigh_asn[v['description']] = v['remote AS'] tor_neighbors[v['description']] = nbrhosts[v['description']]["host"] assert v['state'] == 'established' + if not neigh_keys: + pytest.skip("No BGP neighbors found on ASIC {} of DUT {}".format(asic_index, duthost.hostname)) + + tor1 = natsorted(neigh_keys)[0] + # verify sessions are established logger.info(duthost.shell('show ip bgp summary')) logger.info(duthost.shell('show ipv6 bgp summary')) diff --git a/tests/bgp/test_traffic_shift.py b/tests/bgp/test_traffic_shift.py index d6e49e20904..af6307569a4 100644 --- a/tests/bgp/test_traffic_shift.py +++ b/tests/bgp/test_traffic_shift.py @@ -26,13 +26,6 @@ TS_NO_NEIGHBORS = "System Mode: No external neighbors" -@pytest.fixture -def traffic_shift_community(duthost): - community = duthost.shell('sonic-cfggen -y /etc/sonic/constants.yml -v constants.bgp.traffic_shift_community')[ - 'stdout'] - return community - - @pytest.fixture(scope="module") def nbrhosts_to_dut(duthosts, enum_rand_one_per_hwsku_frontend_hostname, nbrhosts): duthost = 
duthosts[enum_rand_one_per_hwsku_frontend_hostname] diff --git a/tests/cacl/test_cacl_application.py b/tests/cacl/test_cacl_application.py index 78efab36960..04bf19f08f7 100644 --- a/tests/cacl/test_cacl_application.py +++ b/tests/cacl/test_cacl_application.py @@ -404,6 +404,9 @@ def generate_expected_rules(duthost, tbinfo, docker_network, asic_index, expecte if asic_index is None: # Allow Communication among docker containers for k, v in list(docker_network['container'].items()): + # network mode for dhcp_server container is bridge, but this rule is not expected to be seen + if k == "dhcp_server": + continue iptables_rules.append("-A INPUT -s {}/32 -d {}/32 -j ACCEPT" .format(docker_network['bridge']['IPv4Address'], docker_network['bridge']['IPv4Address'])) diff --git a/tests/common/devices/onyx.py b/tests/common/devices/onyx.py index b3f467b72dc..3169213d77a 100644 --- a/tests/common/devices/onyx.py +++ b/tests/common/devices/onyx.py @@ -189,19 +189,6 @@ def set_speed(self, interface_name, speed): speed = 'auto' else: speed = speed[:-3] + 'G' - # The speed support list for onyx is like '1G 10G 25G 40G 50Gx1 50Gx2 100Gx2 100Gx4 200Gx4'. - # We need to set the speed according to the speed support list. - # For example, when dut and fanout all support 50G, - # if support speed list of fanout just includes 50Gx1 not 50G, - # we need to set the speed with 50Gx1 instead of 50G, otherwise, the port can not be up. 
- all_support_speeds = self.get_supported_speeds(interface_name, raw_data=True) - for support_speed in all_support_speeds: - if speed in support_speed: - logger.info("Speed {} find the matched support speed:{} ".format(speed, support_speed)) - speed = support_speed - break - logger.info("set speed is {}".format(speed)) - if autoneg_mode or speed == 'auto': out = self.host.onyx_config( lines=['shutdown', 'speed {}'.format(speed), 'no shutdown'], diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index 7cd31b94361..66e3a369a48 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -831,7 +831,7 @@ def mux_cable_server_ip(dut): def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, standby_tor_ip, selected_port, target_server_ip, target_server_ipv6, target_server_port, ptf_portchannel_indices, - completeness_level, check_ipv6=False): + completeness_level, check_ipv6=False, skip_traffic_test=False): """ Function for testing traffic distribution among all avtive T1. A test script will be running on ptf to generate traffic to standby interface, and the traffic will be forwarded to @@ -849,7 +849,9 @@ def check_tunnel_balance(ptfhost, standby_tor_mac, vlan_mac, active_tor_ip, Returns: None. 
""" - + if skip_traffic_test is True: + logging.info("Skip checking tunnel balance due to traffic test was skipped") + return HASH_KEYS = ["src-port", "dst-port", "src-ip"] params = { "server_ip": target_server_ip, @@ -1104,7 +1106,7 @@ def check_nexthops_balance(rand_selected_dut, ptfadapter, dst_server_addr, pc)) -def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num): +def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test=False): for pc, intfs in portchannel_ports.items(): count = 0 # Collect the packets count within a single portchannel @@ -1113,13 +1115,16 @@ def check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_pa count = count + port_packet_count.get(uplink_int, 0) logging.info("Packets received on portchannel {}: {}".format(pc, count)) + if skip_traffic_test is True: + logging.info("Skip checking single uplink balance due to traffic test was skipped") + continue if count > 0 and count != expect_packet_num: pytest.fail("Packets not sent up single standby port {}".format(pc)) # verify nexthops are only sent to single active or standby mux def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, downlink_ints): + tbinfo, downlink_ints, skip_traffic_test=False): HASH_KEYS = ["src-port", "dst-port", "src-ip"] expect_packet_num = 1000 expect_packet_num_high = expect_packet_num * (0.90) @@ -1134,6 +1139,9 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add port_packet_count = dict() packets_to_send = generate_hashed_packet_to_server(ptfadapter, rand_selected_dut, HASH_KEYS, dst_server_addr, expect_packet_num) + if skip_traffic_test is True: + logging.info("Skip checking single downlink balance due to traffic test was skipped") + return for send_packet, exp_pkt, exp_tunnel_pkt in packets_to_send: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), send_packet, count=1) # expect 
multi-mux nexthops to focus packets to one downlink @@ -1155,10 +1163,11 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add if len(downlink_ints) == 0: # All nexthops are now connected to standby mux, and the packets will be sent towards a single portchanel int # Check if uplink distribution is towards a single portchannel - check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num) + check_nexthops_single_uplink(portchannel_ports, port_packet_count, expect_packet_num, skip_traffic_test) -def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, pkt_num=100, drop=False): +def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, + pkt_num=100, drop=False, skip_traffic_test=False): """ @summary: Helper function for verifying upstream packets @param host: The dut host @@ -1211,6 +1220,9 @@ def verify_upstream_traffic(host, ptfadapter, tbinfo, itfs, server_ip, pkt_num=1 logger.info("Verifying upstream traffic. packet number = {} interface = {} \ server_ip = {} expect_drop = {}".format(pkt_num, itfs, server_ip, drop)) + if skip_traffic_test is True: + logger.info("Skip verifying upstream traffic due to traffic test was skipped") + return for i in range(0, pkt_num): ptfadapter.dataplane.flush() testutils.send(ptfadapter, tx_port, pkt, count=1) diff --git a/tests/common/dualtor/server_traffic_utils.py b/tests/common/dualtor/server_traffic_utils.py index bcd41622089..943dcea4b5d 100644 --- a/tests/common/dualtor/server_traffic_utils.py +++ b/tests/common/dualtor/server_traffic_utils.py @@ -50,7 +50,8 @@ class ServerTrafficMonitor(object): VLAN_INTERFACE_TEMPLATE = "{external_port}.{vlan_id}" def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, - conn_graph_facts, exp_pkt, existing=True, is_mocked=False): + conn_graph_facts, exp_pkt, existing=True, is_mocked=False, + skip_traffic_test=False): """ @summary: Initialize the monitor. 
@@ -75,6 +76,7 @@ def __init__(self, duthost, ptfhost, vmhost, tbinfo, dut_iface, self.conn_graph_facts = conn_graph_facts self.captured_packets = [] self.matched_packets = [] + self.skip_traffic_test = skip_traffic_test if is_mocked: mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo) ptf_iface = "eth%s" % mg_facts['minigraph_ptf_indices'][self.dut_iface] @@ -120,6 +122,9 @@ def __exit__(self, exc_type, exc_value, traceback): logging.info("the expected packet:\n%s", str(self.exp_pkt)) self.matched_packets = [p for p in self.captured_packets if match_exp_pkt(self.exp_pkt, p)] logging.info("received %d matched packets", len(self.matched_packets)) + if self.skip_traffic_test is True: + logging.info("Skip matched_packets verify due to traffic test was skipped.") + return if self.matched_packets: logging.info( "display the most recent matched captured packet:\n%s", diff --git a/tests/common/dualtor/tunnel_traffic_utils.py b/tests/common/dualtor/tunnel_traffic_utils.py index 790a28f4fee..b66e13ecd44 100644 --- a/tests/common/dualtor/tunnel_traffic_utils.py +++ b/tests/common/dualtor/tunnel_traffic_utils.py @@ -250,7 +250,7 @@ def _disassemble_ip_tos(tos): return " ,".join(check_res) def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=None, - check_items=("ttl", "tos", "queue"), packet_count=10): + check_items=("ttl", "tos", "queue"), packet_count=10, skip_traffic_test=False): """ Init the tunnel traffic monitor. 
@@ -262,6 +262,7 @@ def __init__(self, standby_tor, active_tor=None, existing=True, inner_packet=Non self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo)) self.ptfadapter = ptfadapter self.packet_count = packet_count + self.skip_traffic_test = skip_traffic_test standby_tor_cfg_facts = self.standby_tor.config_facts( host=self.standby_tor.hostname, source="running" @@ -294,6 +295,9 @@ def __enter__(self): def __exit__(self, *exc_info): if exc_info[0]: return + if self.skip_traffic_test is True: + logging.info("Skip tunnel traffic verify due to traffic test was skipped.") + return try: port_index, rec_pkt = testutils.verify_packet_any_port( ptfadapter, diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 16d0bd5301f..05407667f87 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -206,7 +206,7 @@ copp/test_copp.py: skip: reason: "Topology not supported by COPP tests" conditions: - - "(topo_name not in ['ptf32', 'ptf64', 't0', 't0-64', 't0-52', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 'm0', 'mx'] and 't2' not in topo_type)" + - "(topo_name not in ['ptf32', 'ptf64', 't0', 't0-64', 't0-52', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 'm0', 'm0-2vlan', 'mx'] and 't2' not in topo_type)" copp/test_copp.py::TestCOPP::test_add_new_trap: skip: @@ -921,13 +921,6 @@ ntp/test_ntp.py::test_ntp: conditions: - https://github.com/sonic-net/sonic-buildimage/issues/19425 -ntp/test_ntp.py::test_ntp_long_jump_disabled: - # Due to NTP code bug, long jump will still happen after disable it. 
- # Set xfail flag for this test case - xfail: - strict: True - reason: "Known NTP bug" - ####################################### ##### pc ##### ####################################### @@ -1147,6 +1140,12 @@ platform_tests/test_secure_upgrade.py: conditions: - "'sn2' in platform or 'sn3' in platform or 'sn4' in platform" +platform_tests/test_service_warm_restart.py: + skip: + reason: "Testcase ignored due to sonic-mgmt issue: https://github.com/sonic-net/sonic-mgmt/issues/10362" + conditions: + - "https://github.com/sonic-net/sonic-mgmt/issues/10362" + ####################################### ##### qos ##### ####################################### @@ -1568,12 +1567,6 @@ syslog/test_syslog_source_ip.py: ####################################### ##### system_health ##### ####################################### -system_health/test_system_health.py::test_device_checker: - skip: - reason: "Temporary skip for Mellanox platforms" - conditions: - - "asic_type in ['mellanox']" - system_health/test_system_health.py::test_service_checker_with_process_exit: xfail: strict: True @@ -1634,15 +1627,15 @@ vlan/test_vlan.py::test_vlan_tc7_tagged_qinq_switch_on_outer_tag: skip: reason: "Unsupported platform." conditions: - - "asic_type not in ['mellanox', 'barefoot']" + - "asic_type not in ['mellanox', 'barefoot', 'cisco-8000']" vlan/test_vlan_ping.py: skip: - reason: "test_vlan_ping doesn't work on Broadcom platform. Ignored on dualtor topo and mellanox and Cisco-8000 setups due to Github issue: https://github.com/sonic-net/sonic-mgmt/issues/9642." + reason: "test_vlan_ping doesn't work on Broadcom platform. Ignored on dualtor topo and mellanox setups due to Github issue: https://github.com/sonic-net/sonic-mgmt/issues/9642." 
conditions_logical_operator: OR conditions: - "asic_type in ['broadcom']" - - "https://github.com/sonic-net/sonic-mgmt/issues/9642 and 'dualtor' in topo_name and asic_type in ['mellanox', 'cisco-8000']" + - "https://github.com/sonic-net/sonic-mgmt/issues/9642 and 'dualtor' in topo_name and asic_type in ['mellanox']" ####################################### ##### voq ##### diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_acl.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_acl.yaml index bfd3bf5b193..69853395a25 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_acl.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_acl.yaml @@ -250,12 +250,22 @@ acl/test_acl.py::TestAclWithPortToggle::test_udp_source_ip_match_dropped[ipv6-eg - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 +acl/test_acl.py::TestAclWithReboot: + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" + acl/test_acl.py::TestAclWithReboot::test_dest_ip_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: reason: "Egress issue in Nokia" conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_dest_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -263,6 +273,10 @@ acl/test_acl.py::TestAclWithReboot::test_dest_ip_match_dropped[ipv6-egress-uplin conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too 
long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_dest_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -270,6 +284,10 @@ acl/test_acl.py::TestAclWithReboot::test_dest_ip_match_dropped[ipv6-egress-uplin conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -277,6 +295,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -284,6 +306,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -291,6 +317,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_match_forwarded[ipv6-egress-uplink conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + 
reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -298,6 +328,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -305,6 +339,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -312,6 +350,10 @@ acl/test_acl.py::TestAclWithReboot::test_icmp_source_ip_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -319,6 +361,10 @@ acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - 
https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -326,6 +372,10 @@ acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -333,6 +383,10 @@ acl/test_acl.py::TestAclWithReboot::test_ip_proto_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -340,6 +394,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -347,6 +405,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-upli 
conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -354,6 +416,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_dport_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -361,6 +427,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -368,7 +438,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 - + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -376,6 +449,10 @@ 
acl/test_acl.py::TestAclWithReboot::test_l4_dport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -383,6 +460,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -390,6 +471,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -397,6 +482,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_match_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" 
acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -404,6 +493,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -411,6 +504,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -418,6 +515,10 @@ acl/test_acl.py::TestAclWithReboot::test_l4_sport_range_match_dropped[ipv6-egres conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -425,6 +526,10 @@ acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this 
testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -432,6 +537,10 @@ acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -439,6 +548,10 @@ acl/test_acl.py::TestAclWithReboot::test_rules_priority_dropped[ipv6-egress-upli conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_source_ip_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -446,6 +559,10 @@ acl/test_acl.py::TestAclWithReboot::test_source_ip_match_dropped[ipv6-egress-upl conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -453,12 +570,21 @@ acl/test_acl.py::TestAclWithReboot::test_source_ip_match_dropped[ipv6-egress-upl conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in 
t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" + acl/test_acl.py::TestAclWithReboot::test_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: reason: "Egress issue in Nokia" conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -466,6 +592,10 @@ acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-upl conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -473,6 +603,10 @@ acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-upl conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -480,6 +614,10 @@ acl/test_acl.py::TestAclWithReboot::test_tcp_flags_match_dropped[ipv6-egress-upl conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in 
t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: @@ -487,6 +625,10 @@ acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan1000]: xfail: @@ -494,7 +636,10 @@ acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 - + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress-uplink->downlink-m0_vlan_scenario-Vlan2000]: xfail: @@ -502,6 +647,10 @@ acl/test_acl.py::TestAclWithReboot::test_udp_source_ip_match_dropped[ipv6-egress conditions: - "platform in ['armhf-nokia_ixs7215_52x-r0']" - https://github.com/sonic-net/sonic-mgmt/issues/8639 + skip: + reason: "Skip in t1-lag KVM test due to test time too long and t0 would cover this testbeds" + conditions: + - "topo_type in ['t1'] and asic_type in ['vs']" acl/test_acl.py::TestBasicAcl::test_dest_ip_match_dropped[ipv6-egress-uplink->downlink-default-Vlan1000]: xfail: diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml index 
b6a073ecf80..3059f273c56 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml @@ -924,15 +924,6 @@ platform_tests/test_sequential_restart.py::test_restart_syncd: skip: reason: "Restarting syncd is not supported yet" -####################################### -##### test_service_warm_restart.py #### -####################################### -platform_tests/test_service_warm_restart.py: - skip: - reason: "Skip test_service_warm_restart on mellanox platform" - conditions: - - "asic_type in ['mellanox']" - ####################################### ##### test_xcvr_info_in_db.py ##### ####################################### diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml index 43bd4bdf0f7..640d0896a9d 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml @@ -61,6 +61,57 @@ decap/test_decap.py: conditions: - "asic_type in ['vs']" +####################################### +##### dualtor ##### +####################################### +dualtor/test_ipinip.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_orchagent_active_tor_downstream.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_orchagent_mac_move.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_orchagent_slb.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_orchagent_standby_tor_downstream.py: + skip_traffic_test: + reason: "Skip traffic 
test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_standby_tor_upstream_mux_toggle.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_tor_ecn.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +dualtor/test_tunnel_memory_leak.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + ####################################### ##### ecmp ##### ####################################### diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index c6469273c55..a1bd1190a1e 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -257,17 +257,20 @@ def _is_db_omem_over_threshold(command_output): total_omem = 0 re_omem = re.compile(r"omem=(\d+)") result = False + non_zero_output = [] for line in command_output: m = re_omem.search(line) if m: omem = int(m.group(1)) total_omem += omem + if omem > 0: + non_zero_output.append(line) logger.debug('total_omen={}, OMEM_THRESHOLD_BYTES={}'.format(total_omem, OMEM_THRESHOLD_BYTES)) if total_omem > OMEM_THRESHOLD_BYTES: result = True - return result, total_omem + return result, total_omem, non_zero_output @pytest.fixture(scope="module") @@ -288,11 +291,13 @@ def _check_dbmemory_on_dut(*args, **kwargs): # check the db memory on the redis instance running on each instance for asic in dut.asics: res = asic.run_redis_cli_cmd(redis_cmd)['stdout_lines'] - result, total_omem = _is_db_omem_over_threshold(res) + result, total_omem, non_zero_output = _is_db_omem_over_threshold(res) check_result["total_omem"] = total_omem if result: check_result["failed"] = True logging.info("{} db memory over the threshold ".format(str(asic.namespace or ''))) + logging.info("{} db memory omem non-zero output: \n{}" + .format(str(asic.namespace or 
''), "\n".join(non_zero_output))) break logger.info("Done checking database memory on %s" % dut.hostname) results[dut.hostname] = check_result diff --git a/tests/common/snappi_tests/snappi_fixtures.py b/tests/common/snappi_tests/snappi_fixtures.py index 7a0e3b92717..c41be35a52a 100644 --- a/tests/common/snappi_tests/snappi_fixtures.py +++ b/tests/common/snappi_tests/snappi_fixtures.py @@ -580,7 +580,26 @@ def snappi_dut_base_config(duthost_list, pfc = l1_config.flow_control.ieee_802_1qbb pfc.pfc_delay = 0 - [setattr(pfc, 'pfc_class_{}'.format(i), i) for i in range(8)] + if pfcQueueGroupSize == 8: + pfc.pfc_class_0 = 0 + pfc.pfc_class_1 = 1 + pfc.pfc_class_2 = 2 + pfc.pfc_class_3 = 3 + pfc.pfc_class_4 = 4 + pfc.pfc_class_5 = 5 + pfc.pfc_class_6 = 6 + pfc.pfc_class_7 = 7 + elif pfcQueueGroupSize == 4: + pfc.pfc_class_0 = pfcQueueValueDict[0] + pfc.pfc_class_1 = pfcQueueValueDict[1] + pfc.pfc_class_2 = pfcQueueValueDict[2] + pfc.pfc_class_3 = pfcQueueValueDict[3] + pfc.pfc_class_4 = pfcQueueValueDict[4] + pfc.pfc_class_5 = pfcQueueValueDict[5] + pfc.pfc_class_6 = pfcQueueValueDict[6] + pfc.pfc_class_7 = pfcQueueValueDict[7] + else: + pytest_assert(False, 'pfcQueueGroupSize value is not 4 or 8') port_config_list = [] @@ -630,6 +649,7 @@ def _get_multidut_snappi_ports(line_card_choice, line_card_info): if port["peer_port"] in asic_port_map[asic] and hostname in port['peer_device']: port['asic_value'] = asic port['asic_type'] = host.facts["asic_type"] + port['duthost'] = host ports.append(port) return ports return _get_multidut_snappi_ports diff --git a/tests/configlet/util/base_test.py b/tests/configlet/util/base_test.py index 916f221839c..60bc3024307 100644 --- a/tests/configlet/util/base_test.py +++ b/tests/configlet/util/base_test.py @@ -85,7 +85,8 @@ def init(duthost): for i in [data_dir, orig_db_dir, no_t0_db_dir, clet_db_dir, patch_add_t0_dir, patch_rm_t0_dir, files_dir]: - os.mkdir(i) + if not os.path.exists(i): + os.mkdir(i) init_data["files_dir"] = files_dir 
init_data["data_dir"] = data_dir diff --git a/tests/decap/test_decap.py b/tests/decap/test_decap.py index 7205366a92b..7d559982f94 100644 --- a/tests/decap/test_decap.py +++ b/tests/decap/test_decap.py @@ -46,6 +46,20 @@ ] +@pytest.fixture(autouse=True) +def ignore_expected_loganalyzer_exceptions(duthosts, rand_one_dut_hostname, loganalyzer): + # Ignore in KVM test + KVMIgnoreRegex = [ + ".*unknown decap tunnel table attribute 'dst_ip'.*", + ".*Tunnel TEST_IPINIP_V4_TUNNEL cannot be removed since it doesn't exist.*", + ".*Tunnel TEST_IPINIP_V6_TUNNEL cannot be removed since it doesn't exist.*", + ] + duthost = duthosts[rand_one_dut_hostname] + if loganalyzer: # Skip if loganalyzer is disabled + if duthost.facts["asic_type"] == "vs": + loganalyzer[duthost.hostname].ignore_regex.extend(KVMIgnoreRegex) + + def remove_default_decap_cfg(duthosts): for duthost in duthosts: logger.info('Remove default decap cfg on {}'.format(duthost.hostname)) diff --git a/tests/drop_packets/drop_packets.py b/tests/drop_packets/drop_packets.py index 7bd7511df09..c17f83c2da8 100644 --- a/tests/drop_packets/drop_packets.py +++ b/tests/drop_packets/drop_packets.py @@ -27,6 +27,8 @@ LAB_CONNECTION_GRAPH_PATH = os.path.normpath((os.path.join(os.path.dirname(__file__), "../../ansible/files"))) ACL_COUNTERS_UPDATE_INTERVAL = 10 +ACL_TABLE_CREATE_INTERVAL = 30 +PORT_STATE_UPDATE_INTERNAL = 30 LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*" LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*" LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*" @@ -313,7 +315,6 @@ def rif_port_down(duthosts, enum_rand_one_per_hwsku_frontend_hostname, setup, fa """Shut RIF interface and return neighbor IP address attached to this interface.""" duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="drop_packet_rif_port_down") - wait_after_ports_up = 30 if not setup["rif_members"]: pytest.skip("RIF interface 
is absent") @@ -334,6 +335,9 @@ def rif_port_down(duthosts, enum_rand_one_per_hwsku_frontend_hostname, setup, fa loganalyzer.expect_regex = [LOG_EXPECT_PORT_OPER_DOWN_RE.format(rif_member_iface)] with loganalyzer as _: fanout_neighbor.shutdown(fanout_intf) + # Add a delay to ensure loganalyzer can find a match in the log. Without this delay, there's a + # chance it might miss the matching log. + time.sleep(PORT_STATE_UPDATE_INTERNAL) time.sleep(1) @@ -342,7 +346,9 @@ def rif_port_down(duthosts, enum_rand_one_per_hwsku_frontend_hostname, setup, fa loganalyzer.expect_regex = [LOG_EXPECT_PORT_OPER_UP_RE.format(rif_member_iface)] with loganalyzer as _: fanout_neighbor.no_shutdown(fanout_intf) - time.sleep(wait_after_ports_up) + # Add a delay to ensure loganalyzer can find a match in the log. Without this delay, there's a + # chance it might miss the matching log. + time.sleep(PORT_STATE_UPDATE_INTERNAL) @pytest.fixture(params=["port_channel_members", "vlan_members", "rif_members"]) @@ -418,6 +424,8 @@ def acl_teardown(duthosts, dut_tmp_dir, dut_clear_conf_file_path): duthost.command("config acl update full {}".format(dut_clear_conf_file_path)) logger.info("Removing {}".format(dut_tmp_dir)) duthost.command("rm -rf {}".format(dut_tmp_dir)) + # Add a delay to ensure loganalyzer can find a match in the log. Without this delay, there's a + # chance it might miss the matching log. time.sleep(ACL_COUNTERS_UPDATE_INTERVAL) @@ -478,6 +486,10 @@ def create_or_remove_acl_egress_table(duthost, op): ','.join(table_port_list) ) ) + + # Add a delay to ensure loganalyzer can find a match in the log. Without this delay, there's a + # chance it might miss the matching log. 
+ time.sleep(ACL_TABLE_CREATE_INTERVAL) elif op == "remove": logger.info("Removing ACL table \"{}\" on device {}".format(acl_table_config["table_name"], duthost)) sonic_host_or_asic_inst.command("config acl remove table {}".format(acl_table_config["table_name"])) diff --git a/tests/dualtor/conftest.py b/tests/dualtor/conftest.py index 7f13a7517ef..eb933e61f20 100644 --- a/tests/dualtor/conftest.py +++ b/tests/dualtor/conftest.py @@ -65,16 +65,17 @@ def pytest_addoption(parser): @pytest.fixture(scope="module", autouse=True) def common_setup_teardown(rand_selected_dut, request, tbinfo, vmhost): # Skip dualtor test cases on unsupported platform - supported_platforms = ['broadcom_td3_hwskus', 'broadcom_th2_hwskus', 'cisco_hwskus', 'mellanox_dualtor_hwskus'] - hostvars = get_host_visible_vars(rand_selected_dut.host.options['inventory'], rand_selected_dut.hostname) - hwsku = rand_selected_dut.facts['hwsku'] - skip = True - for platform in supported_platforms: - supported_skus = hostvars.get(platform, []) - if hwsku in supported_skus: - skip = False - break - py_require(not skip, "Skip on unsupported platform") + if rand_selected_dut.facts['asic_type'] != 'vs': + supported_platforms = ['broadcom_td3_hwskus', 'broadcom_th2_hwskus', 'cisco_hwskus', 'mellanox_dualtor_hwskus'] + hostvars = get_host_visible_vars(rand_selected_dut.host.options['inventory'], rand_selected_dut.hostname) + hwsku = rand_selected_dut.facts['hwsku'] + skip = True + for platform in supported_platforms: + supported_skus = hostvars.get(platform, []) + if hwsku in supported_skus: + skip = False + break + py_require(not skip, "Skip on unsupported platform") if 'dualtor' in tbinfo['topo']['name']: request.getfixturevalue('run_garp_service') diff --git a/tests/dualtor/test_ipinip.py b/tests/dualtor/test_ipinip.py index 15ed332782a..635c534d2a9 100644 --- a/tests/dualtor/test_ipinip.py +++ b/tests/dualtor/test_ipinip.py @@ -28,6 +28,8 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # 
noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 from tests.common.dualtor.dual_tor_utils import validate_active_active_dualtor_setup # noqa F401 @@ -104,7 +106,7 @@ def build_expected_packet_to_server(encapsulated_packet, decrease_ttl=False): def test_decap_active_tor( build_encapsulated_packet, request, ptfhost, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor): # noqa F401 + tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test): # noqa F811 @contextlib.contextmanager def stop_garp(ptfhost): @@ -128,6 +130,9 @@ def stop_garp(ptfhost): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf) + if skip_traffic_test is True: + logging.info("Skip following traffic test") + return with stop_garp(ptfhost): ptfadapter.dataplane.flush() testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet) @@ -137,7 +142,7 @@ def stop_garp(ptfhost): def test_decap_standby_tor( build_encapsulated_packet, request, rand_selected_interface, ptfadapter, # noqa F401 - tbinfo, rand_selected_dut, tunnel_traffic_monitor # noqa F401 + tbinfo, rand_selected_dut, tunnel_traffic_monitor, skip_traffic_test # noqa F401 ): def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): @@ -166,6 +171,9 @@ def verify_downstream_packet_to_server(ptfadapter, port, exp_pkt): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send encapsulated packet from ptf t1 interface %s", 
ptf_t1_intf) + if skip_traffic_test is True: + logging.info("Skip following traffic test") + return with tunnel_traffic_monitor(tor, existing=False): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet, count=10) time.sleep(2) @@ -295,7 +303,7 @@ def setup_active_active_ports(active_active_ports, rand_selected_dut, rand_unsel def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, # noqa F811 ptfadapter, tbinfo, setup_mirror_session, toggle_all_simulator_ports_to_rand_unselected_tor, # noqa F811 - tunnel_traffic_monitor, # noqa F811 + tunnel_traffic_monitor, skip_traffic_test, # noqa F811 setup_standby_ports_on_rand_selected_tor): # noqa F811 """ A test case to verify the bounced back packet from Standby ToR to T1 doesn't have an unexpected vlan id (4095) @@ -314,5 +322,8 @@ def test_encap_with_mirror_session(rand_selected_dut, rand_selected_interface, logging.info("Sending packet from ptf t1 interface {}".format(src_port_id)) inner_packet = pkt_to_server[scapy.all.IP].copy() inner_packet[IP].ttl -= 1 + if skip_traffic_test is True: + logging.info("Skip following traffic test") + return with tunnel_traffic_monitor(rand_selected_dut, inner_packet=inner_packet, check_items=()): testutils.send(ptfadapter, src_port_id, pkt_to_server) diff --git a/tests/dualtor/test_orchagent_active_tor_downstream.py b/tests/dualtor/test_orchagent_active_tor_downstream.py index 88fb4f073f9..652e5b135ff 100644 --- a/tests/dualtor/test_orchagent_active_tor_downstream.py +++ b/tests/dualtor/test_orchagent_active_tor_downstream.py @@ -21,6 +21,9 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from 
tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until @@ -67,7 +70,7 @@ def neighbor_reachable(duthost, neighbor_ip): def test_active_tor_remove_neighbor_downstream_active( conn_graph_facts, ptfadapter, ptfhost, testbed_setup, rand_selected_dut, tbinfo, set_crm_polling_interval, - tunnel_traffic_monitor, vmhost # noqa F811 + tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 ): """ @Verify those two scenarios: @@ -101,18 +104,18 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send traffic to server %s from ptf t1 interface %s", server_ip, ptf_t1_intf) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) logging.info("send traffic to server %s after removing neighbor entry", server_ip) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) remove_neighbor_ct = 
remove_neighbor(ptfhost, tor, server_ip, ip_version, removed_neighbor) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ @@ -124,8 +127,8 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): logging.info("send traffic to server %s after neighbor entry is restored", server_ip) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) with crm_neighbor_checker(tor, ip_version, expect_change=ip_version == "ipv6"), \ tunnel_monitor, server_traffic_monitor: @@ -145,7 +148,7 @@ def remove_neighbor(ptfhost, duthost, server_ip, ip_version, neighbor_details): def test_downstream_ecmp_nexthops( ptfadapter, rand_selected_dut, tbinfo, - toggle_all_simulator_ports, tor_mux_intfs, ip_version # noqa F811 + toggle_all_simulator_ports, tor_mux_intfs, ip_version, skip_traffic_test # noqa F811 ): nexthops_count = 4 set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) # noqa F405 @@ -171,7 +174,7 @@ def test_downstream_ecmp_nexthops( try: logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces) + tbinfo, nexthop_interfaces, skip_traffic_test) nexthop_interfaces_copy = nexthop_interfaces.copy() @@ -182,7 +185,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.remove(interface) logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces_copy) + tbinfo, nexthop_interfaces_copy, skip_traffic_test) # 
Revert two mux states to active for index, interface in reversed(list(enumerate(nexthop_interfaces))): @@ -191,7 +194,7 @@ def test_downstream_ecmp_nexthops( nexthop_interfaces_copy.append(interface) logging.info("Verify traffic to this route destination is sent to single downlink or uplink") check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_addr, - tbinfo, nexthop_interfaces_copy) + tbinfo, nexthop_interfaces_copy, skip_traffic_test) finally: # Remove the nexthop route remove_static_routes(rand_selected_dut, dst_server_addr) diff --git a/tests/dualtor/test_orchagent_mac_move.py b/tests/dualtor/test_orchagent_mac_move.py index a87b60e9c4e..62226cfee4d 100644 --- a/tests/dualtor/test_orchagent_mac_move.py +++ b/tests/dualtor/test_orchagent_mac_move.py @@ -13,6 +13,9 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output @@ -84,7 +87,7 @@ def test_mac_move( announce_new_neighbor, apply_active_state_to_orchagent, conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, set_crm_polling_interval, - tbinfo, tunnel_traffic_monitor, vmhost # noqa F811 + tbinfo, tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 ): tor = rand_selected_dut ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -95,10 +98,10 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("let new neighbor learnt on active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, 
existing=False) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -108,10 +111,10 @@ def test_mac_move( announce_new_neighbor.send(lambda iface: set_dual_tor_state_to_orchagent(tor, "standby", [iface])) # noqa F405 logging.info("mac move to a standby port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -119,8 +122,8 @@ def test_mac_move( # standby forwarding check after fdb ageout/flush tor.shell("fdbclear") server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test 
# noqa F405 ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -130,10 +133,10 @@ def test_mac_move( announce_new_neighbor.send(None) logging.info("mac move to another active port %s", test_port) pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, NEW_NEIGHBOR_IPV4_ADDR) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=False) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=False, skip_traffic_test=skip_traffic_test) server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=True, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=True, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) @@ -143,8 +146,8 @@ def test_mac_move( if not (tor.facts['asic_type'] == 'mellanox' or tor.facts['asic_type'] == 'cisco-8000'): tor.shell("fdbclear") server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_port, - conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo) # noqa F405 + tor, ptfhost, vmhost, tbinfo, test_port, conn_graph_facts, exp_pkt, + existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test # noqa F405 ) with crm_neighbor_checker(tor), tunnel_monitor, server_traffic_monitor: testutils.send(ptfadapter, ptf_t1_intf_index, pkt, count=10) diff --git a/tests/dualtor/test_orchagent_slb.py b/tests/dualtor/test_orchagent_slb.py index bb55a415d2c..a6a3f30570f 100644 --- a/tests/dualtor/test_orchagent_slb.py +++ b/tests/dualtor/test_orchagent_slb.py @@ -2,6 +2,7 @@ import pytest import random import time +import logging import scapy.all as scapyall from ptf import testutils @@ -19,6 
+20,8 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.helpers import bgp from tests.common.utilities import is_ipv4_address @@ -215,7 +218,7 @@ def test_orchagent_slb( force_active_tor, upper_tor_host, lower_tor_host, # noqa F811 ptfadapter, ptfhost, setup_interfaces, toggle_all_simulator_ports_to_upper_tor, tbinfo, # noqa F811 - tunnel_traffic_monitor, vmhost # noqa F811 + tunnel_traffic_monitor, vmhost, skip_traffic_test # noqa F811 ): def verify_bgp_session(duthost, bgp_neighbor): @@ -233,7 +236,11 @@ def verify_route(duthost, route, existing=True): else: assert len(existing_route["nexthops"]) == 0 - def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True): + def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_existed=True, + skip_traffic_test=skip_traffic_test): + if skip_traffic_test is True: + logging.info("Skip traffic test.") + return prefix = ipaddress.ip_network(route["prefix"]) dst_host = str(next(prefix.hosts())) pkt, exp_pkt = build_packet_to_server(duthost, ptfadapter, dst_host) @@ -288,11 +295,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 3: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=True + is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=True + is_duthost_active=False, is_route_existed=True, 
skip_traffic_test=skip_traffic_test ) # STEP 4: withdraw the announced route to both ToRs @@ -307,11 +314,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 5: verify the route is removed by verifying that downstream traffic is dropped verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=True, is_route_existed=False + is_duthost_active=True, is_route_existed=False, skip_traffic_test=skip_traffic_test ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=False, is_route_existed=False + is_duthost_active=False, is_route_existed=False, skip_traffic_test=skip_traffic_test ) # STEP 6: toggle mux state change @@ -334,11 +341,11 @@ def verify_traffic(duthost, connection, route, is_duthost_active=True, is_route_ # STEP 8: verify the route by sending some downstream traffic verify_traffic( upper_tor_host, connections["upper_tor"], constants.route, - is_duthost_active=False, is_route_existed=True + is_duthost_active=False, is_route_existed=True, skip_traffic_test=skip_traffic_test ) verify_traffic( lower_tor_host, connections["lower_tor"], constants.route, - is_duthost_active=True, is_route_existed=True + is_duthost_active=True, is_route_existed=True, skip_traffic_test=skip_traffic_test ) # STEP 9: verify teardown diff --git a/tests/dualtor/test_orchagent_standby_tor_downstream.py b/tests/dualtor/test_orchagent_standby_tor_downstream.py index 76858681313..abf2304d7f1 100644 --- a/tests/dualtor/test_orchagent_standby_tor_downstream.py +++ b/tests/dualtor/test_orchagent_standby_tor_downstream.py @@ -19,6 +19,9 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa: F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa: F401 from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa: F401 +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work 
around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.helpers.assertions import pytest_assert as pt_assert from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor # noqa: F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor @@ -61,12 +64,13 @@ def get_function_completeness_level(pytestconfig): @pytest.fixture def get_testbed_params(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - ip_version, setup_testbed_ipv6, get_function_completeness_level): + ip_version, setup_testbed_ipv6, get_function_completeness_level, skip_traffic_test): # noqa F811 """Return a function to get testbed params.""" def _get_testbed_params(): params = dualtor_info(ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, get_function_completeness_level) params["check_ipv6"] = (ip_version == "ipv6") + params["skip_traffic_test"] = skip_traffic_test return params return _get_testbed_params @@ -144,7 +148,8 @@ def test_standby_tor_downstream(rand_selected_dut, get_testbed_params): def test_standby_tor_downstream_t1_link_recovered( - rand_selected_dut, verify_crm_nexthop_counter_not_increased, tbinfo, get_testbed_params + rand_selected_dut, verify_crm_nexthop_counter_not_increased, + tbinfo, get_testbed_params ): """ Verify traffic is distributed evenly after t1 link is recovered; @@ -205,7 +210,7 @@ def route_matches_expected_state(duthost, route_ip, expect_route): @pytest.fixture def remove_peer_loopback_route(rand_selected_dut, rand_unselected_dut, - shutdown_bgp_sessions, get_testbed_params): # noqa: F811 + shutdown_bgp_sessions, get_testbed_params): # noqa F811 """ Remove routes to peer ToR loopback IP by shutting down BGP sessions on the peer """ @@ -271,9 +276,9 @@ def test_standby_tor_downstream_loopback_route_readded( def test_standby_tor_remove_neighbor_downstream_standby( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, 
rand_unselected_dut, tbinfo, - set_crm_polling_interval, tunnel_traffic_monitor, # noqa: F811 + set_crm_polling_interval, tunnel_traffic_monitor, # noqa: F811 vmhost, get_testbed_params, - ip_version + ip_version, skip_traffic_test # noqa: F811 ): """ @summary: Verify that after removing neighbor entry for a server over standby @@ -304,15 +309,15 @@ def stop_neighbor_advertiser(ptfhost, ip_version): pkt, exp_pkt = build_packet_to_server(tor, ptfadapter, target_server) ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send traffic to server %s from ptf t1 interface %s", target_server, ptf_t1_intf) - tunnel_monitor = tunnel_traffic_monitor(tor, existing=True) + tunnel_monitor = tunnel_traffic_monitor(tor, existing=True, skip_traffic_test=skip_traffic_test) with tunnel_monitor: testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), pkt, count=10) logging.info("send traffic to server %s after removing neighbor entry", target_server) tunnel_monitor.existing = False server_traffic_monitor = ServerTrafficMonitor( - tor, ptfhost, vmhost, tbinfo, test_params["selected_port"], - conn_graph_facts, exp_pkt, existing=False, is_mocked=is_mocked_dualtor(tbinfo) + tor, ptfhost, vmhost, tbinfo, test_params["selected_port"], conn_graph_facts, exp_pkt, + existing=False, is_mocked=is_mocked_dualtor(tbinfo), skip_traffic_test=skip_traffic_test ) # for real dualtor testbed, leave the neighbor restoration to garp service flush_neighbor_ct = flush_neighbor(tor, target_server, restore=is_t0_mocked_dualtor) @@ -329,9 +334,9 @@ def stop_neighbor_advertiser(ptfhost, ip_version): def test_downstream_standby_mux_toggle_active( conn_graph_facts, ptfadapter, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, - tunnel_traffic_monitor, vmhost, # noqa: F811 - toggle_all_simulator_ports, tor_mux_intfs, # noqa: F811 - ip_version, get_testbed_params + tunnel_traffic_monitor, vmhost, # noqa: F811 + toggle_all_simulator_ports, tor_mux_intfs, # noqa: F811 + ip_version, 
get_testbed_params, skip_traffic_test # noqa: F811 ): # set rand_selected_dut as standby and rand_unselected_dut to active tor test_params = get_testbed_params() @@ -345,7 +350,10 @@ def test_downstream_standby_mux_toggle_active( pkt, exp_pkt = build_packet_to_server(rand_selected_dut, ptfadapter, random_dst_ip) ptf_t1_intf = random.choice(get_t1_ptf_ports(rand_selected_dut, tbinfo)) - def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, expect_server_traffic=True): + def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, + expect_server_traffic=True, skip_traffic_test=False): + if skip_traffic_test is True: + return tunnel_monitor = tunnel_traffic_monitor(rand_selected_dut, existing=True) server_traffic_monitor = ServerTrafficMonitor( torhost, ptfhost, vmhost, tbinfo, test_params["selected_port"], @@ -363,14 +371,16 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, expec time.sleep(30) logger.info("Step 1.2: Verify traffic to this route dst is forwarded to Active ToR and equally distributed") check_tunnel_balance(**test_params) - monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, expect_tunnel_traffic=True) + monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, + expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) logger.info("Stage 2: Verify Active Forwarding") logger.info("Step 2.1: Simulate Mux state change to active") set_mux_state(rand_selected_dut, tbinfo, 'active', tor_mux_intfs, toggle_all_simulator_ports) time.sleep(30) logger.info("Step 2.2: Verify traffic to this route dst is forwarded directly to server") - monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=True, expect_tunnel_traffic=False) + monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=True, + expect_tunnel_traffic=False, skip_traffic_test=skip_traffic_test) logger.info("Stage 3: Verify Standby Forwarding 
Again") logger.info("Step 3.1: Simulate Mux state change to standby") @@ -378,7 +388,8 @@ def monitor_tunnel_and_server_traffic(torhost, expect_tunnel_traffic=True, expec time.sleep(30) logger.info("Step 3.2: Verify traffic to this route dst \ is now redirected back to Active ToR and equally distributed") - monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, expect_tunnel_traffic=True) + monitor_tunnel_and_server_traffic(rand_selected_dut, expect_server_traffic=False, + expect_tunnel_traffic=True, skip_traffic_test=skip_traffic_test) check_tunnel_balance(**test_params) remove_static_routes(rand_selected_dut, random_dst_ip) diff --git a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py index d18659e6b1f..4dcefba2f09 100644 --- a/tests/dualtor/test_standby_tor_upstream_mux_toggle.py +++ b/tests/dualtor/test_standby_tor_upstream_mux_toggle.py @@ -11,6 +11,9 @@ from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service, \ run_icmp_responder # noqa F401 +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 logger = logging.getLogger(__file__) @@ -33,8 +36,8 @@ def test_cleanup(rand_selected_dut): def test_standby_tor_upstream_mux_toggle( - rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, # noqa F811 - toggle_all_simulator_ports, set_crm_polling_interval): # noqa F811 + rand_selected_dut, tbinfo, ptfadapter, rand_selected_interface, # noqa F811 + toggle_all_simulator_ports, set_crm_polling_interval, skip_traffic_test): # noqa F811 itfs, ip = rand_selected_interface PKT_NUM = 100 # Step 1. 
Set mux state to standby and verify traffic is dropped by ACL rule and drop counters incremented @@ -49,7 +52,8 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True) + drop=True, + skip_traffic_test=skip_traffic_test) time.sleep(5) # Step 2. Toggle mux state to active, and verify traffic is not dropped by ACL and fwd-ed to uplinks; @@ -64,7 +68,8 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=False) + drop=False, + skip_traffic_test=skip_traffic_test) # Step 3. Toggle mux state to standby, and verify traffic is dropped by ACL; # verify CRM show and no nexthop objects are stale @@ -78,7 +83,8 @@ def test_standby_tor_upstream_mux_toggle( itfs=itfs, server_ip=ip['server_ipv4'].split('/')[0], pkt_num=PKT_NUM, - drop=True) + drop=True, + skip_traffic_test=skip_traffic_test) crm_facts1 = rand_selected_dut.get_crm_facts() unmatched_crm_facts = compare_crm_facts(crm_facts0, crm_facts1) pt_assert(len(unmatched_crm_facts) == 0, 'Unmatched CRM facts: {}' diff --git a/tests/dualtor/test_tor_ecn.py b/tests/dualtor/test_tor_ecn.py index 8aae5aaa27c..eed5bf082ef 100644 --- a/tests/dualtor/test_tor_ecn.py +++ b/tests/dualtor/test_tor_ecn.py @@ -28,6 +28,8 @@ from tests.common.fixtures.ptfhost_utils import run_icmp_responder # noqa F401 from tests.common.fixtures.ptfhost_utils import run_garp_service # noqa F401 from tests.common.fixtures.ptfhost_utils import change_mac_addresses # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import dump_scapy_packet_show_output from tests.common.dualtor.tunnel_traffic_utils import derive_queue_id_from_dscp, derive_out_dscp_from_inner_dscp from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 @@ -275,7 
+277,7 @@ def test_dscp_to_queue_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 - duthosts, rand_one_dut_hostname + duthosts, rand_one_dut_hostname, skip_traffic_test # noqa F811 ): """ Test if DSCP to Q mapping for inner header is matching with outer header during decap on active @@ -295,6 +297,9 @@ def test_dscp_to_queue_during_decap_on_active( duthost.shell('sonic-clear queuecounters') logging.info("Clearing queue counters before starting traffic") + if skip_traffic_test is True: + logging.info("Skip following test due traffic test skipped") + return with stop_garp(ptfhost): ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) @@ -346,7 +351,8 @@ def test_dscp_to_queue_during_encap_on_standby( duthosts, rand_one_dut_hostname, write_standby, - setup_standby_ports_on_rand_selected_tor # noqa F811 + setup_standby_ports_on_rand_selected_tor, # noqa F811 + skip_traffic_test # noqa F811 ): """ Test if DSCP to Q mapping for outer header is matching with inner header during encap on standby @@ -367,6 +373,9 @@ def test_dscp_to_queue_during_encap_on_standby( ptfadapter.dataplane.flush() ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) + if skip_traffic_test is True: + logging.info("Skip following test due traffic test skipped") + return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) @@ -375,7 +384,8 @@ def test_dscp_to_queue_during_encap_on_standby( def test_ecn_during_decap_on_active( inner_dscp, ptfhost, setup_dualtor_tor_active, request, rand_selected_interface, ptfadapter, # noqa F811 - tbinfo, rand_selected_dut, tunnel_traffic_monitor # noqa F811 + tbinfo, rand_selected_dut, 
tunnel_traffic_monitor, # noqa F811 + skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on inner header is matching with outer during decap on active @@ -395,6 +405,10 @@ def test_ecn_during_decap_on_active( exp_tos = encapsulated_packet[IP].payload[IP].tos exp_ecn = exp_tos & 3 + + if skip_traffic_test is True: + logging.info("Skip following test due traffic test skipped") + return with stop_garp(ptfhost): tor.shell("portstat -c") tor.shell("show arp") @@ -411,7 +425,8 @@ def test_ecn_during_encap_on_standby( rand_selected_interface, ptfadapter, # noqa F811 tbinfo, rand_selected_dut, tunnel_traffic_monitor, # noqa F811 write_standby, - setup_standby_ports_on_rand_selected_tor # noqa F811 + setup_standby_ports_on_rand_selected_tor, # noqa F811 + skip_traffic_test # noqa F811 ): """ Test if the ECN stamping on outer header is matching with inner during encap on standby @@ -426,5 +441,8 @@ def test_ecn_during_encap_on_standby( ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo)) logging.info("send IP packet from ptf t1 interface %s", ptf_t1_intf) + if skip_traffic_test is True: + logging.info("Skip following test due traffic test skipped") + return with tunnel_traffic_monitor(tor, existing=True, packet_count=PACKET_NUM): testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), non_encapsulated_packet, count=PACKET_NUM) diff --git a/tests/dualtor/test_tunnel_memory_leak.py b/tests/dualtor/test_tunnel_memory_leak.py index 1613247cc08..c1d3d8ce5fd 100644 --- a/tests/dualtor/test_tunnel_memory_leak.py +++ b/tests/dualtor/test_tunnel_memory_leak.py @@ -11,9 +11,9 @@ import time import contextlib from ptf import testutils -from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_upper_tor # noqa: F401 -from tests.common.dualtor.dual_tor_common import cable_type # noqa: F401 -from tests.common.dualtor.dual_tor_utils import upper_tor_host, lower_tor_host # noqa: F401 +from tests.common.dualtor.mux_simulator_control import 
toggle_all_simulator_ports_to_upper_tor # noqa F401 +from tests.common.dualtor.dual_tor_common import cable_type # noqa F401 +from tests.common.dualtor.dual_tor_utils import upper_tor_host, lower_tor_host # noqa F401 from tests.common.dualtor.server_traffic_utils import ServerTrafficMonitor from tests.common.helpers.assertions import pytest_assert from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports @@ -21,7 +21,9 @@ from tests.common.dualtor.dual_tor_utils import build_packet_to_server from tests.common.dualtor.dual_tor_utils import delete_neighbor from tests.common.helpers.dut_utils import get_program_info -from tests.common.fixtures.ptfhost_utils import run_garp_service, run_icmp_responder # noqa: F401 +from tests.common.fixtures.ptfhost_utils import run_garp_service, run_icmp_responder # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa F401 from tests.common.utilities import wait_until @@ -114,8 +116,9 @@ def _check_memory(duthost): return not wait_until(timeout, interval, delay, _check_memory, duthost) -def test_tunnel_memory_leak(toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa: F811 - ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder): # noqa: F811 +def test_tunnel_memory_leak(toggle_all_simulator_ports_to_upper_tor, upper_tor_host, lower_tor_host, # noqa F811 + ptfhost, ptfadapter, conn_graph_facts, tbinfo, vmhost, run_arp_responder, # noqa F811 + skip_traffic_test): # noqa F811 """ Test if there is memory leak for service tunnel_packet_handler. 
Send ip packets from standby TOR T1 to Server, standby TOR will @@ -169,6 +172,9 @@ def prepare_services(ptfhost): pkt, exp_pkt = build_packet_to_server(lower_tor_host, ptfadapter, server_ipv4) + if skip_traffic_test is True: + logging.info("Skip traffic test.") + continue server_traffic_monitor = ServerTrafficMonitor( upper_tor_host, ptfhost, vmhost, tbinfo, iface, conn_graph_facts, exp_pkt, existing=True, is_mocked=False @@ -182,9 +188,11 @@ def prepare_services(ptfhost): mem_usage, mem_limit, mem_percent = get_memory_info(upper_tor_host) logging.info( "SWSS MEM USAGE:{} LIMIT:{} PERCENT:{}".format(mem_usage, mem_limit, mem_percent)) - pytest_assert(validate_neighbor_entry_exist(upper_tor_host, server_ipv4), - "The server ip {} doesn't exist in neighbor table on dut {}. \ - tunnel_packet_handler isn't triggered.".format(server_ipv4, upper_tor_host.hostname)) + if not skip_traffic_test: + pytest_assert(validate_neighbor_entry_exist(upper_tor_host, server_ipv4), + "The server ip {} doesn't exist in neighbor table on dut {}. \ + tunnel_packet_handler isn't triggered." + .format(server_ipv4, upper_tor_host.hostname)) except Exception as e: logging.error("Capture exception {}, continue the process.".format(repr(e))) if len(server_traffic_monitor.matched_packets) == 0: diff --git a/tests/flow_counter/flow_counter_utils.py b/tests/flow_counter/flow_counter_utils.py index b7c30b7f5b9..ad1907ebb37 100644 --- a/tests/flow_counter/flow_counter_utils.py +++ b/tests/flow_counter/flow_counter_utils.py @@ -120,6 +120,7 @@ def is_route_flow_counter_supported(duthosts, tbinfo, enum_rand_one_per_hwsku_ho if rand_selected_dut.facts['asic_type'] == 'vs': # vs platform always set SAI capability to enabled, however, it does not really support all SAI atrributes. # Currently, vs platform does not support route flow counter. 
+ logger.info('Route flow counter is not supported on vs platform') return False skip, _ = check_skip_release(rand_selected_dut, skip_versions) if skip: diff --git a/tests/generic_config_updater/test_pg_headroom_update.py b/tests/generic_config_updater/test_pg_headroom_update.py index 036537c785b..dfe00bae0a1 100644 --- a/tests/generic_config_updater/test_pg_headroom_update.py +++ b/tests/generic_config_updater/test_pg_headroom_update.py @@ -8,7 +8,7 @@ from tests.generic_config_updater.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.generic_config_updater.gu_utils import generate_tmpfile, delete_tmpfile from tests.generic_config_updater.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload -from tests.generic_config_updater.gu_utils import is_valid_platform_and_version +from tests.generic_config_updater.gu_utils import is_valid_platform_and_version, get_asic_name pytestmark = [ pytest.mark.topology('any'), @@ -79,7 +79,7 @@ def _confirm_value_in_app_and_asic_db(): @pytest.mark.parametrize("operation", ["replace"]) def test_pg_headroom_update(duthost, ensure_dut_readiness, operation, skip_when_buffer_is_dynamic_model): - asic_type = duthost.get_asic_name() + asic_type = get_asic_name(duthost) pytest_require("td2" not in asic_type, "PG headroom should be skipped on TD2") tmpfile = generate_tmpfile(duthost) diff --git a/tests/gnmi/helper.py b/tests/gnmi/helper.py index f4426801beb..496d47454c6 100644 --- a/tests/gnmi/helper.py +++ b/tests/gnmi/helper.py @@ -234,6 +234,125 @@ def gnmi_get(duthost, ptfhost, path_list): raise Exception("error:" + msg) +# py_gnmicli does not fully support POLLING mode +# Use gnmi_cli instead +def gnmi_subscribe_polling(duthost, ptfhost, path_list, interval_ms, count): + """ + Send GNMI subscribe request with GNMI client + + Args: + duthost: fixture for duthost + ptfhost: fixture for ptfhost + path_list: list for get path + interval_ms: interval, unit is ms + count: update count + + Returns: 
+ msg: gnmi client output + """ + if path_list is None: + logger.error("path_list is None") + return "", "" + env = GNMIEnvironment(duthost, GNMIEnvironment.GNMI_MODE) + ip = duthost.mgmt_ip + port = env.gnmi_port + interval = interval_ms / 1000.0 + # Run gnmi_cli in gnmi container as workaround + cmd = "docker exec %s gnmi_cli -client_types=gnmi -a %s:%s " % (env.gnmi_container, ip, port) + cmd += "-client_crt /etc/sonic/telemetry/gnmiclient.crt " + cmd += "-client_key /etc/sonic/telemetry/gnmiclient.key " + cmd += "-ca_crt /etc/sonic/telemetry/gnmiCA.pem " + cmd += "-logtostderr " + # Use sonic-db as default origin + cmd += '-origin=sonic-db ' + cmd += '-query_type=polling ' + cmd += '-polling_interval %us -count %u ' % (int(interval), count) + for path in path_list: + path = path.replace('sonic-db:', '') + cmd += '-q %s ' % (path) + output = duthost.shell(cmd, module_ignore_errors=True) + return output['stdout'], output['stderr'] + + +def gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, interval_ms, count): + """ + Send GNMI subscribe request with GNMI client + + Args: + duthost: fixture for duthost + ptfhost: fixture for ptfhost + path_list: list for get path + interval_ms: interval, unit is ms + count: update count + + Returns: + msg: gnmi client output + """ + if path_list is None: + logger.error("path_list is None") + return "", "" + env = GNMIEnvironment(duthost, GNMIEnvironment.GNMI_MODE) + ip = duthost.mgmt_ip + port = env.gnmi_port + cmd = 'python2 /root/gnxi/gnmi_cli_py/py_gnmicli.py ' + cmd += '--timeout 30 ' + cmd += '-t %s -p %u ' % (ip, port) + cmd += '-xo sonic-db ' + cmd += '-rcert /root/gnmiCA.pem ' + cmd += '-pkey /root/gnmiclient.key ' + cmd += '-cchain /root/gnmiclient.crt ' + cmd += '--encoding 4 ' + cmd += '-m subscribe ' + cmd += '--subscribe_mode 0 --submode 2 --create_connections 1 ' + cmd += '--interval %u --update_count %u ' % (interval_ms, count) + cmd += '--xpath ' + for path in path_list: + path = 
path.replace('sonic-db:', '') + cmd += " " + path + output = ptfhost.shell(cmd, module_ignore_errors=True) + msg = output['stdout'].replace('\\', '') + return msg, output['stderr'] + + +def gnmi_subscribe_streaming_onchange(duthost, ptfhost, path_list, count): + """ + Send GNMI subscribe request with GNMI client + + Args: + duthost: fixture for duthost + ptfhost: fixture for ptfhost + path_list: list for get path + count: update count + + Returns: + msg: gnmi client output + """ + if path_list is None: + logger.error("path_list is None") + return "", "" + env = GNMIEnvironment(duthost, GNMIEnvironment.GNMI_MODE) + ip = duthost.mgmt_ip + port = env.gnmi_port + cmd = 'python2 /root/gnxi/gnmi_cli_py/py_gnmicli.py ' + cmd += '--timeout 30 ' + cmd += '-t %s -p %u ' % (ip, port) + cmd += '-xo sonic-db ' + cmd += '-rcert /root/gnmiCA.pem ' + cmd += '-pkey /root/gnmiclient.key ' + cmd += '-cchain /root/gnmiclient.crt ' + cmd += '--encoding 4 ' + cmd += '-m subscribe ' + cmd += '--subscribe_mode 0 --submode 1 --create_connections 1 ' + cmd += '--update_count %u ' % count + cmd += '--xpath ' + for path in path_list: + path = path.replace('sonic-db:', '') + cmd += " " + path + output = ptfhost.shell(cmd, module_ignore_errors=True) + msg = output['stdout'].replace('\\', '') + return msg, output['stderr'] + + def gnoi_reboot(duthost, method, delay, message): env = GNMIEnvironment(duthost, GNMIEnvironment.GNMI_MODE) ip = duthost.mgmt_ip @@ -251,3 +370,21 @@ def gnoi_reboot(duthost, method, delay, message): return -1, output['stderr'] else: return 0, output['stdout'] + + +def gnoi_request(duthost, localhost, rpc, request_json_data): + env = GNMIEnvironment(duthost, GNMIEnvironment.GNMI_MODE) + ip = duthost.mgmt_ip + port = env.gnmi_port + cmd = "docker exec %s gnoi_client -target %s:%s " % (env.gnmi_container, ip, port) + cmd += "-cert /etc/sonic/telemetry/gnmiclient.crt " + cmd += "-key /etc/sonic/telemetry/gnmiclient.key " + cmd += "-ca /etc/sonic/telemetry/gnmiCA.pem " + cmd 
+= "-logtostderr -rpc {} ".format(rpc) + cmd += f'-jsonin \'{request_json_data}\'' + output = duthost.shell(cmd, module_ignore_errors=True) + if output['stderr']: + logger.error(output['stderr']) + return -1, output['stderr'] + else: + return 0, output['stdout'] diff --git a/tests/gnmi/test_gnmi_configdb.py b/tests/gnmi/test_gnmi_configdb.py index bfef3c330f9..53e3b47ee06 100644 --- a/tests/gnmi/test_gnmi_configdb.py +++ b/tests/gnmi/test_gnmi_configdb.py @@ -1,8 +1,13 @@ import json import logging +import multiprocessing import pytest +import re +import time from .helper import gnmi_set, gnmi_get, gnoi_reboot +from .helper import gnmi_subscribe_polling +from .helper import gnmi_subscribe_streaming_sample, gnmi_subscribe_streaming_onchange from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from tests.common.platform.processes_utils import wait_critical_processes @@ -105,6 +110,114 @@ def test_gnmi_configdb_incremental_02(duthosts, rand_one_dut_hostname, ptfhost): pytest.fail("Set request with invalid path") +test_data_metadata = [ + { + "name": "Subscribe table for DEVICE_METADATA", + "path": "/sonic-db:CONFIG_DB/localhost/DEVICE_METADATA" + }, + { + "name": "Subscribe table key for DEVICE_METADATA", + "path": "/sonic-db:CONFIG_DB/localhost/DEVICE_METADATA/localhost" + }, + { + "name": "Subscribe table field for DEVICE_METADATA", + "path": "/sonic-db:CONFIG_DB/localhost/DEVICE_METADATA/localhost/bgp_asn" + } +] + + +@pytest.mark.parametrize('test_data', test_data_metadata) +def test_gnmi_configdb_polling_01(duthosts, rand_one_dut_hostname, ptfhost, test_data): + ''' + Verify GNMI subscribe API, streaming onchange mode + Subscribe polling mode + ''' + duthost = duthosts[rand_one_dut_hostname] + exp_cnt = 3 + path_list = [test_data["path"]] + msg, _ = gnmi_subscribe_polling(duthost, ptfhost, path_list, 1000, exp_cnt) + assert msg.count("bgp_asn") >= exp_cnt, test_data["name"] + ": " + msg + + 
+@pytest.mark.parametrize('test_data', test_data_metadata) +def test_gnmi_configdb_streaming_sample_01(duthosts, rand_one_dut_hostname, ptfhost, test_data): + ''' + Verify GNMI subscribe API, streaming onchange mode + Subscribe streaming sample mode + ''' + duthost = duthosts[rand_one_dut_hostname] + exp_cnt = 5 + path_list = [test_data["path"]] + msg, _ = gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, 0, exp_cnt) + assert msg.count("bgp_asn") >= exp_cnt, test_data["name"] + ": " + msg + + +@pytest.mark.parametrize('test_data', test_data_metadata) +def test_gnmi_configdb_streaming_onchange_01(duthosts, rand_one_dut_hostname, ptfhost, test_data): + ''' + Verify GNMI subscribe API, streaming onchange mode + Subscribe streaming onchange mode + ''' + duthost = duthosts[rand_one_dut_hostname] + run_flag = multiprocessing.Value('I', True) + + # Update DEVICE_METADATA table to trigger onchange event + def worker(duthost, run_flag): + for i in range(100): + if not run_flag.value: + break + time.sleep(0.5) + cmd = "sonic-db-cli CONFIG_DB hdel \"DEVICE_METADATA|localhost\" bgp_asn " + duthost.shell(cmd, module_ignore_errors=True) + time.sleep(0.5) + cmd = "sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" bgp_asn " + str(i+1000) + duthost.shell(cmd, module_ignore_errors=True) + + client_task = multiprocessing.Process(target=worker, args=(duthost, run_flag,)) + client_task.start() + exp_cnt = 5 + path_list = [test_data["path"]] + msg, _ = gnmi_subscribe_streaming_onchange(duthost, ptfhost, path_list, exp_cnt*2) + run_flag.value = False + client_task.join() + assert msg.count("bgp_asn") >= exp_cnt, test_data["name"] + ": " + msg + + +def test_gnmi_configdb_streaming_onchange_02(duthosts, rand_one_dut_hostname, ptfhost): + ''' + Verify GNMI subscribe API, streaming onchange mode + Subscribe table, and verify gnmi output has table key + ''' + duthost = duthosts[rand_one_dut_hostname] + run_flag = multiprocessing.Value('I', True) + + # Update DEVICE_METADATA 
table to trigger onchange event + def worker(duthost, run_flag): + for i in range(100): + if not run_flag.value: + break + time.sleep(0.5) + cmd = "sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" bgp_asn " + str(i+1000) + duthost.shell(cmd, module_ignore_errors=True) + + client_task = multiprocessing.Process(target=worker, args=(duthost, run_flag,)) + client_task.start() + exp_cnt = 3 + path_list = ["/sonic-db:CONFIG_DB/localhost/DEVICE_METADATA"] + msg, _ = gnmi_subscribe_streaming_onchange(duthost, ptfhost, path_list, exp_cnt) + run_flag.value = False + client_task.join() + + match_list = re.findall("json_ietf_val: \"({.*?})\"", msg) + assert len(match_list) >= exp_cnt, "Missing json_ietf_val in gnmi response: " + msg + for match in match_list: + result = json.loads(match) + # Verify table key + assert "localhost" in result, "Invalid result: " + match + # Verify table field + assert "bgp_asn" in result["localhost"], "Invalid result: " + match + + def test_gnmi_configdb_full_01(duthosts, rand_one_dut_hostname, ptfhost): ''' Verify GNMI native write, full config for configDB diff --git a/tests/gnmi/test_gnmi_countersdb.py b/tests/gnmi/test_gnmi_countersdb.py new file mode 100644 index 00000000000..182737b20d6 --- /dev/null +++ b/tests/gnmi/test_gnmi_countersdb.py @@ -0,0 +1,164 @@ +import logging +import pytest +import re + +from .helper import gnmi_get, gnmi_subscribe_polling, gnmi_subscribe_streaming_sample +from tests.common.helpers.assertions import pytest_assert + + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology('any'), + pytest.mark.disable_loganalyzer +] + + +def test_gnmi_queue_buffer_cnt(duthosts, rand_one_dut_hostname, ptfhost): + """ + Check number of queue counters + """ + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + logger.info('start gnmi output testing') + iface = "Ethernet0" + # Get UC for Ethernet0 
+ dut_command = "show queue counters %s" % iface + result = duthost.shell(dut_command, module_ignore_errors=True) + uc_list = re.findall(r"UC(\d+)", result["stdout"]) + for i in uc_list: + # Read UC + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS_QUEUE_NAME_MAP/" + iface + ":" + str(i)] + msg_list = gnmi_get(duthost, ptfhost, path_list) + result = msg_list[0] + pytest_assert("oid" in result, result) + # Read invalid UC + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS_QUEUE_NAME_MAP/" + iface + ":abc"] + try: + msg_list = gnmi_get(duthost, ptfhost, path_list) + except Exception as e: + assert "GRPC error" in str(e), str(e) + else: + pytest.fail("Should fail for invalid path: " + path_list[0]) + + +def test_gnmi_output(duthosts, rand_one_dut_hostname, ptfhost): + """ + Read COUNTERS table + Get table key from COUNTERS_PORT_NAME_MAP + """ + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + logger.info('start gnmi output testing') + # Get COUNTERS table key for Ethernet0 + dut_command = "sonic-db-cli COUNTERS_DB hget COUNTERS_PORT_NAME_MAP Ethernet0" + result = duthost.shell(dut_command, module_ignore_errors=True) + counter_key = result['stdout'].strip() + assert "oid" in counter_key, "Invalid oid: " + counter_key + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/" + counter_key] + msg_list = gnmi_get(duthost, ptfhost, path_list) + result = msg_list[0] + logger.info("GNMI Server output") + logger.info(result) + pytest_assert("SAI_PORT_STAT_IF_IN_ERRORS" in result, + "SAI_PORT_STAT_IF_IN_ERRORS not found in gnmi_output: " + result) + + +test_data_counters_port_name_map = [ + { + "name": "Subscribe table for COUNTERS_PORT_NAME_MAP", + "path": "/sonic-db:COUNTERS_DB/localhost/COUNTERS_PORT_NAME_MAP" + }, + { + "name": "Subscribe table field for COUNTERS_PORT_NAME_MAP", + "path": "/sonic-db:COUNTERS_DB/localhost/COUNTERS_PORT_NAME_MAP/Ethernet0" 
+ } +] + + +@pytest.mark.parametrize('test_data', test_data_counters_port_name_map) +def test_gnmi_counterdb_polling_01(duthosts, rand_one_dut_hostname, ptfhost, test_data): + ''' + Verify GNMI subscribe API + Subscribe polling mode for COUNTERS_PORT_NAME_MAP + ''' + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + exp_cnt = 3 + path_list = [test_data["path"]] + msg, _ = gnmi_subscribe_polling(duthost, ptfhost, path_list, 1000, exp_cnt) + assert msg.count("oid") >= exp_cnt, test_data["name"] + ": " + msg + + +def test_gnmi_counterdb_polling_02(duthosts, rand_one_dut_hostname, ptfhost): + ''' + Verify GNMI subscribe API + Subscribe polling mode for COUNTERS + ''' + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + exp_cnt = 3 + # Get COUNTERS table key for Ethernet0 + dut_command = "sonic-db-cli COUNTERS_DB hget COUNTERS_PORT_NAME_MAP Ethernet0" + result = duthost.shell(dut_command, module_ignore_errors=True) + counter_key = result['stdout'].strip() + assert "oid" in counter_key, "Invalid oid: " + counter_key + # Subscribe table + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/"] + msg, _ = gnmi_subscribe_polling(duthost, ptfhost, path_list, 1000, exp_cnt) + assert msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg + # Subscribe table key + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/" + counter_key] + msg, _ = gnmi_subscribe_polling(duthost, ptfhost, path_list, 1000, exp_cnt) + assert msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg + # Subscribe table field + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/" + counter_key + "/SAI_PORT_STAT_IF_IN_ERRORS"] + msg, _ = gnmi_subscribe_polling(duthost, ptfhost, path_list, 1000, exp_cnt) + assert msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg + + 
+@pytest.mark.parametrize('test_data', test_data_counters_port_name_map) +def test_gnmi_counterdb_streaming_sample_01(duthosts, rand_one_dut_hostname, ptfhost, test_data): + ''' + Verify GNMI subscribe API + Subscribe streaming sample mode for COUNTERS_PORT_NAME_MAP + ''' + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + exp_cnt = 3 + path_list = [test_data["path"]] + msg, _ = gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, 0, exp_cnt) + assert msg.count("oid") >= exp_cnt, test_data["name"] + ": " + msg + + +def test_gnmi_counterdb_streaming_sample_02(duthosts, rand_one_dut_hostname, ptfhost): + ''' + Verify GNMI subscribe API + Subscribe streaming sample mode for COUNTERS + ''' + duthost = duthosts[rand_one_dut_hostname] + if duthost.is_supervisor_node(): + pytest.skip("Skipping test as no Ethernet0 frontpanel port on supervisor") + exp_cnt = 3 + # Get COUNTERS table key for Ethernet0 + dut_command = "sonic-db-cli COUNTERS_DB hget COUNTERS_PORT_NAME_MAP Ethernet0" + result = duthost.shell(dut_command, module_ignore_errors=True) + counter_key = result['stdout'].strip() + assert "oid" in counter_key, "Invalid oid: " + counter_key + # Subscribe table + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/"] + msg, _ = gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, 0, exp_cnt) + assert msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg + # Subscribe table key + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/" + counter_key] + msg, _ = gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, 0, exp_cnt) + assert msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg + # Subscribe table field + path_list = ["/sonic-db:COUNTERS_DB/localhost/COUNTERS/" + counter_key + "/SAI_PORT_STAT_IF_IN_ERRORS"] + msg, _ = gnmi_subscribe_streaming_sample(duthost, ptfhost, path_list, 0, exp_cnt) + assert 
msg.count("SAI_PORT_STAT_IF_IN_ERRORS") >= exp_cnt, msg diff --git a/tests/gnmi/test_gnoi_killprocess.py b/tests/gnmi/test_gnoi_killprocess.py new file mode 100644 index 00000000000..fabdf281653 --- /dev/null +++ b/tests/gnmi/test_gnoi_killprocess.py @@ -0,0 +1,79 @@ +import pytest +from .helper import gnoi_request +from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.dut_utils import is_container_running + + +# This test ensures functionality of KillProcess API to kill and restart a process when a valid process name is passed +# When an invalid process name is passed, this test ensures that the expected error is returned +@pytest.mark.parametrize("process,is_valid, expected_msg", [ + ("gnmi", False, "Dbus does not support gnmi service management"), + ("nonexistent", False, "Dbus does not support nonexistent service management"), + ("", False, "Dbus stop_service called with no service specified"), + ("snmp", True, ""), + ("dhcp_relay", True, ""), + ("radv", True, ""), + ("restapi", True, ""), + ("lldp", True, ""), + ("sshd", True, ""), + ("swss", True, ""), + ("pmon", True, ""), + ("rsyslog", True, ""), + ("telemetry", True, "") +]) +def test_gnoi_killprocess_then_restart(duthosts, rand_one_dut_hostname, localhost, process, is_valid, expected_msg): + duthost = duthosts[rand_one_dut_hostname] + + if process and process != "nonexistent": + pytest_assert(duthost.is_host_service_running(process), + "{} should be running before KillProcess test attempts to kill this process".format(process)) + + request_kill_json_data = '{{"name": "{}", "signal": 1}}'.format(process) + ret, msg = gnoi_request(duthost, localhost, "KillProcess", request_kill_json_data) + if is_valid: + pytest_assert(ret == 0, "KillProcess API unexpectedly reported failure") + pytest_assert(not is_container_running(duthost, process), + "{} found running after KillProcess reported success".format(process)) + + request_restart_json_data = '{{"name": "{}", "restart": true, 
"signal": 1}}'.format(process) + ret, msg = gnoi_request(duthost, localhost, "KillProcess", request_restart_json_data) + pytest_assert(ret == 0, + "KillProcess API unexpectedly reported failure when attempting to restart {}".format(process)) + pytest_assert(duthost.is_host_service_running(process), + "{} not running after KillProcess reported successful restart".format(process)) + else: + pytest_assert(ret != 0, "KillProcess API unexpectedly succeeded with invalid request parameters") + pytest_assert(expected_msg in msg, "Unexpected error message in response to invalid gNOI request") + + pytest_assert(duthost.critical_services_fully_started, "System unhealthy after gNOI API request") + + +# This test performs additional verification of the restart request under KillProcess API +# This test focuses on edge conditions of restart value in the request, so we only test against one service: snmp +@pytest.mark.parametrize("request_restart_value, is_valid", [ + ("invalid", False), + ("", False) +]) +def test_gnoi_killprocess_restart(duthosts, rand_one_dut_hostname, localhost, request_restart_value, is_valid): + duthost = duthosts[rand_one_dut_hostname] + request_json_data = f'{{"name": "snmp", "restart": {request_restart_value}, "signal": 1}}' + ret, msg = gnoi_request(duthost, localhost, "KillProcess", request_json_data) + if is_valid: + pytest_assert(ret == 0, "KillProcess API unexpectedly reported failure") + pytest_assert(is_container_running(duthost, "snmp"), + "snmp not running after KillProcess API reported successful restart") + else: + pytest_assert(ret != 0, "KillProcess API unexpectedly succeeded with invalid request parameters") + pytest_assert("panic" in msg, "Unexpected error message in response to invalid gNOI request") + pytest_assert(duthost.critical_services_fully_started, "System unhealthy after gNOI API request") + + +def test_invalid_signal(duthosts, rand_one_dut_hostname, localhost): + duthost = duthosts[rand_one_dut_hostname] + request_json_data = 
'{"name": "snmp", "restart": true, "signal": 2}' + ret, msg = gnoi_request(duthost, localhost, "KillProcess", request_json_data) + + pytest_assert(ret != 0, "KillProcess API unexpectedly succeeded with invalid request parameters") + pytest_assert("KillProcess only supports SIGNAL_TERM (option 1)" in msg, + "Unexpected error message in response to invalid gNOI request") + pytest_assert(duthost.critical_services_fully_started, "System unhealthy after gNOI API request") diff --git a/tests/ip/test_mgmt_ipv6_only.py b/tests/ip/test_mgmt_ipv6_only.py index db94ec7e587..0e2d7692204 100644 --- a/tests/ip/test_mgmt_ipv6_only.py +++ b/tests/ip/test_mgmt_ipv6_only.py @@ -62,6 +62,28 @@ def log_eth0_interface_info(duthosts): logging.debug(f"Checking host[{duthost.hostname}] ifconfig eth0:[{duthost_interface}] after fixture") +def log_tacacs(duthosts, ptfhost): + for duthost in duthosts: + # Print debug info for ipv6 pingability + ptfhost_vars = ptfhost.host.options['inventory_manager'].get_host(ptfhost.hostname).vars + if 'ansible_hostv6' in ptfhost_vars: + tacacs_server_ip = ptfhost_vars['ansible_hostv6'] + ping_result = duthost.shell(f"ping {tacacs_server_ip} -c 1 -W 3", module_ignore_errors=True)["stdout"] + logging.debug(f"Checking ping_result [{ping_result}]") + + # Print debug info for mgmt interfaces and forced mgmt routes + mgmt_interface_keys = duthost.command("sonic-db-cli CONFIG_DB keys 'MGMT_INTERFACE|*'")['stdout'] + logging.debug(f"mgmt_interface_keys: {mgmt_interface_keys}") + for intf_key in mgmt_interface_keys.split('\n'): + logging.debug(f"interface key: {intf_key}") + intf_values = intf_key.split('|') + if len(intf_values) != 3: + logging.debug(f"Unexpected interface key: {intf_key}") + continue + forced_mgmt_rte = duthost.command(f"sonic-db-cli CONFIG_DB HGET '{intf_key}' forced_mgmt_routes@")['stdout'] + logging.debug(f"forced_mgmt_routes: {forced_mgmt_rte}, interface address: {intf_values[2]}") + + def test_bgp_facts_ipv6_only(duthosts, 
enum_frontend_dut_hostname, enum_asic_index, convert_and_restore_config_db_to_ipv6_only): # noqa F811 # Add a temporary debug log to see if DUTs are reachable via IPv6 mgmt-ip. Will remove later @@ -134,6 +156,7 @@ def test_ro_user_ipv6_only(localhost, ptfhost, duthosts, enum_rand_one_per_hwsku log_eth0_interface_info(duthosts) duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutipv6 = get_mgmt_ipv6(duthost) + log_tacacs(duthosts, ptfhost) res = ssh_remote_run_retry(localhost, dutipv6, ptfhost, tacacs_creds['tacacs_ro_user'], tacacs_creds['tacacs_ro_user_passwd'], 'cat /etc/passwd') @@ -148,6 +171,7 @@ def test_rw_user_ipv6_only(localhost, ptfhost, duthosts, enum_rand_one_per_hwsku log_eth0_interface_info(duthosts) duthost = duthosts[enum_rand_one_per_hwsku_hostname] dutipv6 = get_mgmt_ipv6(duthost) + log_tacacs(duthosts, ptfhost) res = ssh_remote_run_retry(localhost, dutipv6, ptfhost, tacacs_creds['tacacs_rw_user'], tacacs_creds['tacacs_rw_user_passwd'], "cat /etc/passwd") diff --git a/tests/mvrf/test_mgmtvrf.py b/tests/mvrf/test_mgmtvrf.py index ff639647d03..8a184579fc4 100644 --- a/tests/mvrf/test_mgmtvrf.py +++ b/tests/mvrf/test_mgmtvrf.py @@ -228,7 +228,15 @@ def test_ntp(self, duthosts, rand_one_dut_hostname, ptfhost, check_ntp_sync, ntp # Check if ntp was not in sync with ntp server before enabling mvrf, if yes then setup ntp server on ptf if check_ntp_sync: setup_ntp(ptfhost, duthost, ntp_servers) - ntp_uid = ":".join(duthost.command("getent passwd ntp")['stdout'].split(':')[2:4]) + + # There is no entry ntp in `/etc/passwd` on kvm testbed. 
+ cmd = "getent passwd ntp" + ntp_uid_output = duthost.command(cmd, module_ignore_errors=True) + if duthost.facts["asic_type"] == "vs" and ntp_uid_output['rc'] == 2: + return + assert ntp_uid_output['rc'] == 0, "Run command '{}' failed".format(cmd) + ntp_uid = ":".join(ntp_uid_output['stdout'].split(':')[2:4]) + force_ntp = "timeout 20 ntpd -gq -u {}".format(ntp_uid) duthost.service(name="ntp", state="stopped") logger.info("Ntp restart in mgmt vrf") @@ -276,8 +284,21 @@ def test_warmboot(self, duthosts, rand_one_dut_hostname, localhost, ptfhost, cre reboot(duthost, localhost, reboot_type="warm") pytest_assert(wait_until(120, 20, 0, duthost.critical_services_fully_started), "Not all critical services are fully started") + # Change default critical services to check services that starts with bootOn timer - duthost.reset_critical_services_tracking_list(['snmp', 'telemetry', 'mgmt-framework']) + # In some images, we have gnmi container only + # In some images, we have telemetry container only + # And in some images, we have both gnmi and telemetry container + critical_services = ['snmp', 'mgmt-framework'] + cmd = "docker ps | grep -w gnmi" + if duthost.shell(cmd, module_ignore_errors=True)['rc'] == 0: + critical_services.append('gnmi') + + cmd = "docker ps | grep -w telemetry" + if duthost.shell(cmd, module_ignore_errors=True)['rc'] == 0: + critical_services.append('telemetry') + duthost.reset_critical_services_tracking_list(critical_services) + pytest_assert(wait_until(180, 20, 0, duthost.critical_services_fully_started), "Not all services which start with bootOn timer are fully started") self.basic_check_after_reboot(duthost, localhost, ptfhost, creds) diff --git a/tests/ntp/test_ntp.py b/tests/ntp/test_ntp.py index 9236f665191..ecbcf68b209 100644 --- a/tests/ntp/test_ntp.py +++ b/tests/ntp/test_ntp.py @@ -39,14 +39,16 @@ def config_long_jump(duthost, enable=False): regex = "s/NTPD_OPTS='-g'/NTPD_OPTS='-x'/" if using_ntpsec: - duthost.command("sed -i '%s' 
/etc/default/ntpsec" % regex) + duthost.command("sudo sed -i '%s' /etc/default/ntpsec" % regex) else: - duthost.command("sed -i %s /etc/default/ntp" % regex) + duthost.command("sudo sed -i %s /etc/default/ntp" % regex) duthost.service(name='ntp', state='restarted') @pytest.fixture(scope="module") def setup_ntp(ptfhost, duthosts, rand_one_dut_hostname, ptf_use_ipv6): + if ptf_use_ipv6 and not ptfhost.mgmt_ipv6: + pytest.skip("No IPv6 address on PTF host") with _context_for_setup_ntp(ptfhost, duthosts, rand_one_dut_hostname, ptf_use_ipv6) as result: yield result @@ -146,8 +148,8 @@ def test_ntp_long_jump_disabled(duthosts, rand_one_dut_hostname, setup_ntp, setu config_long_jump(duthost, enable=False) - if wait_until(720, 10, 0, check_ntp_status, duthost): - pytest.fail("NTP long jump disable failed") + pytest_assert(wait_until(720, 10, 0, check_ntp_status, duthost), + "NTP long jump disable failed") def run_ntp(duthosts, rand_one_dut_hostname, setup_ntp): diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py index db2b6069f06..e2123188541 100644 --- a/tests/pfcwd/conftest.py +++ b/tests/pfcwd/conftest.py @@ -107,7 +107,7 @@ def setup_pfc_test( duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] mg_facts = duthost.get_extended_minigraph_facts(tbinfo) port_list = list(mg_facts['minigraph_ports'].keys()) - neighbors = conn_graph_facts['device_conn'][duthost.hostname] + neighbors = conn_graph_facts['device_conn'].get(duthost.hostname, {}) dut_eth0_ip = duthost.mgmt_ip vlan_nw = None diff --git a/tests/pfcwd/files/pfcwd_helper.py b/tests/pfcwd/files/pfcwd_helper.py index 4de4a99ce4c..5c8308249c4 100644 --- a/tests/pfcwd/files/pfcwd_helper.py +++ b/tests/pfcwd/files/pfcwd_helper.py @@ -104,21 +104,22 @@ def parse_intf_list(self): if item['peer_addr'] == pfc_wd_test_port_addr: pfc_wd_test_neighbor_addr = item['addr'] - self.test_ports[pfc_wd_test_port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, - 'rx_port': [self.pfc_wd_rx_port], - 
'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, - 'peer_device': self.neighbors[pfc_wd_test_port]['peerdevice'], - 'test_port_id': pfc_wd_test_port_id, - 'rx_port_id': [self.pfc_wd_rx_port_id], - 'test_port_type': 'interface' - } + self.test_ports[pfc_wd_test_port] = { + 'test_neighbor_addr': pfc_wd_test_neighbor_addr, + 'rx_port': [self.pfc_wd_rx_port], + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors.get(pfc_wd_test_port, {}).get('peerdevice', ''), + 'test_port_id': pfc_wd_test_port_id, + 'rx_port_id': [self.pfc_wd_rx_port_id], + 'test_port_type': 'interface' + } # populate info for the first port if first_pair: self.test_ports[self.pfc_wd_rx_port] = { 'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, 'rx_port': [pfc_wd_test_port], 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, - 'peer_device': self.neighbors[self.pfc_wd_rx_port]['peerdevice'], + 'peer_device': self.neighbors.get(self.pfc_wd_rx_port, {}).get('peerdevice', ''), 'test_port_id': self.pfc_wd_rx_port_id, 'rx_port_id': [pfc_wd_test_port_id], 'test_port_type': 'interface' @@ -173,7 +174,7 @@ def parse_pc_list(self): self.test_ports[port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, 'rx_port': self.pfc_wd_rx_port, 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, - 'peer_device': self.neighbors[port]['peerdevice'], + 'peer_device': self.neighbors.get(port, {}).get('peerdevice', ''), 'test_port_id': self.port_idx_info[port], 'rx_port_id': self.pfc_wd_rx_port_id, 'test_portchannel_members': pfc_wd_test_port_id, @@ -185,7 +186,7 @@ def parse_pc_list(self): self.test_ports[port] = {'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, 'rx_port': pfc_wd_test_port, 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, - 'peer_device': self.neighbors[port]['peerdevice'], + 'peer_device': self.neighbors.get(port, {}).get('peerdevice', ''), 'test_port_id': self.port_idx_info[port], 'rx_port_id': pfc_wd_test_port_id, 'test_portchannel_members': self.pfc_wd_rx_port_id, @@ -222,7 
+223,7 @@ def parse_vlan_list(self): temp_ports[item] = {'test_neighbor_addr': self.vlan_nw, 'rx_port': rx_port, 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, - 'peer_device': self.neighbors[item]['peerdevice'], + 'peer_device': self.neighbors.get(item, {}).get('peerdevice', ''), 'test_port_id': self.port_idx_info[item], 'rx_port_id': rx_port_id, 'test_port_type': 'vlan' @@ -267,23 +268,24 @@ def parse_vlan_sub_interface_list(self): if item['peer_addr'] == pfc_wd_test_port_addr: pfc_wd_test_neighbor_addr = item['addr'] - self.test_ports[pfc_wd_test_port] = {'test_neighbor_addr': pfc_wd_test_neighbor_addr, - 'rx_port': [self.pfc_wd_rx_port], - 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, - 'peer_device': self.neighbors[pfc_wd_test_port]['peerdevice'], - 'test_port_id': pfc_wd_test_port_id, - 'rx_port_id': [self.pfc_wd_rx_port_id], - 'rx_port_vlan_id': self.pfc_wd_rx_port_vlan_id, - 'test_port_vlan_id': vlan_id, - 'test_port_type': 'interface' - } + self.test_ports[pfc_wd_test_port] = { + 'test_neighbor_addr': pfc_wd_test_neighbor_addr, + 'rx_port': [self.pfc_wd_rx_port], + 'rx_neighbor_addr': self.pfc_wd_rx_neighbor_addr, + 'peer_device': self.neighbors.get(pfc_wd_test_port, {}).get('peerdevice', ''), + 'test_port_id': pfc_wd_test_port_id, + 'rx_port_id': [self.pfc_wd_rx_port_id], + 'rx_port_vlan_id': self.pfc_wd_rx_port_vlan_id, + 'test_port_vlan_id': vlan_id, + 'test_port_type': 'interface' + } # populate info for the first port if first_pair: self.test_ports[self.pfc_wd_rx_port] = { 'test_neighbor_addr': self.pfc_wd_rx_neighbor_addr, 'rx_port': [pfc_wd_test_port], 'rx_neighbor_addr': pfc_wd_test_neighbor_addr, - 'peer_device': self.neighbors[self.pfc_wd_rx_port]['peerdevice'], + 'peer_device': self.neighbors.get(self.pfc_wd_rx_port, {}).get('peerdevice', ''), 'test_port_id': self.pfc_wd_rx_port_id, 'rx_port_id': [pfc_wd_test_port_id], 'rx_port_vlan_id': vlan_id, diff --git a/tests/pfcwd/test_pfcwd_timer_accuracy.py 
b/tests/pfcwd/test_pfcwd_timer_accuracy.py index a8418f3b684..81e869494f7 100644 --- a/tests/pfcwd/test_pfcwd_timer_accuracy.py +++ b/tests/pfcwd/test_pfcwd_timer_accuracy.py @@ -218,6 +218,8 @@ def verify_pfcwd_timers(self): self.all_detect_time.sort() self.all_restore_time.sort() logger.info("Verify that real detection time is not greater than configured") + logger.info("all detect time {}".format(self.all_detect_time)) + logger.info("all restore time {}".format(self.all_restore_time)) config_detect_time = self.timers['pfc_wd_detect_time'] + self.timers['pfc_wd_poll_time'] err_msg = ("Real detection time is greater than configured: Real detect time: {} " "Expected: {} (wd_detect_time + wd_poll_time)".format(self.all_detect_time[9], diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index 74b15476c46..3f322af1276 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -58,7 +58,7 @@ def skip_on_simx(duthosts, rand_one_dut_hostname): @pytest.fixture(scope="module") -def xcvr_skip_list(duthosts, dpu_npu_port_list): +def xcvr_skip_list(duthosts, dpu_npu_port_list, tbinfo): intf_skip_list = {} for dut in duthosts: platform = dut.facts['platform'] @@ -89,6 +89,12 @@ def xcvr_skip_list(duthosts, dpu_npu_port_list): logging.debug('Skipping sfp interfaces: {}'.format(sfp_list)) intf_skip_list[dut.hostname].extend(sfp_list) + # For Mx topo, skip the SFP interfaces because they are admin down + if tbinfo['topo']['name'] == "mx" and hwsku in ["Arista-720DT-G48S4", "Nokia-7215"]: + sfp_list = ['Ethernet48', 'Ethernet49', 'Ethernet50', 'Ethernet51'] + logging.debug('Skipping sfp interfaces: {}'.format(sfp_list)) + intf_skip_list[dut.hostname].extend(sfp_list) + return intf_skip_list diff --git a/tests/platform_tests/test_advanced_reboot.py b/tests/platform_tests/test_advanced_reboot.py index c1bd133a622..4a02ea57ae7 100644 --- a/tests/platform_tests/test_advanced_reboot.py +++ 
b/tests/platform_tests/test_advanced_reboot.py @@ -29,9 +29,28 @@ logger = logging.getLogger() +def check_if_ssd(duthost): + try: + output = duthost.command("lsblk -d -o NAME,ROTA") + lines = output['stdout'].strip().split('\n') + for line in lines[1:]: + name, rota = line.split() + if name.startswith('sd') and int(rota) == 0: + return True + return False + except Exception as e: + logger.error(f"Error while checking SSD: {e}") + return False + + @pytest.fixture(scope="module", params=[SINGLE_TOR_MODE, DUAL_TOR_MODE]) -def testing_config(request, tbinfo): +def testing_config(request, duthosts, rand_one_dut_hostname, tbinfo): testing_mode = request.param + duthost = duthosts[rand_one_dut_hostname] + is_ssd = check_if_ssd(duthost) + neighbor_type = request.config.getoption("--neighbor_type") + if duthost.facts['platform'] == 'x86_64-arista_7050cx3_32s' and not is_ssd and neighbor_type == 'eos': + pytest.skip("skip advanced reboot tests on 7050 devices without SSD") if 'dualtor' in tbinfo['topo']['name']: if testing_mode == SINGLE_TOR_MODE: pytest.skip("skip SINGLE_TOR_MODE tests on Dual ToR testbeds") diff --git a/tests/platform_tests/test_platform_info.py b/tests/platform_tests/test_platform_info.py index 827dfbe3e02..d458ebc97fc 100644 --- a/tests/platform_tests/test_platform_info.py +++ b/tests/platform_tests/test_platform_info.py @@ -27,6 +27,8 @@ CMD_PLATFORM_FANSTATUS = "show platform fan" CMD_PLATFORM_TEMPER = "show platform temperature" +PDU_WAIT_TIME = 20 + THERMAL_CONTROL_TEST_WAIT_TIME = 65 THERMAL_CONTROL_TEST_CHECK_INTERVAL = 5 @@ -296,7 +298,7 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, logging.info("Turn off outlet {}".format(outlet)) pdu_ctrl.turn_off_outlet(outlet) - time.sleep(10) + time.sleep(PDU_WAIT_TIME) cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: @@ -310,7 +312,7 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, logging.info("Turn on outlet {}".format(outlet)) 
pdu_ctrl.turn_on_outlet(outlet) - time.sleep(10) + time.sleep(PDU_WAIT_TIME) cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: diff --git a/tests/process_monitoring/test_critical_process_monitoring.py b/tests/process_monitoring/test_critical_process_monitoring.py index dc7774abe75..3be0b3191da 100755 --- a/tests/process_monitoring/test_critical_process_monitoring.py +++ b/tests/process_monitoring/test_critical_process_monitoring.py @@ -486,6 +486,10 @@ def ensure_process_is_running(duthost, container_name, critical_process): Returns: None. """ + if critical_process in ["dhcp6relay", "dhcprelayd"]: + # For dhcp-relay container, the process name in supervisord started 'dhcp-relay: + the process name' + critical_process = "dhcp-relay" + ":" + critical_process + logger.info("Checking whether process '{}' in container '{}' is running..." .format(critical_process, container_name)) program_status, program_pid = get_program_info(duthost, container_name, critical_process) diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 24c17fc80a4..c8be0acb984 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -49,7 +49,7 @@ def get_dut_type(host): def ptf_runner(host, testdir, testname, platform_dir=None, params={}, platform="remote", qlen=0, relax=True, debug_level="info", socket_recv_size=None, log_file=None, device_sockets=[], timeout=0, custom_options="", - module_ignore_errors=False, is_python3=False, async_mode=False): + module_ignore_errors=False, is_python3=False, async_mode=False, pdb=False): # Call virtual env ptf for migrated py3 scripts. # ptf will load all scripts under ptftests, it will throw error for py2 scripts. # So move migrated scripts to seperated py3 folder avoid impacting py2 scripts. 
@@ -98,7 +98,7 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, if device_sockets: cmd += " ".join(map(" --device-socket {}".format, device_sockets)) - if timeout: + if timeout and not pdb: cmd += " --test-case-timeout {}".format(int(timeout)) if custom_options: @@ -111,6 +111,15 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, host.create_macsec_info() try: + if pdb: + # Write command to file. Use short test name for simpler launch in ptf container. + script_name = "/tmp/" + testname.split(".")[-1] + ".sh" + with open(script_name, 'w') as f: + f.write(cmd) + host.copy(src=script_name, dest="/root/") + print("Run command from ptf: sh {}".format(script_name)) + import pdb + pdb.set_trace() result = host.shell(cmd, chdir="/root", module_ignore_errors=module_ignore_errors, module_async=async_mode) if not async_mode: if log_file: diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 64f8c611f50..1c4eff002a3 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -114,7 +114,7 @@ def dutTestParams(self, duthosts, dut_test_params_qos, tbinfo, get_src_dst_asic_ yield dut_test_params_qos - def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False): + def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False, pdb=False): """ Runs QoS SAI test case on PTF host @@ -148,7 +148,8 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False): relax=relax, timeout=1200, socket_recv_size=16384, - custom_options=custom_options + custom_options=custom_options, + pdb=pdb ) @@ -1043,7 +1044,7 @@ def dutConfig( # Map port IDs to system port for dnx chassis if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': - sys_key = src_asic.namespace + '|' + iface + sys_key = src_asic.namespace + '|' + iface if src_asic.namespace else iface if sys_key in src_system_port: system_port 
= src_system_port[sys_key]['system_port_id'] sysPort = {'port': iface, 'system_port': system_port, 'port_type': iface} @@ -1060,7 +1061,7 @@ def dutConfig( if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': for portName in src_mgFacts["minigraph_portchannels"][iface]["members"]: - sys_key = src_asic.namespace + '|' + portName + sys_key = src_asic.namespace + '|' + portName if src_asic.namespace else portName port_Index = src_mgFacts["minigraph_ptf_indices"][portName] if sys_key in src_system_port: system_port = src_system_port[sys_key]['system_port_id'] @@ -1096,7 +1097,7 @@ def dutConfig( # Map port IDs to system port IDs if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': - sys_key = dst_asic.namespace + '|' + iface + sys_key = dst_asic.namespace + '|' + iface if dst_asic.namespace else iface if sys_key in dst_system_port: system_port = dst_system_port[sys_key]['system_port_id'] sysPort = {'port': iface, 'system_port': system_port, 'port_type': iface} @@ -1113,7 +1114,7 @@ def dutConfig( if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': for portName in dst_mgFacts["minigraph_portchannels"][iface]["members"]: - sys_key = dst_asic.namespace + '|' + portName + sys_key = dst_asic.namespace + '|' + portName if dst_asic.namespace else portName port_Index = dst_mgFacts["minigraph_ptf_indices"][portName] if sys_key in dst_system_port: system_port = dst_system_port[sys_key]['system_port_id'] diff --git a/tests/route/test_route_flow_counter.py b/tests/route/test_route_flow_counter.py index 9bab1164ade..823a8257987 100644 --- a/tests/route/test_route_flow_counter.py +++ b/tests/route/test_route_flow_counter.py @@ -10,7 +10,8 @@ allure.logger = logger pytestmark = [ - 
pytest.mark.topology("any") + pytest.mark.topology("any"), + pytest.mark.device_type('physical') ] test_update_route_pattern_para = [ @@ -214,9 +215,4 @@ def _get_nexthop(self, duthost, ipv6): else: cmd = 'show ip bgp summary' parse_result = duthost.show_and_parse(cmd) - if "neighbor" in parse_result[0]: - return parse_result[0]['neighbor'] - elif "neighbhor" in parse_result[0]: - return parse_result[0]['neighbhor'] - else: - raise ValueError("Unexpected neighbor key in bgp summary output") + return parse_result[0]['neighbhor'] diff --git a/tests/run_tests.sh b/tests/run_tests.sh index 2a309e71052..35e801676ee 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -71,11 +71,16 @@ function validate_parameters() RET=2 fi - if [[ -z ${TOPOLOGY} && -z ${TEST_CASES} ]]; then - echo "Neither TOPOLOGY (-t) nor test case list (-c) is set.." + if [[ -z ${TOPOLOGY} && -z ${TEST_CASES} && -z ${TEST_CASES_FILE} ]]; then + echo "Neither TOPOLOGY (-t) nor test case list (-c) nor test case list file (-F) is set.." RET=3 fi + if [[ ${TEST_CASES} && ${TEST_CASES_FILE} ]]; then + echo "Specified both a test case list (-c) and a test case list file (-F).." + RET=4 + fi + if [[ ${RET} != 0 ]]; then show_help_and_exit ${RET} fi @@ -132,7 +137,7 @@ function setup_test_options() # expanded to matched test scripts by bash. Among the expanded scripts, we may want to skip a few. Then we can # explicitly specify the script to be skipped. 
ignores=$(python3 -c "print('|'.join('''$SKIP_FOLDERS'''.split()))") - if [[ -z ${TEST_CASES} ]]; then + if [[ -z ${TEST_CASES} && -z ${TEST_CASES_FILE} ]]; then # When TEST_CASES is not specified, find all the possible scripts, ignore the scripts under $SKIP_FOLDERS all_scripts=$(find ./ -name 'test_*.py' | sed s:^./:: | grep -vE "^(${ignores})") ignore_files=("test_pretest.py" "test_posttest.py") @@ -144,6 +149,9 @@ function setup_test_options() fi done else + if [[ ${TEST_CASES_FILE} ]]; then + TEST_CASES="${TEST_CASES} $(cat ${TEST_CASES_FILE} | tr '\n' ' ')" + fi # When TEST_CASES is specified, ignore the scripts under $SKIP_FOLDERS all_scripts="" for test_script in ${TEST_CASES}; do @@ -250,6 +258,7 @@ function run_debug_tests() echo "SKIP_SCRIPTS: ${SKIP_SCRIPTS}" echo "SKIP_FOLDERS: ${SKIP_FOLDERS}" echo "TEST_CASES: ${TEST_CASES}" + echo "TEST_CASES_FILE: ${TEST_CASES_FILE}" echo "TEST_FILTER: ${TEST_FILTER}" echo "TEST_INPUT_ORDER: ${TEST_INPUT_ORDER}" echo "TEST_MAX_FAIL: ${TEST_MAX_FAIL}" @@ -354,7 +363,7 @@ function run_individual_tests() setup_environment -while getopts "h?a:b:c:C:d:e:Ef:i:I:k:l:m:n:oOp:q:rs:S:t:ux" opt; do +while getopts "h?a:b:c:C:d:e:Ef:F:i:I:k:l:m:n:oOp:q:rs:S:t:ux" opt; do case ${opt} in h|\? 
) show_help_and_exit 0 @@ -384,6 +393,9 @@ while getopts "h?a:b:c:C:d:e:Ef:i:I:k:l:m:n:oOp:q:rs:S:t:ux" opt; do f ) TESTBED_FILE=${OPTARG} ;; + F ) + TEST_CASES_FILE="${OPTARG}" + ;; i ) INVENTORY=${OPTARG} ;; diff --git a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py index 11fa4f679e4..76c27031316 100644 --- a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py +++ b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py @@ -51,28 +51,32 @@ def run_ecn_test(api, if snappi_extra_params is None: snappi_extra_params = SnappiTestParams() - duthost1 = snappi_extra_params.multi_dut_params.duthost1 + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] - duthost2 = snappi_extra_params.multi_dut_params.duthost2 + egress_duthost = rx_port['duthost'] + tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] + ingress_duthost = tx_port['duthost'] pytest_assert(testbed_config is not None, 'Failed to get L2/3 testbed config') logger.info("Stopping PFC watchdog") - stop_pfcwd(duthost1, rx_port['asic_value']) - stop_pfcwd(duthost2, tx_port['asic_value']) + stop_pfcwd(egress_duthost, rx_port['asic_value']) + stop_pfcwd(ingress_duthost, tx_port['asic_value']) logger.info("Disabling packet aging if necessary") - disable_packet_aging(duthost1) - disable_packet_aging(duthost2) + disable_packet_aging(egress_duthost) + disable_packet_aging(ingress_duthost) # Configure WRED/ECN thresholds logger.info("Configuring WRED and ECN thresholds") - config_result = config_wred(host_ans=duthost1, + config_result = config_wred(host_ans=egress_duthost, kmin=snappi_extra_params.ecn_params["kmin"], kmax=snappi_extra_params.ecn_params["kmax"], pmax=snappi_extra_params.ecn_params["pmax"]) pytest_assert(config_result is True, 'Failed to configure WRED/ECN at the DUT') - config_result = config_wred(host_ans=duthost2, + 
config_result = config_wred(host_ans=ingress_duthost, kmin=snappi_extra_params.ecn_params["kmin"], kmax=snappi_extra_params.ecn_params["kmax"], pmax=snappi_extra_params.ecn_params["pmax"]) @@ -80,20 +84,20 @@ def run_ecn_test(api, # Enable ECN marking logger.info("Enabling ECN markings") - pytest_assert(enable_ecn(host_ans=duthost1, prio=lossless_prio), 'Unable to enable ecn') - pytest_assert(enable_ecn(host_ans=duthost2, prio=lossless_prio), 'Unable to enable ecn') + pytest_assert(enable_ecn(host_ans=egress_duthost, prio=lossless_prio), 'Unable to enable ecn') + pytest_assert(enable_ecn(host_ans=ingress_duthost, prio=lossless_prio), 'Unable to enable ecn') - config_result = config_ingress_lossless_buffer_alpha(host_ans=duthost1, + config_result = config_ingress_lossless_buffer_alpha(host_ans=egress_duthost, alpha_log2=3) pytest_assert(config_result is True, 'Failed to configure PFC threshold to 8') - config_result = config_ingress_lossless_buffer_alpha(host_ans=duthost2, + config_result = config_ingress_lossless_buffer_alpha(host_ans=ingress_duthost, alpha_log2=3) pytest_assert(config_result is True, 'Failed to configure PFC threshold to 8') # Get the ID of the port to test - port_id = get_dut_port_id(dut_hostname=duthost1.hostname, + port_id = get_dut_port_id(dut_hostname=egress_duthost.hostname, dut_port=dut_port, conn_data=conn_data, fanout_data=fanout_data) @@ -166,7 +170,7 @@ def run_ecn_test(api, capture_name=snappi_extra_params.packet_capture_file) logger.info("Running traffic") - run_traffic(duthost=duthost1, + run_traffic(duthost=egress_duthost, api=api, config=testbed_config, data_flow_names=data_flow_names, diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py index 5d4cab952b6..8e6d2503223 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_basic_helper.py @@ 
-11,6 +11,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -56,35 +57,39 @@ def run_pfcwd_basic_test(api, if snappi_extra_params is None: snappi_extra_params = SnappiTestParams() - duthost1 = snappi_extra_params.multi_dut_params.duthost1 + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] - duthost2 = snappi_extra_params.multi_dut_params.duthost2 + egress_duthost = rx_port['duthost'] + tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] + ingress_duthost = tx_port["duthost"] pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') - if (duthost1.is_multi_asic): - enable_packet_aging(duthost1, rx_port['asic_value']) - enable_packet_aging(duthost2, tx_port['asic_value']) - start_pfcwd(duthost1, rx_port['asic_value']) - start_pfcwd(duthost2, tx_port['asic_value']) + if (egress_duthost.is_multi_asic): + enable_packet_aging(egress_duthost, rx_port['asic_value']) + enable_packet_aging(ingress_duthost, tx_port['asic_value']) + start_pfcwd(egress_duthost, rx_port['asic_value']) + start_pfcwd(ingress_duthost, tx_port['asic_value']) else: - enable_packet_aging(duthost1) - enable_packet_aging(duthost2) - start_pfcwd(duthost1) - start_pfcwd(duthost2) + enable_packet_aging(egress_duthost) + enable_packet_aging(ingress_duthost) + start_pfcwd(egress_duthost) + start_pfcwd(ingress_duthost) ini_stats = {} for prio in prio_list: - ini_stats.update(get_stats(duthost1, rx_port['peer_port'], prio)) + ini_stats.update(get_stats(egress_duthost, rx_port['peer_port'], prio)) # Set appropriate pfcwd loss deviation - these values are based on 
empirical testing - DEVIATION = 0.35 if duthost1.facts['asic_type'] in ["broadcom"] or \ - duthost2.facts['asic_type'] in ["broadcom"] else 0.3 + DEVIATION = 0.35 if egress_duthost.facts['asic_type'] in ["broadcom"] or \ + ingress_duthost.facts['asic_type'] in ["broadcom"] else 0.3 - poll_interval_sec = get_pfcwd_poll_interval(duthost1, rx_port['asic_value']) / 1000.0 - detect_time_sec = get_pfcwd_detect_time(host_ans=duthost1, intf=dut_port, + poll_interval_sec = get_pfcwd_poll_interval(egress_duthost, rx_port['asic_value']) / 1000.0 + detect_time_sec = get_pfcwd_detect_time(host_ans=egress_duthost, intf=dut_port, asic_value=rx_port['asic_value']) / 1000.0 - restore_time_sec = get_pfcwd_restore_time(host_ans=duthost1, intf=dut_port, + restore_time_sec = get_pfcwd_restore_time(host_ans=egress_duthost, intf=dut_port, asic_value=rx_port['asic_value']) / 1000.0 """ Warm up traffic is initially sent before any other traffic to prevent pfcwd @@ -148,7 +153,7 @@ def run_pfcwd_basic_test(api, fin_stats = {} for prio in prio_list: - fin_stats.update(get_stats(duthost1, rx_port['peer_port'], prio)) + fin_stats.update(get_stats(egress_duthost, rx_port['peer_port'], prio)) loss_packets = 0 for k in fin_stats.keys(): @@ -302,7 +307,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py index 7c97502eda1..ce45cd30240 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_burst_storm_helper.py @@ -9,6 +9,7 @@ from 
tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -51,22 +52,27 @@ def run_pfcwd_burst_storm_test(api, if snappi_extra_params is None: snappi_extra_params = SnappiTestParams() - duthost1 = snappi_extra_params.multi_dut_params.duthost1 + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id = rx_port["port_id"] - duthost2 = snappi_extra_params.multi_dut_params.duthost2 + egress_duthost = rx_port['duthost'] + tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] tx_port_id = tx_port["port_id"] + ingress_duthost = tx_port['duthost'] pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') - start_pfcwd(duthost1, rx_port['asic_value']) - enable_packet_aging(duthost1) - start_pfcwd(duthost2, tx_port['asic_value']) - enable_packet_aging(duthost2) - poll_interval_sec = get_pfcwd_poll_interval(duthost1, rx_port['asic_value']) / 1000.0 - detect_time_sec = get_pfcwd_detect_time(host_ans=duthost1, intf=dut_port, asic_value=rx_port['asic_value']) / 1000.0 # noqa: E501 - restore_time_sec = get_pfcwd_restore_time(host_ans=duthost1, intf=dut_port, asic_value=rx_port['asic_value']) / 1000.0 # noqa: E501 + start_pfcwd(egress_duthost, rx_port['asic_value']) + enable_packet_aging(egress_duthost) + start_pfcwd(ingress_duthost, tx_port['asic_value']) + enable_packet_aging(ingress_duthost) + + poll_interval_sec = get_pfcwd_poll_interval(egress_duthost, rx_port['asic_value']) / 1000.0 + detect_time_sec = get_pfcwd_detect_time(host_ans=egress_duthost, intf=rx_port['peer_port'], asic_value=rx_port['asic_value']) / 1000.0 # noqa: E501 + restore_time_sec = 
get_pfcwd_restore_time(host_ans=egress_duthost, intf=rx_port['peer_port'], asic_value=rx_port['asic_value']) / 1000.0 # noqa: E501 burst_cycle_sec = poll_interval_sec + detect_time_sec + restore_time_sec + 0.1 data_flow_dur_sec = ceil(burst_cycle_sec * BURST_EVENTS) pause_flow_dur_sec = poll_interval_sec * 0.5 @@ -108,7 +114,7 @@ def run_pfcwd_burst_storm_test(api, __verify_results(rows=flow_stats, data_flow_prefix=DATA_FLOW_PREFIX, pause_flow_prefix=PAUSE_FLOW_PREFIX, - duthosts=[duthost1, duthost2]) + duthosts=[egress_duthost, ingress_duthost]) def __gen_traffic(testbed_config, @@ -177,7 +183,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py index 737a65f179e..059a16d5a74 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_multi_node_helper.py @@ -11,6 +11,7 @@ from tests.common.snappi_tests.port import select_ports # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp # noqa: F401 from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -67,25 +68,37 @@ def run_pfcwd_multi_node_test(api, if snappi_extra_params is None: snappi_extra_params = SnappiTestParams() - duthost1 = snappi_extra_params.multi_dut_params.duthost1 + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + + # initialize the (duthost, port) set. 
+ # The final list will have all the asics which needs to be configured for PFC + pfcwd_to_be_configured = set() + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] rx_port_id_list = [rx_port["port_id"]] - duthost2 = snappi_extra_params.multi_dut_params.duthost2 + egress_duthost = rx_port['duthost'] + # Add the port to the set of ports to be configured for PFC + pfcwd_to_be_configured.add((egress_duthost, rx_port['asic_value'])) + tx_port = [snappi_extra_params.multi_dut_params.multi_dut_ports[1], snappi_extra_params.multi_dut_params.multi_dut_ports[2]] tx_port_id_list = [tx_port[0]["port_id"], tx_port[1]["port_id"]] + # add ingress DUT into the set + pfcwd_to_be_configured.add((tx_port[0]['duthost'], tx_port[0]['asic_value'])) + pfcwd_to_be_configured.add((tx_port[1]['duthost'], tx_port[1]['asic_value'])) pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') num_ports = len(port_config_list) pytest_require(num_ports >= 3, "This test requires at least 3 ports") - start_pfcwd(duthost1, rx_port['asic_value']) - enable_packet_aging(duthost1) - start_pfcwd(duthost2, tx_port[0]['asic_value']) - enable_packet_aging(duthost2) + # Enable PFC watchdog on the rx side and tx side of the DUT without duplication. 
+ for duthost, asic in pfcwd_to_be_configured: + start_pfcwd(duthost, asic) + enable_packet_aging(duthost) - poll_interval_sec = get_pfcwd_poll_interval(duthost1, rx_port['asic_value']) / 1000.0 - detect_time_sec = get_pfcwd_detect_time(host_ans=duthost1, intf=dut_port, + poll_interval_sec = get_pfcwd_poll_interval(egress_duthost, rx_port['asic_value']) / 1000.0 + detect_time_sec = get_pfcwd_detect_time(host_ans=egress_duthost, intf=rx_port['peer_port'], asic_value=rx_port['asic_value']) / 1000.0 if trigger_pfcwd: @@ -399,7 +412,10 @@ def __gen_data_flow(testbed_config, eth, ipv4 = flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = flow_prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = flow_prio + else: + eth.pfc_queue.value = pfcQueueValueDict[flow_prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py index 24a91805122..efc0310b5b2 100644 --- a/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py +++ b/tests/snappi_tests/multidut/pfcwd/files/pfcwd_multidut_runtime_traffic_helper.py @@ -7,6 +7,7 @@ from tests.common.snappi_tests.port import select_ports, select_tx_port # noqa: F401 from tests.common.snappi_tests.snappi_helpers import wait_for_arp from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict DATA_FLOW_NAME = "Data Flow" DATA_PKT_SIZE = 1024 @@ -48,16 +49,20 @@ def run_pfcwd_runtime_traffic_test(api, if snappi_extra_params is None: snappi_extra_params = SnappiTestParams() - duthost1 = snappi_extra_params.duthost1 + # Traffic flow: + # tx_port (TGEN) --- ingress DUT --- egress DUT --- rx_port (TGEN) + rx_port = snappi_extra_params.rx_port - duthost2 = snappi_extra_params.duthost2 - 
tx_port = snappi_extra_params.tx_port + egress_duthost = rx_port['duthost'] rx_port_id = snappi_extra_params.rx_port_id + + tx_port = snappi_extra_params.tx_port + ingress_duthost = tx_port['duthost'] tx_port_id = snappi_extra_params.tx_port_id pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config') - stop_pfcwd(duthost1, rx_port['asic_value']) - stop_pfcwd(duthost2, tx_port['asic_value']) + stop_pfcwd(egress_duthost, rx_port['asic_value']) + stop_pfcwd(ingress_duthost, tx_port['asic_value']) __gen_traffic(testbed_config=testbed_config, port_config_list=port_config_list, @@ -75,7 +80,7 @@ def run_pfcwd_runtime_traffic_test(api, flow_stats = __run_traffic(api=api, config=testbed_config, - duthost=duthost1, + duthost=egress_duthost, port=rx_port, all_flow_names=all_flow_names, pfcwd_start_delay_sec=PFCWD_START_DELAY_SEC, @@ -142,7 +147,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py index cd868d31f1c..da163989f58 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_basic_helper.py @@ -11,6 +11,7 @@ enable_packet_aging, start_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -255,7 +256,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if 
pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py index e00eaa0aee0..d6f9a18f2d2 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_burst_storm_helper.py @@ -9,6 +9,7 @@ enable_packet_aging, start_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -186,7 +187,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py index c923ea8406c..4cdfe5b7228 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_multi_node_helper.py @@ -10,6 +10,7 @@ start_pfcwd, enable_packet_aging, get_pfcwd_poll_interval, get_pfcwd_detect_time, sec_to_nanosec from tests.common.snappi_tests.port import select_ports from tests.common.snappi_tests.snappi_helpers import wait_for_arp +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict logger = logging.getLogger(__name__) @@ -405,7 +406,10 @@ def __gen_data_flow(testbed_config, eth, ipv4 = flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = flow_prio + if 
pfcQueueGroupSize == 8: + eth.pfc_queue.value = flow_prio + else: + eth.pfc_queue.value = pfcQueueValueDict[flow_prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py b/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py index 6d7bfef6d98..14452d6cc41 100644 --- a/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py +++ b/tests/snappi_tests/pfcwd/files/pfcwd_runtime_traffic_helper.py @@ -6,6 +6,7 @@ from tests.common.snappi_tests.common_helpers import start_pfcwd, stop_pfcwd, sec_to_nanosec from tests.common.snappi_tests.port import select_ports, select_tx_port from tests.common.snappi_tests.snappi_helpers import wait_for_arp +from tests.snappi_tests.variables import pfcQueueGroupSize, pfcQueueValueDict DATA_FLOW_NAME = "Data Flow" WARM_UP_TRAFFIC_NAME = "Warm Up Traffic" @@ -166,7 +167,10 @@ def __gen_traffic(testbed_config, eth, ipv4 = data_flow.packet.ethernet().ipv4() eth.src.value = tx_mac eth.dst.value = rx_mac - eth.pfc_queue.value = prio + if pfcQueueGroupSize == 8: + eth.pfc_queue.value = prio + else: + eth.pfc_queue.value = pfcQueueValueDict[prio] ipv4.src.value = tx_port_config.ip ipv4.dst.value = rx_port_config.ip diff --git a/tests/snmp/test_snmp_psu.py b/tests/snmp/test_snmp_psu.py index ca3f87cfab7..05d2ce4942e 100644 --- a/tests/snmp/test_snmp_psu.py +++ b/tests/snmp/test_snmp_psu.py @@ -1,4 +1,5 @@ import pytest +import logging from tests.common.helpers.assertions import pytest_assert from tests.common.helpers.snmp_helpers import get_snmp_facts @@ -21,7 +22,13 @@ def test_snmp_numpsu(duthosts, enum_supervisor_dut_hostname, localhost, creds_al snmp_facts = get_snmp_facts( localhost, host=hostip, version="v2c", community=creds_all_duts[duthost.hostname]["snmp_rocommunity"], wait=True)['ansible_facts'] - res = duthost.shell("psuutil numpsus") + res = duthost.shell("psuutil numpsus", module_ignore_errors=True) + + # For kvm testbed, we 
will get the expected return code 2 because of no chassis + if duthost.facts["asic_type"] == "vs" and res['rc'] == 2: + logging.info("Get expected return code 2 on kvm testbed.") + return + assert int(res['rc']) == 0, "Failed to get number of PSUs" numpsus = int(res['stdout']) @@ -40,6 +47,11 @@ def test_snmp_psu_status(duthosts, enum_supervisor_dut_hostname, localhost, cred psus_on = 0 msg = "Unexpected operstatus results {} != {} for PSU {}" + # For kvm testbed, there is no snmp psu info + if duthost.facts["asic_type"] == "vs": + logging.info("No snmp psu info on kvm testbed.") + return + for psu_indx, operstatus in list(snmp_facts['snmp_psu'].items()): get_presence = duthost.shell( "redis-cli -n 6 hget 'PSU_INFO|PSU {}' presence".format(psu_indx)) diff --git a/tests/stress/test_stress_routes.py b/tests/stress/test_stress_routes.py index bba52369335..0dfd5a27b4b 100644 --- a/tests/stress/test_stress_routes.py +++ b/tests/stress/test_stress_routes.py @@ -21,7 +21,8 @@ def announce_withdraw_routes(duthost, namespace, localhost, ptf_ip, topo_name): logger.info("announce ipv4 and ipv6 routes") - localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="announce", path="../ansible/") + localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="announce", path="../ansible/", + log_path="logs") wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "outq") is True) @@ -30,7 +31,8 @@ def announce_withdraw_routes(duthost, namespace, localhost, ptf_ip, topo_name): sleep_to_wait(CRM_POLLING_INTERVAL * 5) logger.info("withdraw ipv4 and ipv6 routes") - localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="withdraw", path="../ansible/") + localhost.announce_routes(topo_name=topo_name, ptf_ip=ptf_ip, action="withdraw", path="../ansible/", + log_path="logs") wait_until(MAX_WAIT_TIME, CRM_POLLING_INTERVAL, 0, lambda: check_queue_status(duthost, "inq") is True) sleep_to_wait(CRM_POLLING_INTERVAL * 5) diff --git 
a/tests/stress/utils.py b/tests/stress/utils.py index 4a37897e54b..f34c6e0d4b0 100644 --- a/tests/stress/utils.py +++ b/tests/stress/utils.py @@ -22,15 +22,8 @@ def check_queue_status(duthost, queue): bgp_neighbors = duthost.show_and_parse(SHOW_BGP_SUMMARY_CMD) bgp_neighbor_addr_regex = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}") for neighbor in bgp_neighbors: - if "neighbor" in neighbor: - if bgp_neighbor_addr_regex.match(neighbor["neighbor"]) and int(neighbor[queue]) != 0: - return False - elif "neighbhor" in neighbor: - if bgp_neighbor_addr_regex.match(neighbor["neighbhor"]) and int(neighbor[queue]) != 0: - return False - else: - raise ValueError("Unexpected neighbor key in bgp summary output") - + if bgp_neighbor_addr_regex.match(neighbor["neighbhor"]) and int(neighbor[queue]) != 0: + return False return True diff --git a/tests/syslog/test_logrotate.py b/tests/syslog/test_logrotate.py index d3b9f5f7971..1b8923b1071 100644 --- a/tests/syslog/test_logrotate.py +++ b/tests/syslog/test_logrotate.py @@ -5,6 +5,7 @@ from tests.common.plugins.loganalyzer.loganalyzer import DisableLogrotateCronContext from tests.common import config_reload from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import wait_until from tests.conftest import tbinfo logger = logging.getLogger(__name__) @@ -41,9 +42,6 @@ def backup_syslog(rand_selected_dut): logger.info('Recover syslog file to syslog') duthost.shell('sudo mv /var/log/syslog_bk /var/log/syslog') - logger.info('Remove temp file /var/log/syslog.1') - duthost.shell('sudo rm -f /var/log/syslog.1') - logger.info('Restart rsyslog service') duthost.shell('sudo service rsyslog restart') @@ -141,7 +139,7 @@ def multiply_with_unit(logrotate_threshold, num): return str(int(logrotate_threshold[:-1]) * num) + logrotate_threshold[-1] -def validate_logrotate_function(duthost, logrotate_threshold): +def validate_logrotate_function(duthost, logrotate_threshold, small_size): """ Validate logrotate function 
:param duthost: DUT host object @@ -154,7 +152,10 @@ def validate_logrotate_function(duthost, logrotate_threshold): logrotate_threshold)): syslog_number_origin = get_syslog_file_count(duthost) logger.info('There are {} syslog gz files'.format(syslog_number_origin)) - create_temp_syslog_file(duthost, multiply_with_unit(logrotate_threshold, 0.9)) + if small_size: + create_temp_syslog_file(duthost, multiply_with_unit(logrotate_threshold, 0.5)) + else: + create_temp_syslog_file(duthost, multiply_with_unit(logrotate_threshold, 0.9)) run_logrotate(duthost) syslog_number_no_rotate = get_syslog_file_count(duthost) logger.info('There are {} syslog gz files after running logrotate'.format(syslog_number_no_rotate)) @@ -208,7 +209,7 @@ def test_logrotate_normal_size(rand_selected_dut): if get_var_log_size(duthost) < 200 * 1024: pytest.skip('{} size is lower than 200MB, skip this test'.format(LOG_FOLDER)) rotate_large_threshold = get_threshold_based_on_memory(duthost) - validate_logrotate_function(duthost, rotate_large_threshold) + validate_logrotate_function(duthost, rotate_large_threshold, False) @pytest.mark.disable_loganalyzer @@ -220,7 +221,7 @@ def test_logrotate_small_size(rand_selected_dut, simulate_small_var_log_partitio Execute config reload to active the mount Stop logrotate cron job, make sure no logrotate executes during this test Check current syslog.x file number and save it - Create a temp file with size of rotate_size * 90%, and rename it as 'syslog', run logrotate command + Create a temp file with size of rotate_size * 50%, and rename it as 'syslog', run logrotate command There would be no logrotate happens - by checking the 'syslog.x' file number not increased Create a temp file with size of rotate_size * 110%, and rename it as 'syslog', run logrotate command There would be logrotate happens - by checking the 'syslog.x' file number increased by 1 @@ -231,11 +232,10 @@ def test_logrotate_small_size(rand_selected_dut, simulate_small_var_log_partitio """ duthost 
= rand_selected_dut rotate_small_threshold = get_threshold_based_on_memory(duthost) - validate_logrotate_function(duthost, rotate_small_threshold) + validate_logrotate_function(duthost, rotate_small_threshold, True) def get_pending_entries(duthost, ignore_list=None): - # grep returns error code when there is no match, add 'true' so the ansible module doesn't fail pending_entries = set(duthost.shell('sonic-db-cli APPL_DB keys "_*"')['stdout'].split()) if ignore_list: @@ -244,7 +244,9 @@ def get_pending_entries(duthost, ignore_list=None): pending_entries.remove(entry) except ValueError: continue - return list(pending_entries) + pending_entries = list(pending_entries) + logger.info('Pending entries in APPL_DB: {}'.format(pending_entries)) + return pending_entries def clear_pending_entries(duthost): @@ -255,6 +257,10 @@ def clear_pending_entries(duthost): duthost.shell('sonic-db-cli APPL_DB publish "NEIGH_TABLE_CHANNEL" ""') +def no_pending_entries(duthost, ignore_list=None): + return not bool(get_pending_entries(duthost, ignore_list=ignore_list)) + + @pytest.fixture def orch_logrotate_setup(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_rand_one_frontend_asic_index): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] @@ -306,5 +312,7 @@ def test_orchagent_logrotate(orch_logrotate_setup, duthosts, enum_rand_one_per_h else: duthost.shell('sudo ip neigh add {} lladdr {} dev {}'.format(FAKE_IP, FAKE_MAC, target_port)) duthost.control_process('orchagent', pause=False, namespace=asic_id) - pending_entries = get_pending_entries(duthost, ignore_list=ignore_entries) - pytest_assert(not pending_entries, "Found pending entries in APPL_DB: {}".format(pending_entries)) + pytest_assert( + wait_until(30, 1, 0, no_pending_entries, duthost, ignore_list=ignore_entries), + "Found pending entries in APPL_DB" + ) diff --git a/tests/system_health/test_system_health.py b/tests/system_health/test_system_health.py index 260156cc9b7..0019a8893a7 100644 --- 
a/tests/system_health/test_system_health.py +++ b/tests/system_health/test_system_health.py @@ -15,7 +15,8 @@ from tests.common.fixtures.duthost_utils import is_support_mock_asic # noqa F401 pytestmark = [ - pytest.mark.topology('any') + pytest.mark.topology('any'), + pytest.mark.device_type('physical') ] logger = logging.getLogger(__name__) diff --git a/tests/telemetry/conftest.py b/tests/telemetry/conftest.py index 0574c09f242..8a66b6510b6 100644 --- a/tests/telemetry/conftest.py +++ b/tests/telemetry/conftest.py @@ -171,6 +171,10 @@ def test_eventd_healthy(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, set duthost = duthosts[enum_rand_one_per_hwsku_hostname] + features_dict, succeeded = duthost.get_feature_status() + if succeeded and ('eventd' not in features_dict or features_dict['eventd'] == 'disabled'): + pytest.skip("eventd is disabled on the system") + do_init(duthost) module = __import__("eventd_events") diff --git a/tests/telemetry/events/event_utils.py b/tests/telemetry/events/event_utils.py index 2299ca52d82..d29701f5698 100644 --- a/tests/telemetry/events/event_utils.py +++ b/tests/telemetry/events/event_utils.py @@ -1,5 +1,6 @@ import logging import os +import pytest import json import re @@ -101,6 +102,10 @@ def verify_received_output(received_file, N): def restart_eventd(duthost): + features_dict, succeeded = duthost.get_feature_status() + if succeeded and ('eventd' not in features_dict or features_dict['eventd'] == 'disabled'): + pytest.skip("eventd is disabled on the system") + duthost.shell("systemctl reset-failed eventd") duthost.service(name="eventd", state="restarted") pytest_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, "eventd"), "eventd not started") diff --git a/tests/test_posttest.py b/tests/test_posttest.py index ac43eee643e..d2c1d44c7b7 100644 --- a/tests/test_posttest.py +++ b/tests/test_posttest.py @@ -1,3 +1,4 @@ +import os import pytest import logging import time @@ -88,3 +89,11 @@ def 
test_enable_startup_tsa_tsb_service(duthosts, localhost): else: logger.info("{} file does not exist in the specified path on dut {}". format(backup_tsa_tsb_file_path, duthost.hostname)) + + +def test_collect_ptf_logs(ptfhost): + log_files = ptfhost.shell('ls /tmp/*.log')['stdout'].split() + if not os.path.exists('logs/ptf'): + os.makedirs('logs/ptf') + for log_file in log_files: + ptfhost.fetch(src=log_file, dest='logs/ptf', fail_on_missing=False) diff --git a/tests/testsuites/platform_sanity b/tests/testsuites/platform_sanity new file mode 100644 index 00000000000..6be9148be78 --- /dev/null +++ b/tests/testsuites/platform_sanity @@ -0,0 +1,21 @@ +acl/test_acl.py::TestBasicAcl +decap/test_decap.py +fib/test_fib.py::test_basic_fib +pfcwd/test_pfc_config.py::TestPfcConfig +pfcwd/test_pfcwd_function.py::TestPfcwdFunc::test_pfcwd_actions +qos/test_qos_sai.py::TestQosSai::testQosSaiPfcXoffLimit +qos/test_qos_sai.py::TestQosSai::testQosSaiPfcXonLimit +qos/test_qos_sai.py::TestQosSai::testQosSaiLossyQueue +qos/test_qos_sai.py::TestQosSai::testQosSaiDscpQueueMapping +qos/test_qos_sai.py::TestQosSai::testQosSaiDot1pQueueMapping +qos/test_qos_sai.py::TestQosSai::testQosSaiDwrr +qos/test_qos_sai.py::TestQosSai::testQosSaiQSharedWatermark +voq/test_fabric_reach.py +voq/test_voq_fabric_status_all.py +voq/test_voq_init.py +voq/test_voq_ipfwd.py::TestTableValidation +voq/test_voq_ipfwd.py::TestVoqIPFwd +voq/test_voq_ipfwd.py::TestFPLinkFlap::test_front_panel_linkflap_port +voq/test_voq_nbr.py::test_neighbor_clear_one +voq/test_voq_nbr.py::test_neighbor_hw_mac_change +voq/test_voq_nbr.py::TestNeighborLinkFlap diff --git a/tests/vlan/test_vlan_ping.py b/tests/vlan/test_vlan_ping.py index f2f5c5a93c1..281a31738ca 100644 --- a/tests/vlan/test_vlan_ping.py +++ b/tests/vlan/test_vlan_ping.py @@ -3,8 +3,13 @@ import ipaddress import logging import ptf.testutils as testutils +import ptf.packet as scapy +from ptf.mask import Mask import six +from ipaddress import ip_address, IPv4Address 
from tests.common.helpers.assertions import pytest_assert as py_assert +from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 +from tests.common.dualtor.dual_tor_utils import lower_tor_host # noqa F401 logger = logging.getLogger(__name__) @@ -47,7 +52,7 @@ def static_neighbor_entry(duthost, dic, oper, ip_version="both"): @pytest.fixture(scope='module') -def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo): +def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo, lower_tor_host): # noqa F811 """ Setup: Collecting vm_host_info, ptfhost_info Teardown: Removing all added ipv4 and ipv6 neighbors @@ -69,11 +74,28 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo): else: vm_ip_with_prefix = six.ensure_text(vm_info['conf']['interfaces']['Port-Channel1']['ipv4']) output = vm_info['host'].command("ip addr show dev po1") + # in case of lower tor host we need to use the next portchannel + if "dualtor-aa" in tbinfo["topo"]["name"] and rand_one_dut_hostname == lower_tor_host.hostname: + vm_ip_with_prefix = six.ensure_text(vm_info['conf']['interfaces']['Port-Channel2']['ipv4']) + output = vm_info['host'].command("ip addr show dev po2") vm_host_info["mac"] = output['stdout_lines'][1].split()[1] vm_ip_intf = ipaddress.IPv4Interface(vm_ip_with_prefix).ip vm_host_info["ipv4"] = vm_ip_intf duthost = duthosts[rand_one_dut_hostname] mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + if "dualtor-aa" in tbinfo["topo"]["name"]: + idx = duthosts.index(duthost) + unselected_duthost = duthosts[1 - idx] + unslctd_mg_facts = unselected_duthost.minigraph_facts(host=unselected_duthost.hostname)['ansible_facts'] + unslctd_mg_facts['mg_ptf_idx'] = unslctd_mg_facts['minigraph_port_indices'].copy() + try: + map = tbinfo['topo']['ptf_map'][str(1 - idx)] + if map: + for port, index in list(unslctd_mg_facts['minigraph_port_indices'].items()): + if 
str(index) in map: + unslctd_mg_facts['mg_ptf_idx'][port] = map[str(index)] + except (ValueError, KeyError): + pass my_cfg_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] ptfhost_info = {} ip4 = None @@ -88,14 +110,24 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo): vm_host_info['port_index_list'] = [mg_facts['minigraph_ptf_indices'][intf['attachto']]] break else: - for intf in mg_facts['minigraph_portchannel_interfaces']: - if intf['peer_addr'] == str(vm_host_info['ipv4']): - portchannel = intf['attachto'] - ifaces_list = [] - for iface in mg_facts['minigraph_portchannels'][portchannel]['members']: - ifaces_list.append(mg_facts['minigraph_ptf_indices'][iface]) - vm_host_info['port_index_list'] = ifaces_list + ifaces_list = [] + # UL pkt may take any of the tor in case of dualtor-aa + if "dualtor-aa" in tbinfo["topo"]["name"]: + for intf in mg_facts['minigraph_portchannel_interfaces']: + if type(ip_address(intf['peer_addr'])) is IPv4Address: + portchannel = intf['attachto'] + for iface in mg_facts['minigraph_portchannels'][portchannel]['members']: + ifaces_list.append(mg_facts['minigraph_ptf_indices'][iface]) + ifaces_list.append(unslctd_mg_facts['mg_ptf_idx'][iface]) + ifaces_list = list(dict.fromkeys(ifaces_list)) + else: + for intf in mg_facts['minigraph_portchannel_interfaces']: + if intf['peer_addr'] == str(vm_host_info['ipv4']): + portchannel = intf['attachto'] + for iface in mg_facts['minigraph_portchannels'][portchannel]['members']: + ifaces_list.append(mg_facts['minigraph_ptf_indices'][iface]) break + vm_host_info['port_index_list'] = ifaces_list break # getting the ipv4, ipv6 and vlan id of a vlan in DUT with 2 or more vlan members @@ -143,21 +175,38 @@ def vlan_ping_setup(duthosts, rand_one_dut_hostname, ptfhost, nbrhosts, tbinfo): yield vm_host_info, ptfhost_info logger.info("Removing all added ipv4 and ipv6 neighbors") - neigh_list = duthost.shell("sudo ip neigh | grep 
PERMANENT")["stdout_lines"] - for neigh in neigh_list: - cmd = neigh.split(" PERMANENT")[0] - duthost.shell("sudo ip neigh del {}".format(cmd)) - - -def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter): - pkt = testutils.simple_icmp_packet(eth_src=str(src_port['mac']), - eth_dst=str(dut_mac), - ip_src=str(src_port['ipv4']), - ip_dst=str(dst_port['ipv4']), ip_ttl=64) - exptd_pkt = testutils.simple_icmp_packet(eth_src=str(dut_mac), - eth_dst=str(dst_port['mac']), - ip_src=str(src_port['ipv4']), - ip_dst=str(dst_port['ipv4']), ip_ttl=63) + duthost.shell("sudo ip neigh flush nud permanent") + + +def verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter, tbinfo, vlan_mac=None, dtor_ul=False, dtor_dl=False): + if dtor_ul is True: + # use vlan int mac in case of dualtor UL test pkt + pkt = testutils.simple_icmp_packet(eth_src=str(src_port['mac']), + eth_dst=str(vlan_mac), + ip_src=str(src_port['ipv4']), + ip_dst=str(dst_port['ipv4']), ip_ttl=64) + else: + # use dut mac addr for all other test pkts + pkt = testutils.simple_icmp_packet(eth_src=str(src_port['mac']), + eth_dst=str(dut_mac), + ip_src=str(src_port['ipv4']), + ip_dst=str(dst_port['ipv4']), ip_ttl=64) + if dtor_dl is True: + # expect vlan int mac as src mac in dualtor DL test pkt + exptd_pkt = testutils.simple_icmp_packet(eth_src=str(vlan_mac), + eth_dst=str(dst_port['mac']), + ip_src=str(src_port['ipv4']), + ip_dst=str(dst_port['ipv4']), ip_ttl=63) + else: + # expect dut mac as src mac for non dualtor DL test pkt + exptd_pkt = testutils.simple_icmp_packet(eth_src=str(dut_mac), + eth_dst=str(dst_port['mac']), + ip_src=str(src_port['ipv4']), + ip_dst=str(dst_port['ipv4']), ip_ttl=63) + # skip smac check for dualtor-aa UL test pkt + if "dualtor-aa" in tbinfo["topo"]["name"] and dtor_ul is True: + exptd_pkt = Mask(exptd_pkt) + exptd_pkt.set_do_not_care_scapy(scapy.Ether, "src") for i in range(5): testutils.send_packet(ptfadapter, src_port['port_index_list'][0], pkt) try: @@ -169,7 +218,8 @@ def 
verify_icmp_packet(dut_mac, src_port, dst_port, ptfadapter): raise e # If it fails on the last attempt, raise the exception -def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter): +def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, + ptfadapter, tbinfo, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 """ test for checking connectivity of statically added ipv4 and ipv6 arp entries """ @@ -177,14 +227,33 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter) vmhost_info, ptfhost_info = vlan_ping_setup device2 = dict(list(ptfhost_info.items())[1:]) device1 = dict(list(ptfhost_info.items())[:1]) + # use mac addr of vlan interface in case of dualtor + dualtor_topo = ["dualtor", "dualtor-aa"] + if tbinfo["topo"]["name"] in dualtor_topo: + vlan_table = duthost.get_running_config_facts()['VLAN'] + vlan_name = list(vlan_table.keys())[0] + vlan_mac = duthost.get_dut_iface_mac(vlan_name) + # dump neigh entries + logger.info("Dumping all ipv4 and ipv6 neighbors") + duthost.shell("sudo ip neigh show") + # flush entries of vlan interface in case of dualtor to avoid issue#12302 + logger.info("Flushing all ipv4 and ipv6 neighbors on {}".format(vlan_name)) + duthost.shell("sudo ip neigh flush dev {} all".format(vlan_name)) # initial setup and checking connectivity, try to break in more chunks logger.info("initializing setup for ipv4 and ipv6") static_neighbor_entry(duthost, ptfhost_info, "add") logger.info("Checking connectivity to ptf ports") + for member in ptfhost_info: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter) + if tbinfo["topo"]["name"] in dualtor_topo: + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], + vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True) + verify_icmp_packet(duthost.facts['router_mac'], 
vmhost_info, ptfhost_info[member], + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) + else: + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo) # flushing and re-adding ipv6 static arp entry static_neighbor_entry(duthost, ptfhost_info, "del", "6") @@ -201,5 +270,11 @@ def test_vlan_ping(vlan_ping_setup, duthosts, rand_one_dut_hostname, ptfadapter) # Checking for connectivity logger.info("Check connectivity to both ptfhost") for member in ptfhost_info: - verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter) - verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter) + if tbinfo["topo"]["name"] in dualtor_topo: + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], + vmhost_info, ptfadapter, tbinfo, vlan_mac, dtor_ul=True) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], + ptfadapter, tbinfo, vlan_mac, dtor_dl=True) + else: + verify_icmp_packet(duthost.facts['router_mac'], ptfhost_info[member], vmhost_info, ptfadapter, tbinfo) + verify_icmp_packet(duthost.facts['router_mac'], vmhost_info, ptfhost_info[member], ptfadapter, tbinfo)