diff --git a/.azure-pipelines/pr_test_scripts.yaml b/.azure-pipelines/pr_test_scripts.yaml index a533c62574f..a7d7afa83ee 100644 --- a/.azure-pipelines/pr_test_scripts.yaml +++ b/.azure-pipelines/pr_test_scripts.yaml @@ -52,6 +52,7 @@ t0: - fdb/test_fdb_flush.py - fdb/test_fdb_mac_expire.py - fdb/test_fdb_mac_move.py + - fib/test_fib.py - generic_config_updater/test_aaa.py - generic_config_updater/test_bgp_speaker.py - generic_config_updater/test_bgpl.py @@ -76,7 +77,10 @@ t0: - http/test_http_copy.py - iface_loopback_action/test_iface_loopback_action.py - iface_namingmode/test_iface_namingmode.py + - ip/test_mgmt_ipv6_only.py + - ip/test_ip_packet.py - ipfwd/test_dip_sip.py + - ipfwd/test_dir_bcast.py - lldp/test_lldp.py - log_fidelity/test_bgp_shutdown.py - macsec/test_controlplane.py @@ -161,6 +165,7 @@ t0: - tacacs/test_rw_user.py - telemetry/test_events.py - telemetry/test_telemetry.py + - telemetry/test_telemetry_cert_rotation.py - test_features.py - test_interfaces.py - test_procdockerstatsd.py @@ -171,7 +176,6 @@ t0: - vlan/test_vlan.py - vlan/test_vlan_ping.py - vxlan/test_vnet_route_leak.py - - ip/test_mgmt_ipv6_only.py t0-2vlans: - dhcp_relay/test_dhcp_relay.py @@ -212,10 +216,12 @@ t1-lag: - everflow/test_everflow_per_interface.py - everflow/test_everflow_testbed.py - fdb/test_fdb_flush.py + - fib/test_fib.py - generic_config_updater/test_mmu_dynamic_threshold_config_update.py - golden_config_infra/test_config_reload_with_rendered_golden_config.py - http/test_http_copy.py - iface_namingmode/test_iface_namingmode.py + - ip/test_ip_packet.py - ipfwd/test_dip_sip.py - ipfwd/test_mtu.py - lldp/test_lldp.py @@ -322,6 +328,7 @@ onboarding_t0: onboarding_t1: - decap/test_decap.py + - generic_config_updater/test_cacl.py onboarding_dualtor: - dualtor_mgmt/test_dualtor_bgp_update_delay.py diff --git a/.azure-pipelines/pr_test_skip_scripts.yaml b/.azure-pipelines/pr_test_skip_scripts.yaml index 3b55bd09aab..7ec9c98c5bc 100644 --- 
a/.azure-pipelines/pr_test_skip_scripts.yaml +++ b/.azure-pipelines/pr_test_skip_scripts.yaml @@ -1,6 +1,6 @@ t0: - # This script only supported on Mellanox - - restapi/test_restapi.py + # KVM do not support drop reason in testcase, and testcase would set drop reason in setup stage, can't do more test + - drop_packets/test_configurable_drop_counters.py # This script only supported on Mellanox - generic_config_updater/test_pfcwd_interval.py # There is no k8s in inventory file @@ -39,9 +39,15 @@ t0: - platform_tests/mellanox/test_hw_management_service.py - platform_tests/mellanox/test_psu_power_threshold.py - platform_tests/mellanox/test_reboot_cause.py + # This script only supported on Mellanox + - restapi/test_restapi.py - snmp/test_snmp_phy_entity.py t1: + # KVM do not support bfd test + - bfd/test_bfd.py + # KVM do not support drop reason in testcase, and testcase would set drop reason in setup stage, can't do more test + - drop_packets/test_configurable_drop_counters.py # This script only supported on Mellanox - generic_config_updater/test_pfcwd_interval.py # There is no k8s in inventory file @@ -84,6 +90,9 @@ t1: - sub_port_interfaces/test_sub_port_l2_forwarding.py t2: + # KVM do not support bfd test + - bfd/test_bfd_static_route.py + - bfd/test_bfd_traffic.py # This script only supported on Mellanox - generic_config_updater/test_pfcwd_interval.py # There is no k8s in inventory file @@ -142,7 +151,7 @@ tgen: - ixia/test_ixia_traffic.py - ixia/test_tgen.py -wan: +wan-pub: # Currently PR test will not test wan topo - wan/isis/test_isis_authentication.py - wan/isis/test_isis_csnp_interval.py diff --git a/.azure-pipelines/testscripts_analyse/analyse_testscripts.py b/.azure-pipelines/testscripts_analyse/analyse_testscripts.py index 47015d7f7e9..9fa4a222a46 100644 --- a/.azure-pipelines/testscripts_analyse/analyse_testscripts.py +++ b/.azure-pipelines/testscripts_analyse/analyse_testscripts.py @@ -40,7 +40,7 @@ def topo_name_to_type(topo_name): - pattern = 
re.compile(r'^(wan|t0|t1|ptf|fullmesh|dualtor|t2|tgen|mgmttor|m0|mc0|mx|dpu|any)') + pattern = re.compile(r'^(wan|t0|t1|ptf|fullmesh|dualtor|t2|tgen|multidut-tgen|mgmttor|m0|mc0|mx|dpu|any|snappi)') match = pattern.match(topo_name) if match is None: logging.warning("Unsupported testbed type - {}".format(topo_name)) @@ -52,6 +52,8 @@ def topo_name_to_type(topo_name): topo_type = 't0' if topo_type in ['mc0']: topo_type = 'm0' + if topo_type in ['multidut-tgen']: + topo_type = 'tgen' return topo_type diff --git a/.azure-pipelines/testscripts_analyse/constant.py b/.azure-pipelines/testscripts_analyse/constant.py index 50480bc7ca4..19a6f93dbdc 100644 --- a/.azure-pipelines/testscripts_analyse/constant.py +++ b/.azure-pipelines/testscripts_analyse/constant.py @@ -6,7 +6,7 @@ } # We temporarily set four types of PR checker here -PR_TOPOLOGY_TYPE = ["t0", "t1", "t2", "wan", "dpu"] +PR_TOPOLOGY_TYPE = ["t0", "t1", "t2", "wan", "dpu", "tgen", "snappi", "ptf"] # Map the topology name and topology type in pr_test_scripts.yaml # Key is the topology name in pr_test_scripts.yaml and the value is topology type @@ -19,5 +19,9 @@ "multi-asic-t1-lag": "t1", "t2": "t2", "wan-pub": "wan", - "dpu": "dpu" + "dpu": "dpu", + "tgen": "tgen", + "multidut-tgen": "tgen", + "snappi": "snappi", + "ptf": "ptf" } diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index 4f94bd75069..9a3e91b90d4 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -523,9 +523,26 @@ path: /etc/sonic/port_config.json state: absent - - name: execute cli "config load_minigraph -y" to apply new minigraph + - name: find interface name mapping + port_alias: + hwsku: "{{ hwsku }}" + when: topo == "mx" + + - name: Copy dhcp_server config hwsku {{ hwsku }} + copy: src=golden_config_db/dhcp_server_mx.json + dest=/tmp/dhcp_server.json become: true - shell: config load_minigraph -y + when: topo == "mx" + + - name: Generate 
golden_config_db.json + generate_golden_config_db: + topo_name: "{{ topo }}" + port_index_map: "{{ port_index_map | default({}) }}" + become: true + + - name: execute cli "config load_minigraph --override_config -y" to apply new minigraph + become: true + shell: config load_minigraph --override_config -y - name: Wait for switch to become reachable again become: false diff --git a/ansible/golden_config_db/dhcp_server_mx.json b/ansible/golden_config_db/dhcp_server_mx.json new file mode 100644 index 00000000000..0524978d775 --- /dev/null +++ b/ansible/golden_config_db/dhcp_server_mx.json @@ -0,0 +1,243 @@ +{ + "DHCP_SERVER_IPV4": { + "Vlan1000": { + "gateway": "192.168.0.1", + "lease_time": "900", + "mode": "PORT", + "netmask": "255.255.255.0", + "state": "enabled" + } + }, + "DHCP_SERVER_IPV4_PORT": { + "Vlan1000|1": { + "ips": [ + "192.168.0.3" + ] + }, + "Vlan1000|2": { + "ips": [ + "192.168.0.4" + ] + }, + "Vlan1000|3": { + "ips": [ + "192.168.0.5" + ] + }, + "Vlan1000|4": { + "ips": [ + "192.168.0.6" + ] + }, + "Vlan1000|5": { + "ips": [ + "192.168.0.7" + ] + }, + "Vlan1000|6": { + "ips": [ + "192.168.0.8" + ] + }, + "Vlan1000|7": { + "ips": [ + "192.168.0.9" + ] + }, + "Vlan1000|8": { + "ips": [ + "192.168.0.10" + ] + }, + "Vlan1000|9": { + "ips": [ + "192.168.0.11" + ] + }, + "Vlan1000|10": { + "ips": [ + "192.168.0.12" + ] + }, + "Vlan1000|11": { + "ips": [ + "192.168.0.13" + ] + }, + "Vlan1000|12": { + "ips": [ + "192.168.0.14" + ] + }, + "Vlan1000|13": { + "ips": [ + "192.168.0.15" + ] + }, + "Vlan1000|14": { + "ips": [ + "192.168.0.16" + ] + }, + "Vlan1000|15": { + "ips": [ + "192.168.0.17" + ] + }, + "Vlan1000|16": { + "ips": [ + "192.168.0.18" + ] + }, + "Vlan1000|17": { + "ips": [ + "192.168.0.19" + ] + }, + "Vlan1000|18": { + "ips": [ + "192.168.0.20" + ] + }, + "Vlan1000|19": { + "ips": [ + "192.168.0.21" + ] + }, + "Vlan1000|20": { + "ips": [ + "192.168.0.22" + ] + }, + "Vlan1000|21": { + "ips": [ + "192.168.0.23" + ] + }, + "Vlan1000|22": { + "ips": 
[ + "192.168.0.24" + ] + }, + "Vlan1000|23": { + "ips": [ + "192.168.0.25" + ] + }, + "Vlan1000|24": { + "ips": [ + "192.168.0.26" + ] + }, + "Vlan1000|25": { + "ips": [ + "192.168.0.27" + ] + }, + "Vlan1000|26": { + "ips": [ + "192.168.0.28" + ] + }, + "Vlan1000|27": { + "ips": [ + "192.168.0.29" + ] + }, + "Vlan1000|28": { + "ips": [ + "192.168.0.30" + ] + }, + "Vlan1000|29": { + "ips": [ + "192.168.0.31" + ] + }, + "Vlan1000|30": { + "ips": [ + "192.168.0.32" + ] + }, + "Vlan1000|31": { + "ips": [ + "192.168.0.33" + ] + }, + "Vlan1000|32": { + "ips": [ + "192.168.0.34" + ] + }, + "Vlan1000|33": { + "ips": [ + "192.168.0.35" + ] + }, + "Vlan1000|34": { + "ips": [ + "192.168.0.36" + ] + }, + "Vlan1000|35": { + "ips": [ + "192.168.0.37" + ] + }, + "Vlan1000|36": { + "ips": [ + "192.168.0.38" + ] + }, + "Vlan1000|37": { + "ips": [ + "192.168.0.39" + ] + }, + "Vlan1000|38": { + "ips": [ + "192.168.0.40" + ] + }, + "Vlan1000|39": { + "ips": [ + "192.168.0.41" + ] + }, + "Vlan1000|40": { + "ips": [ + "192.168.0.42" + ] + }, + "Vlan1000|41": { + "ips": [ + "192.168.0.43" + ] + }, + "Vlan1000|42": { + "ips": [ + "192.168.0.44" + ] + }, + "Vlan1000|43": { + "ips": [ + "192.168.0.45" + ] + }, + "Vlan1000|44": { + "ips": [ + "192.168.0.46" + ] + }, + "Vlan1000|45": { + "ips": [ + "192.168.0.47" + ] + }, + "Vlan1000|46": { + "ips": [ + "192.168.0.48" + ] + } + } +} diff --git a/ansible/library/generate_golden_config_db.py b/ansible/library/generate_golden_config_db.py new file mode 100644 index 00000000000..38bf88ae2d8 --- /dev/null +++ b/ansible/library/generate_golden_config_db.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python + +# This ansible module is for generate golden_config_db.json +# Currently, only enable dhcp_server feature and generated related configuration in MX device +# which has dhcp_server feature. 
+ + +import copy +import json + +from ansible.module_utils.basic import AnsibleModule + +DOCUMENTATION = ''' +module: generate_golden_config_db.py +author: Yaqiang Zhu (yaqiangzhu@microsoft.com) +short_description: Generate golden_config_db.json +Description: + When load_minigraph, SONiC support to use parameter --override_config to add configuration via + golden_config_db.json. This module is to generate required /etc/sonic/golden_config_db.json + Input: + topo_name: Name of current topo +''' + +GOLDEN_CONFIG_DB_PATH = "/etc/sonic/golden_config_db.json" +TEMP_DHCP_SERVER_CONFIG_PATH = "/tmp/dhcp_server.json" +DUMMY_QUOTA = "dummy_single_quota" + + +class GenerateGoldenConfigDBModule(object): + def __init__(self): + self.module = AnsibleModule(argument_spec=dict(topo_name=dict(required=True, type='str'), + port_index_map=dict(require=False, type='dict', default=None)), + supports_check_mode=True) + self.topo_name = self.module.params['topo_name'] + self.port_index_map = self.module.params['port_index_map'] + + def generate_mx_golden_config_db(self): + """ + If FEATURE table in init_cfg.json contains dhcp_server, enable it. 
+ And add dhcp_server related configuration + """ + rc, out, err = self.module.run_command("cat /etc/sonic/init_cfg.json") + if rc != 0: + self.module.fail_json(msg="Failed to get init_cfg.json: {}".format(err)) + + # Generate FEATURE table from init_cfg.ini + init_config_obj = json.loads(out) + gold_config_db = {} + if "FEATURE" not in init_config_obj or "dhcp_server" not in init_config_obj["FEATURE"]: + return "{}" + init_config_obj["FEATURE"]["dhcp_server"]["state"] = "enabled" + gold_config_db = {"FEATURE": copy.deepcopy(init_config_obj["FEATURE"])} + + # Generate dhcp_server related configuration + rc, out, err = self.module.run_command("cat {}".format(TEMP_DHCP_SERVER_CONFIG_PATH)) + if rc != 0: + self.module.fail_json(msg="Failed to get dhcp_server config: {}".format(err)) + if self.port_index_map is None or self.port_index_map == {}: + self.module.fail_json(msg="port_index_map is missing") + dhcp_server_config_obj = json.loads(out) + # Update DHCP_SERVER_IPV4_PORT based on port index map + dhcp_server_port_config = {} + for key, value in dhcp_server_config_obj["DHCP_SERVER_IPV4_PORT"].items(): + splits = key.split("|") + new_key = "{}|{}".format(splits[0], self.port_index_map[splits[1]]) + dhcp_server_port_config[new_key] = value + dhcp_server_config_obj["DHCP_SERVER_IPV4_PORT"] = dhcp_server_port_config + + gold_config_db.update(dhcp_server_config_obj) + return json.dumps(gold_config_db, indent=4) + + def generate(self): + if self.topo_name == "mx": + config = self.generate_mx_golden_config_db() + else: + config = "{}" + + with open(GOLDEN_CONFIG_DB_PATH, "w") as temp_file: + temp_file.write(config) + self.module.run_command("sudo rm -f {}".format(TEMP_DHCP_SERVER_CONFIG_PATH)) + self.module.exit_json(change=True, msg="Success to generate golden_config_db.json") + + +def main(): + generate_golden_config_db = GenerateGoldenConfigDBModule() + generate_golden_config_db.generate() + + +if __name__ == '__main__': + main() diff --git 
a/ansible/library/port_alias.py b/ansible/library/port_alias.py index 073c8c122d7..0535d8e5f4a 100755 --- a/ansible/library/port_alias.py +++ b/ansible/library/port_alias.py @@ -104,6 +104,7 @@ def get_portmap(self, asic_id=None, include_internal=False, portmap = {} aliasmap = {} portspeed = {} + indexmap = {} # Front end interface asic names front_panel_asic_ifnames = {} front_panel_asic_id = {} @@ -205,6 +206,8 @@ def get_portmap(self, asic_id=None, include_internal=False, sysport['asic_name'] = asic_name sysport['switchid'] = switchid sysports.append(sysport) + if port_index != -1 and len(mapping) > port_index: + indexmap[mapping[port_index]] = name if len(sysports) > 0: sysport = {} sysport['name'] = 'Cpu0' @@ -218,7 +221,7 @@ def get_portmap(self, asic_id=None, include_internal=False, sysports.insert(0, sysport) return (aliases, portmap, aliasmap, portspeed, front_panel_asic_ifnames, front_panel_asic_id, asic_if_names, - sysports) + sysports, indexmap) def main(): @@ -241,6 +244,7 @@ def main(): aliasmap = {} portspeed = {} sysports = [] + indexmap = {} # Map of ASIC interface names to front panel interfaces front_panel_asic_ifnames = {} front_panel_asic_ifs_asic_id = {} @@ -296,7 +300,7 @@ def main(): if num_asic == 1: asic_id = None (aliases_asic, portmap_asic, aliasmap_asic, portspeed_asic, front_panel_asic, front_panel_asic_ids, - asicifnames_asic, sysport_asic) = allmap.get_portmap( + asicifnames_asic, sysport_asic, index_name) = allmap.get_portmap( asic_id, include_internal, hostname, switchid, slotid) if aliases_asic is not None: aliases.extend(aliases_asic) @@ -315,6 +319,8 @@ def main(): asic_if_names[asic] = asicifnames_asic if sysport_asic is not None: sysports.extend(sysport_asic) + if index_name is not None: + indexmap.update(index_name) # Sort the Interface Name needed in multi-asic aliases.sort(key=lambda x: int(x[1])) @@ -335,7 +341,8 @@ def main(): 'front_panel_asic_ifnames': front_panel_asic_ifnames_list, 'front_panel_asic_ifs_asic_id': 
front_panel_asic_ifs_asic_id_list, 'asic_if_names': asic_if_names, - 'sysports': sysports}) + 'sysports': sysports, + 'port_index_map': indexmap}) except (IOError, OSError) as e: fail_msg = "IO error" + str(e) diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py index c6b1ceb5198..5be78e3344b 100644 --- a/ansible/module_utils/port_utils.py +++ b/ansible/module_utils/port_utils.py @@ -291,6 +291,13 @@ def get_port_alias_to_name_map(hwsku, asic_name=None): elif hwsku == "et6448m": for i in range(0, 52): port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: + for i in range(0, 32): + port_alias_to_name_map["oneGigE%d" % i] = "Ethernet%d" % i + for i in range(32, 48): + port_alias_to_name_map["twod5GigE%d" % i] = "Ethernet%d" % i + for i in range(48, 54): + port_alias_to_name_map["twenty5GigE%d" % i] = "Ethernet%d" % i elif hwsku == "Nokia-IXR7250E-36x400G" or hwsku == "Nokia-IXR7250E-36x100G": for i in range(1, 37): sonic_name = "Ethernet%d" % ((i - 1) * 8) diff --git a/ansible/roles/test/files/acstests/everflow_policer_test.py b/ansible/roles/test/files/acstests/everflow_policer_test.py index de4e0fd06db..09c5cc96b9f 100644 --- a/ansible/roles/test/files/acstests/everflow_policer_test.py +++ b/ansible/roles/test/files/acstests/everflow_policer_test.py @@ -216,7 +216,7 @@ def checkMirroredFlow(self): if self.asic_type in ["mellanox"]: import binascii payload = binascii.unhexlify("0"*44) + str(payload) # Add the padding - elif self.asic_type in ["innovium"]: + elif self.asic_type in ["innovium"] or self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: import binascii payload = binascii.unhexlify("0"*24) + str(payload) # Add the padding @@ -252,6 +252,7 @@ def checkMirroredFlow(self): masked_exp_pkt.set_do_not_care_scapy(scapy.GRE, "seqnum_present") if self.asic_type in ["marvell"]: masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "id") + 
masked_exp_pkt.set_do_not_care_scapy(scapy.GRE, "seqnum_present") if exp_pkt.haslayer(scapy.ERSPAN_III): masked_exp_pkt.set_do_not_care_scapy(scapy.ERSPAN_III, "span_id") @@ -269,7 +270,7 @@ def match_payload(pkt): pkt = scapy.Ether(pkt).load pkt = pkt[22:] # Mask the Mellanox specific inner header pkt = scapy.Ether(pkt) - elif self.asic_type in ["innovium"]: + elif self.asic_type in ["innovium"] or self.hwsku in ["rd98DX35xx_cn9131", "rd98DX35xx", "Nokia-7215-A1"]: pkt = scapy.Ether(pkt)[scapy.GRE].payload pkt_str = str(pkt) pkt = scapy.Ether(pkt_str[8:]) diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index 4fcc7ecf152..148b19c3b5d 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -68,7 +68,7 @@ r, ".* ERR bgp#bgpcfgd: .*BGPVac.*attribute is supported.*" # https://msazure.visualstudio.com/One/_workitems/edit/14233938 r, ".* ERR swss\d*#fdbsyncd: :- readData: netlink reports an error=-25 on reading a netlink socket.*" -r, ".* ERR swss\d*#fdbsyncd: :- readData: netlink reports an error=-33 on reading a netlink socket.*" +r, ".* ERR swss\d*#.*syncd: :- readData: netlink reports an error=-33 on reading a netlink socket.*" # https://dev.azure.com/msazure/One/_workitems/edit/14213168 r, ".* ERR /hostcfgd: sonic-kdump-config --disable - failed.*" diff --git a/tests/bfd/test_bfd_traffic.py b/tests/bfd/test_bfd_traffic.py index 78cb5ea288a..a49b09193ce 100644 --- a/tests/bfd/test_bfd_traffic.py +++ b/tests/bfd/test_bfd_traffic.py @@ -7,7 +7,10 @@ prepare_traffic_test_variables, get_random_bgp_neighbor_ip_of_asic, toggle_port_channel_or_member, \ get_port_channel_by_member, wait_until_bfd_up, wait_until_given_bfd_down, assert_traffic_switching -pytestmark = [pytest.mark.topology("t2")] +pytestmark = [ + pytest.mark.topology("t2"), + 
pytest.mark.device_type('physical') +] logger = logging.getLogger(__name__) diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 0a42b684d05..16d0bd5301f 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -524,8 +524,7 @@ ecmp/test_ecmp_sai_value.py: conditions: - "topo_type not in ['t1', 't0']" - "asic_type not in ['broadcom']" - - "release in ['201911', '202012', '202205', '202211', 'master']" - - "'internal' in build_version" + - "release in ['201911', '202012', '202205', '202211']" - "topo_type in ['t1'] and hwsku in ['Arista-7050CX3-32S-C32']" ecmp/test_fgnhg.py: diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml index 19d252acce5..43bd4bdf0f7 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_skip_traffic_test.yaml @@ -116,3 +116,30 @@ everflow/test_everflow_testbed.py: reason: "Skip traffic test for KVM testbed" conditions: - "asic_type in ['vs']" + +####################################### +##### fib ##### +####################################### +fib/test_fib.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +####################################### +##### ip ##### +####################################### +ip/test_ip_packet.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" + +####################################### +##### ipfwd ##### +####################################### +ipfwd/test_dir_bcast.py: + skip_traffic_test: + reason: "Skip traffic test for KVM testbed" + conditions: + - "asic_type in ['vs']" 
diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index 974645ad0a3..c6469273c55 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -14,6 +14,7 @@ from tests.common.helpers.parallel import parallel_run, reset_ansible_local_tmp from tests.common.dualtor.mux_simulator_control import _probe_mux_ports from tests.common.fixtures.duthost_utils import check_bgp_router_id +from tests.common.errors import RunAnsibleModuleFail logger = logging.getLogger(__name__) SYSTEM_STABILIZE_MAX_TIME = 300 @@ -29,6 +30,7 @@ 'check_monit', 'check_secureboot', 'check_neighbor_macsec_empty', + 'check_ipv6_mgmt', 'check_mux_simulator'] __all__ = CHECK_ITEMS @@ -982,3 +984,36 @@ def _check(*args, **kwargs): return init_check_result return _check + + +# check ipv6 neighbor reachability +@pytest.fixture(scope="module") +def check_ipv6_mgmt(duthosts, localhost): + # check ipv6 mgmt interface reachability for debugging purpose only. + # No failure will be trigger for this sanity check. + def _check(*args, **kwargs): + init_result = {"failed": False, "check_item": "ipv6_mgmt"} + result = parallel_run(_check_ipv6_mgmt_to_dut, args, kwargs, duthosts, timeout=30, init_result=init_result) + return list(result.values()) + + def _check_ipv6_mgmt_to_dut(*args, **kwargs): + dut = kwargs['node'] + results = kwargs['results'] + + logger.info("Checking ipv6 mgmt interface reachability on %s..." % dut.hostname) + check_result = {"failed": False, "check_item": "ipv6_mgmt", "host": dut.hostname} + + # most of the testbed should reply within 10 ms, Set the timeout to 2 seconds to reduce the impact of delay. 
+ try: + shell_result = localhost.shell("ping6 -c 2 -W 2 " + dut.mgmt_ipv6) + logging.info("ping6 output: %s" % shell_result["stdout"]) + except RunAnsibleModuleFail as e: + # set to False for now to avoid blocking the test + check_result["failed"] = False + logging.info("Failed to ping ipv6 mgmt interface on %s, exception: %s" % (dut.hostname, repr(e))) + except Exception as e: + logger.info("Exception while checking ipv6_mgmt reachability for %s: %s" % (dut.hostname, repr(e))) + finally: + logger.info("Done checking ipv6 management reachability on %s" % dut.hostname) + results[dut.hostname] = check_result + return _check diff --git a/tests/conftest.py b/tests/conftest.py index 46f14abf77f..5415c7819f8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -863,6 +863,13 @@ def collect_techsupport_all_duts(request, duthosts): [collect_techsupport_on_dut(request, a_dut) for a_dut in duthosts] +@pytest.fixture +def collect_techsupport_all_nbrs(request, nbrhosts): + yield + if request.config.getoption("neighbor_type") == "sonic": + [collect_techsupport_on_dut(request, nbrhosts[nbrhost]['host']) for nbrhost in nbrhosts] + + @pytest.fixture(scope="session", autouse=True) def tag_test_report(request, pytestconfig, tbinfo, duthost, record_testsuite_property): if not request.config.getoption("--junit-xml"): @@ -2057,7 +2064,7 @@ def compare_running_config(pre_running_config, cur_running_config): for key in pre_running_config.keys(): if not compare_running_config(pre_running_config[key], cur_running_config[key]): return False - return True + return True # We only have string in list in running config now, so we can ignore the order of the list. 
elif type(pre_running_config) is list: if set(pre_running_config) != set(cur_running_config): diff --git a/tests/dash/conftest.py b/tests/dash/conftest.py index d66b9623e4d..1c310e1b630 100644 --- a/tests/dash/conftest.py +++ b/tests/dash/conftest.py @@ -43,6 +43,12 @@ def pytest_addoption(parser): help="Skip dataplane checking" ) + parser.addoption( + "--skip_cert_cleanup", + action="store_true", + help="Skip certificates cleanup after test" + ) + @pytest.fixture(scope="module") def config_only(request): @@ -64,6 +70,11 @@ def skip_dataplane_checking(request): return request.config.getoption("--skip_dataplane_checking") +@pytest.fixture(scope="module") +def skip_cert_cleanup(request): + return request.config.getoption("--skip_cert_cleanup") + + @pytest.fixture(scope="module") def config_facts(duthost): return duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] @@ -244,7 +255,7 @@ def apply_direct_configs(dash_outbound_configs, apply_config): @pytest.fixture(scope="module", autouse=True) -def setup_gnmi_server(duthosts, rand_one_dut_hostname, localhost, ptfhost): +def setup_gnmi_server(duthosts, rand_one_dut_hostname, localhost, ptfhost, skip_cert_cleanup): if not ENABLE_GNMI_API: yield return @@ -253,7 +264,7 @@ def setup_gnmi_server(duthosts, rand_one_dut_hostname, localhost, ptfhost): generate_gnmi_cert(localhost, duthost) apply_gnmi_cert(duthost, ptfhost) yield - recover_gnmi_cert(localhost, duthost) + recover_gnmi_cert(localhost, duthost, skip_cert_cleanup) @pytest.fixture(scope="function") diff --git a/tests/dash/gnmi_utils.py b/tests/dash/gnmi_utils.py index 68860d61ee3..95432701dff 100644 --- a/tests/dash/gnmi_utils.py +++ b/tests/dash/gnmi_utils.py @@ -197,7 +197,7 @@ def apply_gnmi_cert(duthost, ptfhost): time.sleep(env.gnmi_server_start_wait_time) -def recover_gnmi_cert(localhost, duthost): +def recover_gnmi_cert(localhost, duthost, skip_cert_cleanup): """ Restart gnmi server to use default certificate @@ -208,7 +208,8 @@ def 
recover_gnmi_cert(localhost, duthost): Returns: """ env = GNMIEnvironment(duthost) - localhost.shell("rm -rf "+env.work_dir, module_ignore_errors=True) + if not skip_cert_cleanup: + localhost.shell("rm -rf "+env.work_dir, module_ignore_errors=True) dut_command = "docker exec %s supervisorctl status %s" % (env.gnmi_container, env.gnmi_program) output = duthost.command(dut_command, module_ignore_errors=True)['stdout'].strip() if 'RUNNING' in output: diff --git a/tests/drop_packets/drop_packets.py b/tests/drop_packets/drop_packets.py index 05d0da520f1..7bd7511df09 100644 --- a/tests/drop_packets/drop_packets.py +++ b/tests/drop_packets/drop_packets.py @@ -158,6 +158,9 @@ def is_mellanox_devices(hwsku): def is_mellanox_fanout(duthost, localhost): # Ansible localhost fixture which calls ansible playbook on the local host + if duthost.facts.get("asic_type") == "vs": + return False + try: dut_facts = \ localhost.conn_graph_facts(host=duthost.hostname, filepath=LAB_CONNECTION_GRAPH_PATH)["ansible_facts"] diff --git a/tests/ecmp/test_ecmp_sai_value.py b/tests/ecmp/test_ecmp_sai_value.py index 795b22d46fe..05c02e51d55 100644 --- a/tests/ecmp/test_ecmp_sai_value.py +++ b/tests/ecmp/test_ecmp_sai_value.py @@ -126,10 +126,14 @@ def check_config_bcm_file(duthost, topo_type): logging.info("sai_hash_seed_config_hash_offset_enable={}".format(value)) else: logging.info("sai_hash_seed_config_hash_offset_enable not found in the file.") - if topo_type == "t0": - pytest_assert(not cat_output, "sai_hash_seed_config_hash_offset_enable should not set for T0") - if topo_type == "t1": - pytest_assert(cat_output and value == "1", "sai_hash_seed_config_hash_offset_enable is not set to 1") + # with code change https://github.com/sonic-net/sonic-buildimage/pull/18912, + # the sai_hash_seed_config_hash_offset_enable is not set in config.bcm, + # it's set by swss config on 202311 and later image + if "20230531" in duthost.os_version: + if topo_type == "t0": + pytest_assert(not cat_output, 
"sai_hash_seed_config_hash_offset_enable should not set for T0") + if topo_type == "t1": + pytest_assert(cat_output and value == "1", "sai_hash_seed_config_hash_offset_enable is not set to 1") else: pytest.fail("Config bcm file not found.") @@ -276,7 +280,7 @@ def test_ecmp_offset_value(localhost, duthosts, tbinfo, enum_rand_one_per_hwsku_ elif parameter == "reload": logging.info("Run config reload on DUT") config_reload(duthost, safe_reload=True, check_intf_up_ports=True) - check_hash_seed_value(duthost, asic_name, topo_type) + check_ecmp_offset_value(duthost, asic_name, topo_type, hwsku) elif parameter == "reboot": logging.info("Run cold reboot on DUT") reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, diff --git a/tests/everflow/everflow_test_utilities.py b/tests/everflow/everflow_test_utilities.py index a925fe5c87e..97e80743fea 100644 --- a/tests/everflow/everflow_test_utilities.py +++ b/tests/everflow/everflow_test_utilities.py @@ -860,9 +860,12 @@ def get_expected_mirror_packet(mirror_session, setup, duthost, direction, mirror payload = binascii.unhexlify("0" * 44) + str(payload) else: payload = binascii.unhexlify("0" * 44) + bytes(payload) - - if duthost.facts["asic_type"] in ["barefoot", "cisco-8000", "innovium"] or duthost.facts.get( - "platform_asic") in ["broadcom-dnx"]: + if ( + duthost.facts["asic_type"] in ["barefoot", "cisco-8000", "innovium"] + or duthost.facts.get("platform_asic") in ["broadcom-dnx"] + or duthost.facts["hwsku"] + in ["rd98DX35xx", "rd98DX35xx_cn9131", "Nokia-7215-A1"] + ): if six.PY2: payload = binascii.unhexlify("0" * 24) + str(payload) else: @@ -888,6 +891,7 @@ def get_expected_mirror_packet(mirror_session, setup, duthost, direction, mirror expected_packet.set_do_not_care_scapy(packet.IP, "chksum") if duthost.facts["asic_type"] == 'marvell': expected_packet.set_do_not_care_scapy(packet.IP, "id") + expected_packet.set_do_not_care_scapy(packet.GRE, "seqnum_present") if duthost.facts["asic_type"] in 
["cisco-8000", "innovium"] or \ duthost.facts.get("platform_asic") in ["broadcom-dnx"]: expected_packet.set_do_not_care_scapy(packet.GRE, "seqnum_present") diff --git a/tests/fib/test_fib.py b/tests/fib/test_fib.py index 2d04f024c68..5b57d527b70 100644 --- a/tests/fib/test_fib.py +++ b/tests/fib/test_fib.py @@ -10,6 +10,9 @@ from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 from tests.common.fixtures.ptfhost_utils import set_ptf_port_mapping_mode # noqa F401 from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active, ptf_test_port_map +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa: F401 from tests.ptf_runner import ptf_runner from tests.common.dualtor.mux_simulator_control import mux_server_url # noqa F401 @@ -83,7 +86,8 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, mux_status_from_nic_simulator, ignore_ttl, single_fib_for_duts, # noqa F401 duts_running_config_facts, duts_minigraph_facts, - validate_active_active_dualtor_setup): # noqa F401 + validate_active_active_dualtor_setup, # noqa F401 + skip_traffic_test): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -103,6 +107,8 @@ def test_basic_fib(duthosts, ptfhost, ipv4, ipv6, mtu, log_file = "/tmp/fib_test.FibTest.ipv4.{}.ipv6.{}.{}.log".format( ipv4, ipv6, timestamp) logging.info("PTF log file: %s" % log_file) + if skip_traffic_test is True: + return ptf_runner( ptfhost, "ptftests", @@ -315,7 +321,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s hash_keys, ptfhost, ipver, toggle_all_simulator_ports_to_rand_selected_tor_m, # noqa F811 updated_tbinfo, mux_server_url, mux_status_from_nic_simulator, ignore_ttl, # noqa F811 
single_fib_for_duts, duts_running_config_facts, duts_minigraph_facts, # noqa F811 - setup_active_active_ports, active_active_ports): # noqa F811 + setup_active_active_ports, active_active_ports, skip_traffic_test): # noqa F811 if 'dualtor' in updated_tbinfo['topo']['name']: wait(30, 'Wait some time for mux active/standby state to be stable after toggled mux state') @@ -331,6 +337,8 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE + if skip_traffic_test is True: + return ptf_runner( ptfhost, "ptftests", @@ -365,7 +373,7 @@ def test_hash(add_default_route_to_dut, duthosts, fib_info_files_per_function, s def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files_per_function, # noqa F811 hash_keys, ptfhost, ipver, tbinfo, mux_server_url, # noqa F811 ignore_ttl, single_fib_for_duts, duts_running_config_facts, # noqa F811 - duts_minigraph_facts): + duts_minigraph_facts, skip_traffic_test): # noqa F811 # Skip test on none T1 testbed pytest_require('t1' == tbinfo['topo']['type'], "The test case runs on T1 topology") @@ -379,6 +387,8 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE + if skip_traffic_test is True: + return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", @@ -404,7 +414,8 @@ def test_ipinip_hash(add_default_route_to_dut, duthost, duthosts, fib_info_files def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files_per_function, # noqa F811 ptfhost, ipver, tbinfo, mux_server_url, ignore_ttl, single_fib_for_duts, # noqa F811 - duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator): + duts_running_config_facts, duts_minigraph_facts, mux_status_from_nic_simulator, + skip_traffic_test): # noqa F811 hash_keys = ['inner_length'] timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') 
log_file = "/tmp/hash_test.IPinIPHashTest.{}.{}.log".format( @@ -416,6 +427,8 @@ def test_ipinip_hash_negative(add_default_route_to_dut, duthosts, fib_info_files else: src_ip_range = SRC_IPV6_RANGE dst_ip_range = DST_IPV6_RANGE + if skip_traffic_test is True: + return ptf_runner(ptfhost, "ptftests", "hash_test.IPinIPHashTest", diff --git a/tests/generic_config_updater/test_cacl.py b/tests/generic_config_updater/test_cacl.py index 5764bb01f2f..cbaa7b9b1b3 100644 --- a/tests/generic_config_updater/test_cacl.py +++ b/tests/generic_config_updater/test_cacl.py @@ -18,7 +18,7 @@ # SSH_ONLY CTRLPLANE SSH SSH_ONLY ingress pytestmark = [ - pytest.mark.topology('t0', 'm0', 'mx'), + pytest.mark.topology('t0', 'm0', 'mx', 't1'), ] logger = logging.getLogger(__name__) @@ -134,24 +134,24 @@ def expect_res_success_acl_rule(duthost, expected_content_list, unexpected_conte expect_res_success(duthost, output, expected_content_list, unexpected_content_list) -def cacl_tc1_add_new_table(duthost): +def cacl_tc1_add_new_table(duthost, protocol): """ Add acl table for test Sample output admin@vlab-01:~$ show acl table - Name Type Binding Description Stage - ------ --------- --------- ------------- ------- - ... 
- TEST_1 CTRLPLANE SNMP Test_Table_1 ingress + Name Type Binding Description Stage Status + ---------------------- --------- --------------- ---------------------------- ------- -------- + SNMP_TEST_1 CTRLPLANE SNMP SNMP_Test_Table_1 ingress Active """ + table = "{}_TEST_1".format(protocol) json_patch = [ { "op": "add", - "path": "/ACL_TABLE/TEST_1", + "path": "/ACL_TABLE/{}".format(table), "value": { - "policy_desc": "Test_Table_1", + "policy_desc": "{}_Test_Table_1".format(protocol), "services": [ - "SNMP" + protocol ], "stage": "ingress", "type": "CTRLPLANE" @@ -166,23 +166,27 @@ def cacl_tc1_add_new_table(duthost): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - expected_content_list = ["TEST_1", "CTRLPLANE", "SNMP", "Test_Table_1", "ingress"] - expect_acl_table_match(duthost, "TEST_1", expected_content_list) + expected_content_list = [table, "CTRLPLANE", protocol, "{}_Test_Table_1".format(protocol), "ingress"] + expect_acl_table_match(duthost, table, expected_content_list) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc1_add_duplicate_table(duthost): +def cacl_tc1_add_duplicate_table(duthost, protocol): """ Add duplicate acl table """ + if protocol == 'SSH': + table_name = "SSH_ONLY" + else: + table_name = "{}_ACL".format(protocol) json_patch = [ { "op": "add", - "path": "/ACL_TABLE/SNMP_ACL", + "path": "/ACL_TABLE/{}".format(table_name), "value": { - "policy_desc": "SNMP_ACL", + "policy_desc": table_name, "services": [ - "SNMP" + protocol ], "stage": "ingress", "type": "CTRLPLANE" @@ -200,32 +204,53 @@ def cacl_tc1_add_duplicate_table(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc1_replace_table_variable(duthost): +def cacl_tc1_replace_table_variable(duthost, protocol): """ Replace acl table with SSH service Expected output admin@vlab-01:~$ show acl table Name Type Binding Description Stage ---------- --------- --------------- ------------- ------- - SNMP_ACL CTRLPLANE SSH SNMP_TO_SSH 
egress + SNMP_ACL CTRLPLANE SNMP SNMP_TO_SSH egress """ - json_patch = [ - { - "op": "replace", - "path": "/ACL_TABLE/SNMP_ACL/stage", - "value": "egress" - }, - { - "op": "replace", - "path": "/ACL_TABLE/SNMP_ACL/services/0", - "value": "SSH" - }, - { - "op": "replace", - "path": "/ACL_TABLE/SNMP_ACL/policy_desc", - "value": "SNMP_TO_SSH" - } - ] + if protocol == 'SSH': + table_name = "SSH_ONLY" + json_patch = [ + { + "op": "replace", + "path": "/ACL_TABLE/{}/stage".format(table_name), + "value": "egress" + }, + { + "op": "replace", + "path": "/ACL_TABLE/{}/services/0".format(table_name), + "value": "NTP" + }, + { + "op": "replace", + "path": "/ACL_TABLE/{}/policy_desc".format(table_name), + "value": "{}_TO_NTP".format(protocol) + } + ] + else: + table_name = "{}_ACL".format(protocol) + json_patch = [ + { + "op": "replace", + "path": "/ACL_TABLE/{}/stage".format(table_name), + "value": "egress" + }, + { + "op": "replace", + "path": "/ACL_TABLE/{}/services/0".format(table_name), + "value": "SSH" + }, + { + "op": "replace", + "path": "/ACL_TABLE/{}/policy_desc".format(table_name), + "value": "{}_TO_SSH".format(protocol) + } + ] tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) @@ -233,23 +258,26 @@ def cacl_tc1_replace_table_variable(duthost): try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - - expected_content_list = ["SNMP_ACL", "CTRLPLANE", "SSH", - "SNMP_TO_SSH", "egress"] - expect_acl_table_match(duthost, "SNMP_ACL", expected_content_list) + if protocol == 'SSH': + expected_content_list = [table_name, "CTRLPLANE", "NTP", + "{}_TO_NTP".format(protocol), "egress"] + else: + expected_content_list = [table_name, "CTRLPLANE", "SSH", + "{}_TO_SSH".format(protocol), "egress"] + expect_acl_table_match(duthost, table_name, expected_content_list) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc1_add_invalid_table(duthost): +def cacl_tc1_add_invalid_table(duthost, protocol): 
""" Add invalid acl table {"service": "SSH", "stage": "ogress", "type": "CTRLPLANE"}, # wrong stage {"service": "SSH", "stage": "ingress", "type": "TRLPLANE"} # wrong type """ invalid_table = [ - {"service": "SSH", "stage": "ogress", "type": "CTRLPLANE"}, - {"service": "SSH", "stage": "ingress", "type": "TRLPLANE"} + {"service": protocol, "stage": "ogress", "type": "CTRLPLANE"}, + {"service": protocol, "stage": "ingress", "type": "TRLPLANE"} ] for ele in invalid_table: @@ -297,13 +325,17 @@ def cacl_tc1_remove_unexisted_table(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc1_remove_table(duthost): +def cacl_tc1_remove_table(duthost, protocol): """ Remove acl table test """ + if protocol == 'SSH': + table_name = "SSH_ONLY" + else: + table_name = "{}_ACL".format(protocol) json_patch = [ { "op": "remove", - "path": "/ACL_TABLE/SSH_ONLY" + "path": "/ACL_TABLE/{}".format(table_name) } ] @@ -314,21 +346,12 @@ def cacl_tc1_remove_table(duthost): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - expect_acl_table_match(duthost, "SSH_ONLY", []) + expect_acl_table_match(duthost, table_name, []) finally: delete_tmpfile(duthost, tmpfile) -def test_cacl_tc1_acl_table_suite(rand_selected_dut): - cacl_tc1_add_new_table(rand_selected_dut) - cacl_tc1_add_duplicate_table(rand_selected_dut) - cacl_tc1_replace_table_variable(rand_selected_dut) - cacl_tc1_add_invalid_table(rand_selected_dut) - cacl_tc1_remove_unexisted_table(rand_selected_dut) - cacl_tc1_remove_table(rand_selected_dut) - - -def cacl_tc2_add_init_rule(duthost): +def cacl_tc2_add_init_rule(duthost, protocol): """ Add acl rule for test Check 'ip tables' to make sure rule is actually being applied @@ -342,51 +365,93 @@ def cacl_tc2_add_init_rule(duthost): SRC_IP: 9.9.9.9/32 """ + params_dict = {} + + if protocol == 'SSH': + params_dict["table"] = "SSH_ONLY" + params_dict["IP_PROTOCOL"] = "6" + params_dict["L4_DST_PORT"] = "22" + elif protocol == 'SNMP': + 
params_dict["table"] = "SNMP_ACL" + params_dict["IP_PROTOCOL"] = "17" + params_dict["L4_DST_PORT"] = "161" + elif protocol == 'NTP': + params_dict["table"] = "NTP_ACL" + params_dict["IP_PROTOCOL"] = "17" + params_dict["L4_DST_PORT"] = "123" + elif protocol == 'EXTERNAL_CLIENT': + params_dict["table"] = "EXTERNAL_CLIENT_ACL" + params_dict["IP_PROTOCOL"] = "6" + params_dict["L4_DST_PORT"] = "8081" json_patch = [ { "op": "add", "path": "/ACL_RULE", "value": { - "SSH_ONLY|TEST_DROP": { - "L4_DST_PORT": "22", - "IP_PROTOCOL": "6", - "IP_TYPE": "IP", - "PACKET_ACTION": "DROP", - "PRIORITY": "9998", - "SRC_IP": "9.9.9.9/32" + "{}|TEST_DROP".format(params_dict["table"]): { + "IP_PROTOCOL": "{}".format(params_dict["IP_PROTOCOL"]), + "L4_DST_PORT": "{}".format(params_dict["L4_DST_PORT"]), + "IP_TYPE": "IP", + "PACKET_ACTION": "DROP", + "PRIORITY": "9998", + "SRC_IP": "9.9.9.9/32" } } } ] - tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - - expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 22 -j DROP"] + if protocol == 'SSH': + expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 22 -j DROP"] + if protocol == 'NTP': + expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p udp -m udp --dport 123 -j DROP"] + elif protocol == 'SNMP': + expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 161 -j DROP", + "-A INPUT -s 9.9.9.9/32 -p udp -m udp --dport 161 -j DROP"] + elif protocol == 'EXTERNAL_CLIENT': + expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 8081 -j DROP"] expect_res_success_acl_rule(duthost, expected_content_list, []) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc2_add_duplicate_rule(duthost): +def cacl_tc2_add_duplicate_rule(duthost, protocol): """ Add duplicate acl rule for test """ + params_dict = {} + + if protocol == 'SSH': + 
params_dict["table"] = "SSH_ONLY" + params_dict["IP_PROTOCOL"] = "6" + params_dict["L4_DST_PORT"] = "22" + elif protocol == 'SNMP': + params_dict["table"] = "SNMP_ACL" + params_dict["IP_PROTOCOL"] = "17" + params_dict["L4_DST_PORT"] = "161" + elif protocol == 'NTP': + params_dict["table"] = "NTP_ACL" + params_dict["IP_PROTOCOL"] = "6" + params_dict["L4_DST_PORT"] = "123" + elif protocol == 'EXTERNAL_CLIENT': + params_dict["table"] = "EXTERNAL_CLIENT_ACL" + params_dict["IP_PROTOCOL"] = "6" + params_dict["L4_DST_PORT"] = "8081" json_patch = [ { "op": "add", "path": "/ACL_RULE", "value": { - "SSH_ONLY|TEST_DROP": { - "L4_DST_PORT": "22", - "IP_PROTOCOL": "6", - "IP_TYPE": "IP", - "PACKET_ACTION": "DROP", - "PRIORITY": "9998", - "SRC_IP": "9.9.9.9/32" + "{}|TEST_DROP".format(params_dict["table"]): { + "IP_PROTOCOL": "{}".format(params_dict["IP_PROTOCOL"]), + "L4_DST_PORT": "{}".format(params_dict["L4_DST_PORT"]), + "IP_TYPE": "IP", + "PACKET_ACTION": "DROP", + "PRIORITY": "9998", + "SRC_IP": "9.9.9.9/32" } } } @@ -402,7 +467,7 @@ def cacl_tc2_add_duplicate_rule(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_replace_rule(duthost): +def cacl_tc2_replace_rule(duthost, protocol): """ Replace a value from acl rule test Check 'ip tables' to make sure rule is actually being applied @@ -415,23 +480,41 @@ def cacl_tc2_replace_rule(duthost): L4_DST_PORT: 22 SRC_IP: 8.8.8.8/32 """ + if protocol == 'SSH': + table = 'SSH_ONLY' + elif protocol == 'SNMP': + table = 'SNMP_ACL' + elif protocol == 'NTP': + table = 'NTP_ACL' + elif protocol == 'EXTERNAL_CLIENT': + table = 'EXTERNAL_CLIENT_ACL' json_patch = [ { "op": "replace", - "path": "/ACL_RULE/SSH_ONLY|TEST_DROP/SRC_IP", + "path": "/ACL_RULE/{}|TEST_DROP/SRC_IP".format(table), "value": "8.8.8.8/32" } ] - tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - - expected_content_list = ["-A 
INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 22 -j DROP"] - unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 22 -j DROP"] + if protocol == 'SSH': + expected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 22 -j DROP"] + unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 22 -j DROP"] + if protocol == 'NTP': + expected_content_list = ["-A INPUT -s 8.8.8.8/32 -p udp -m udp --dport 123 -j DROP"] + unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p udp -m udp --dport 123 -j DROP"] + elif protocol == 'SNMP': + expected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 161 -j DROP", + "-A INPUT -s 8.8.8.8/32 -p udp -m udp --dport 161 -j DROP"] + unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 161 -j DROP", + "-A INPUT -s 9.9.9.9/32 -p udp -m udp --dport 161 -j DROP"] + elif protocol == 'EXTERNAL_CLIENT': + expected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 8081 -j DROP"] + unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 8081 -j DROP"] expect_res_success_acl_rule(duthost, expected_content_list, unexpected_content_list) finally: delete_tmpfile(duthost, tmpfile) @@ -465,13 +548,21 @@ def cacl_tc2_add_rule_to_unexisted_table(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_remove_table_before_rule(duthost): +def cacl_tc2_remove_table_before_rule(duthost, protocol): """ Remove acl table before removing acl rule """ + if protocol == 'SSH': + table = 'SSH_ONLY' + elif protocol == 'SNMP': + table = 'SNMP_ACL' + elif protocol == 'NTP': + table = 'NTP_ACL' + elif protocol == 'EXTERNAL_CLIENT': + table = 'EXTERNAL_CLIENT_ACL' json_patch = [ { "op": "remove", - "path": "/ACL_TABLE/SSH_ONLY" + "path": "/ACL_TABLE/{}".format(table) } ] @@ -485,13 +576,21 @@ def cacl_tc2_remove_table_before_rule(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_remove_unexist_rule(duthost): +def cacl_tc2_remove_unexist_rule(duthost, protocol): """ 
Remove unexisted acl rule """ + if protocol == 'SSH': + table = 'SSH_ONLY' + elif protocol == 'SNMP': + table = 'SNMP_ACL' + elif protocol == 'NTP': + table = 'NTP_ACL' + elif protocol == 'EXTERNAL_CLIENT': + table = 'EXTERNAL_CLIENT_ACL' json_patch = [ { "op": "remove", - "path": "/ACL_RULE/SSH_ONLY|TEST_DROP2" + "path": "/ACL_RULE/{}|TEST_DROP2".format(table) } ] tmpfile = generate_tmpfile(duthost) @@ -520,18 +619,80 @@ def cacl_tc2_remove_rule(duthost): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - unexpected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 22 -j DROP"] + unexpected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 22 -j DROP", + "-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 161 -j DROP", + "-A INPUT -s 8.8.8.8/32 -p udp -m udp --dport 161 -j DROP", + "-A INPUT -s 8.8.8.8/32 -p tcp -m udp --dport 123 -j DROP", + "-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 8081 -j DROP"] expect_res_success_acl_rule(duthost, [], unexpected_content_list) finally: delete_tmpfile(duthost, tmpfile) +def cacl_external_client_add_new_table(duthost): + """ Add acl table for test + Sample output + admin@vlab-01:~$ show acl table + Name Type Binding Description Stage Status + ---------------------- --------- --------------- ---------------------------- ------- -------- + EXTERNAL_CLIENT_ACL CTRLPLANE EXTERNAL_CLIENT EXTERNAL_CLIENT_ACL ingress Active + """ + json_patch = [ + { + "op": "add", + "path": "/ACL_TABLE/EXTERNAL_CLIENT_ACL", + "value": { + "policy_desc": "EXTERNAL_CLIENT_ACL", + "services": [ + "EXTERNAL_CLIENT" + ], + "stage": "ingress", + "type": "CTRLPLANE" + } + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_success(duthost, output) + + expected_content_list = ["EXTERNAL_CLIENT_ACL", "CTRLPLANE", "EXTERNAL_CLIENT", + 
"EXTERNAL_CLIENT_ACL", "ingress"] + expect_acl_table_match(duthost, "EXTERNAL_CLIENT_ACL", expected_content_list) + finally: + delete_tmpfile(duthost, tmpfile) + + +@pytest.fixture(scope="module", params=["SSH", "NTP", "SNMP", "EXTERNAL_CLIENT"]) +def cacl_protocol(request): # noqa F811 + """ + Return the protocol to be tested + """ + return request.param + + +def test_cacl_tc1_acl_table_suite(cacl_protocol, rand_selected_dut): + logger.info("Test acl table for protocol {}".format(cacl_protocol)) + cacl_tc1_add_new_table(rand_selected_dut, cacl_protocol) + cacl_tc1_add_duplicate_table(rand_selected_dut, cacl_protocol) + cacl_tc1_replace_table_variable(rand_selected_dut, cacl_protocol) + cacl_tc1_add_invalid_table(rand_selected_dut, cacl_protocol) + cacl_tc1_remove_unexisted_table(rand_selected_dut) + cacl_tc1_remove_table(rand_selected_dut, cacl_protocol) + + # ACL_RULE tests are related. So group them into one test. -def test_cacl_tc2_acl_rule_test(rand_selected_dut): - cacl_tc2_add_init_rule(rand_selected_dut) - cacl_tc2_add_duplicate_rule(rand_selected_dut) - cacl_tc2_replace_rule(rand_selected_dut) +def test_cacl_tc2_acl_rule_test(cacl_protocol, rand_selected_dut): + logger.info("Test acl table for protocol {}".format(cacl_protocol)) + if cacl_protocol == 'EXTERNAL_CLIENT': + cacl_external_client_add_new_table(rand_selected_dut) + cacl_tc2_add_init_rule(rand_selected_dut, cacl_protocol) + cacl_tc2_add_duplicate_rule(rand_selected_dut, cacl_protocol) + cacl_tc2_replace_rule(rand_selected_dut, cacl_protocol) cacl_tc2_add_rule_to_unexisted_table(rand_selected_dut) - cacl_tc2_remove_table_before_rule(rand_selected_dut) - cacl_tc2_remove_unexist_rule(rand_selected_dut) + cacl_tc2_remove_table_before_rule(rand_selected_dut, cacl_protocol) + cacl_tc2_remove_unexist_rule(rand_selected_dut, cacl_protocol) cacl_tc2_remove_rule(rand_selected_dut) diff --git a/tests/generic_config_updater/test_pfcwd_status.py b/tests/generic_config_updater/test_pfcwd_status.py index 
7af82555a92..52ba9a661e6 100644 --- a/tests/generic_config_updater/test_pfcwd_status.py +++ b/tests/generic_config_updater/test_pfcwd_status.py @@ -13,8 +13,7 @@ from tests.generic_config_updater.gu_utils import is_valid_platform_and_version pytestmark = [ - pytest.mark.topology('any'), - pytest.mark.device_type('physical') + pytest.mark.topology('any') ] logger = logging.getLogger(__name__) @@ -37,6 +36,12 @@ def ignore_expected_loganalyzer_exceptions(duthosts, loganalyzer): '.*ERR syncd#syncd:.*SAI_API_SWITCH:sai_bulk_object_get_stats.* ', ] ) + if duthost.facts["asic_type"] == "vs": + loganalyzer[duthost.hostname].ignore_regex.extend( + [ + '.*ERR syncd#syncd: :- queryStatsCapability: failed to find switch oid:.* in switch state map' + ] + ) return diff --git a/tests/ip/test_ip_packet.py b/tests/ip/test_ip_packet.py index 56b840a62d1..88c413d37dd 100644 --- a/tests/ip/test_ip_packet.py +++ b/tests/ip/test_ip_packet.py @@ -11,6 +11,9 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.portstat_utilities import parse_column_positions from tests.common.portstat_utilities import parse_portstat +# from tests.common.fixtures.ptfhost_utils import skip_traffic_test # noqa F401 +# Temporary work around to add skip_traffic_test fixture from duthost_utils +from tests.common.fixtures.duthost_utils import skip_traffic_test # noqa: F401 from tests.drop_packets.drop_packets import is_mellanox_fanout @@ -186,7 +189,7 @@ def common_param(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbin .format(prefix, selected_peer_ip_ifaces_pairs[1][0]), ptf_port_idx_namespace)) def test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN send the packet to DUT # THEN DUT should forward it as normal ip packet @@ -242,6 +245,8 @@ def 
test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_ tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(tx_ok >= self.PKT_NUM_MIN, @@ -255,7 +260,7 @@ def test_forward_ip_packet_with_0x0000_chksum(self, duthosts, enum_rand_one_per_ .format(tx_ok, match_cnt)) def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN manually set checksum as 0xffff and send the packet to DUT # THEN DUT should tolerant packet with 0xffff, forward it as normal packet @@ -311,6 +316,8 @@ def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(tx_ok >= self.PKT_NUM_MIN, @@ -325,7 +332,7 @@ def test_forward_ip_packet_with_0xffff_chksum_tolerant(self, duthosts, enum_rand def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, enum_rand_one_per_hwsku_frontend_hostname, ptfadapter, - common_param, tbinfo): + common_param, tbinfo, skip_traffic_test): # noqa F811 # GIVEN a ip packet with checksum 0x0000(compute from scratch) # WHEN manually set checksum as 0xffff and send the packet to DUT @@ -391,6 +398,8 @@ def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, 
logger.info("Setting PKT_NUM_ZERO for t2 max topology with 0.2 tolerance") self.PKT_NUM_ZERO = self.PKT_NUM * 0.2 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(max(rx_drp, rx_err) >= self.PKT_NUM_MIN, @@ -404,7 +413,7 @@ def test_forward_ip_packet_with_0xffff_chksum_drop(self, duthosts, localhost, .format(match_cnt)) def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a ip packet, after forwarded(ttl-1) by DUT, # it's checksum will be 0xffff after wrongly incrementally recomputed # ref to https://datatracker.ietf.org/doc/html/rfc1624 @@ -462,6 +471,8 @@ def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_on tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(tx_ok >= self.PKT_NUM_MIN, @@ -475,7 +486,7 @@ def test_forward_ip_packet_recomputed_0xffff_chksum(self, duthosts, enum_rand_on .format(tx_ok, match_cnt)) def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a ip packet, after forwarded(ttl-1) by DUT, it's checksum will be 0x0000 after recompute from scratch # WHEN send the packet to DUT # THEN DUT recompute new checksum as 0x0000 and forward packet as expected. 
@@ -530,6 +541,8 @@ def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_on tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(tx_ok >= self.PKT_NUM_MIN, @@ -543,7 +556,7 @@ def test_forward_ip_packet_recomputed_0x0000_chksum(self, duthosts, enum_rand_on .format(tx_ok, match_cnt)) def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a random normal ip packet # WHEN send the packet to DUT # THEN DUT should forward it as normal ip packet, nothing change but ttl-1 @@ -591,6 +604,8 @@ def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_fronte tx_drp = TestIPPacket.sum_ifaces_counts(portstat_out, out_ifaces, "tx_drp") tx_err = TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(tx_ok >= self.PKT_NUM_MIN, @@ -604,7 +619,7 @@ def test_forward_normal_ip_packet(self, duthosts, enum_rand_one_per_hwsku_fronte .format(tx_ok, match_cnt)) def test_drop_ip_packet_with_wrong_0xffff_chksum(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - ptfadapter, common_param): + ptfadapter, common_param, skip_traffic_test): # noqa F811 # GIVEN a random normal ip packet, and manually modify checksum to 0xffff # WHEN send the packet to DUT # THEN DUT should drop it and add drop count @@ -645,6 +660,8 @@ def test_drop_ip_packet_with_wrong_0xffff_chksum(self, duthosts, enum_rand_one_p tx_err = 
TestIPPacket.sum_ifaces_counts(rif_counter_out, out_rif_ifaces, "tx_err") if rif_support else 0 asic_type = duthost.facts['asic_type'] + if skip_traffic_test is True: + return pytest_assert(rx_ok >= self.PKT_NUM_MIN, "Received {} packets in rx, not in expected range".format(rx_ok)) pytest_assert(max(rx_drp, rx_err) >= self.PKT_NUM_MIN if asic_type not in ["marvell"] else True, diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py index 1c462271bb5..0e07ce18111 100644 --- a/tests/ipfwd/test_dir_bcast.py +++ b/tests/ipfwd/test_dir_bcast.py @@ -2,7 +2,7 @@ import json import logging -from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 +from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory, skip_traffic_test # noqa F401 from tests.ptf_runner import ptf_runner from datetime import datetime from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # noqa F401 @@ -65,7 +65,7 @@ def ptf_test_port_map(duthost, ptfhost, mg_facts, testbed_type, tbinfo): def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor_m, skip_traffic_test): # noqa F811 duthost = duthosts[rand_one_dut_hostname] testbed_type = tbinfo['topo']['name'] @@ -81,6 +81,8 @@ def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, 'ptf_test_port_map': PTF_TEST_PORT_MAP } log_file = "/tmp/dir_bcast.BcastTest.{}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")) + if skip_traffic_test is True: + return ptf_runner( ptfhost, 'ptftests', diff --git a/tests/ixia/ecn/test_red_accuracy.py b/tests/ixia/ecn/test_red_accuracy.py index 670772ce717..68b80b58b05 100644 --- a/tests/ixia/ecn/test_red_accuracy.py +++ b/tests/ixia/ecn/test_red_accuracy.py @@ -12,6 +12,7 @@ pytestmark = [pytest.mark.topology('tgen')] + def test_red_accuracy(request, ixia_api, 
ixia_testbed_config, conn_graph_facts, # noqa F811 fanout_graph_facts, duthosts, localhost, # noqa F811 rand_one_dut_hostname, rand_one_dut_portname_oper_up, diff --git a/tests/ixia/pfc/test_global_pause.py b/tests/ixia/pfc/test_global_pause.py index b9e9503017a..e5c79954d11 100644 --- a/tests/ixia/pfc/test_global_pause.py +++ b/tests/ixia/pfc/test_global_pause.py @@ -11,6 +11,7 @@ pytestmark = [pytest.mark.topology('tgen')] + def test_global_pause(ixia_api, ixia_testbed_config, conn_graph_facts, fanout_graph_facts, # noqa F811 duthosts, rand_one_dut_hostname, rand_one_dut_portname_oper_up, lossless_prio_list, lossy_prio_list, prio_dscp_map): # noqa F811 diff --git a/tests/pc/test_retry_count.py b/tests/pc/test_retry_count.py index 60404ec1faf..77c5925c3fa 100644 --- a/tests/pc/test_retry_count.py +++ b/tests/pc/test_retry_count.py @@ -291,7 +291,8 @@ def test_kill_teamd_lag_up(self, duthost, nbrhosts, higher_retry_count_on_peers, pytest_assert(not status["runner"]["selected"], "lag member is still up") -def test_peer_retry_count_disabled(duthost, nbrhosts, higher_retry_count_on_peers, disable_retry_count_on_peer): +def test_peer_retry_count_disabled(duthost, nbrhosts, higher_retry_count_on_peers, disable_retry_count_on_peer, + collect_techsupport_all_nbrs): """ Test that peers reset the retry count to 3 when the feature is disabled """ @@ -364,7 +365,8 @@ def test_kill_teamd_peer_lag_up(self, duthost, nbrhosts, higher_retry_count_on_p pytest_assert(not status["runner"]["selected"], "lag member is still up") -def test_dut_retry_count_disabled(duthost, nbrhosts, higher_retry_count_on_dut, disable_retry_count_on_dut): +def test_dut_retry_count_disabled(duthost, nbrhosts, higher_retry_count_on_dut, disable_retry_count_on_dut, + collect_techsupport_all_nbrs): """ Test that DUT resets the retry count to 3 when the feature is disabled """ diff --git a/tests/platform_tests/test_kdump.py b/tests/platform_tests/test_kdump.py index 34639446316..dc0b8291c56 100644 --- 
a/tests/platform_tests/test_kdump.py +++ b/tests/platform_tests/test_kdump.py @@ -64,10 +64,8 @@ def test_kernel_panic(self, duthosts, enum_rand_one_per_hwsku_hostname, localhos if "Enabled" not in out["stdout"]: pytest.skip('DUT {}: Skip test since kdump is not enabled'.format(hostname)) - reboot(duthost, localhost, reboot_type=REBOOT_TYPE_KERNEL_PANIC) + reboot(duthost, localhost, reboot_type=REBOOT_TYPE_KERNEL_PANIC, safe_reboot=True) - # Wait until all critical processes are healthy. - wait_critical_processes(duthost) check_interfaces_and_services(duthost, conn_graph_facts["device_conn"][hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_KERNEL_PANIC) self.wait_lc_healthy_if_sup(duthost, duthosts, localhost, conn_graph_facts, xcvr_skip_list) diff --git a/tests/platform_tests/test_link_down.py b/tests/platform_tests/test_link_down.py index dca759ee7f3..b4e34138e65 100644 --- a/tests/platform_tests/test_link_down.py +++ b/tests/platform_tests/test_link_down.py @@ -19,7 +19,7 @@ logger = logging.getLogger(__name__) pytestmark = [ - pytest.mark.topology('t2'), + pytest.mark.topology('t0', 't1', 't2'), pytest.mark.disable_loganalyzer, ] diff --git a/tests/qos/test_qos_dscp_mapping.py b/tests/qos/test_qos_dscp_mapping.py index c4c9f40c684..44638aa6b57 100644 --- a/tests/qos/test_qos_dscp_mapping.py +++ b/tests/qos/test_qos_dscp_mapping.py @@ -15,7 +15,7 @@ from tests.common.helpers.ptf_tests_helper import downstream_links, upstream_links, select_random_link,\ get_stream_ptf_ports, get_dut_pair_port_from_ptf_port, apply_dscp_cfg_setup, apply_dscp_cfg_teardown # noqa F401 from tests.common.utilities import get_ipv4_loopback_ip, get_dscp_to_queue_value, find_egress_queue,\ - get_egress_queue_pkt_count_all_prio + get_egress_queue_pkt_count_all_prio, wait_until from tests.common.helpers.assertions import pytest_assert from tests.common.fixtures.duthost_utils import dut_qos_maps_module # noqa F401 @@ -127,8 +127,6 @@ def send_and_verify_traffic(ptfadapter, 
logger.info("Received packet(s) on port {}".format(ptf_dst_port_ids[port_index])) global packet_egressed_success packet_egressed_success = True - # Wait for packets to be processed by the DUT - time.sleep(8) return ptf_dst_port_ids[port_index] except AssertionError as detail: @@ -280,15 +278,10 @@ def _run_test(self, if packet_egressed_success: dut_egress_port = get_dut_pair_port_from_ptf_port(duthost, tbinfo, dst_ptf_port_id) pytest_assert(dut_egress_port, "No egress port on DUT found for ptf port {}".format(dst_ptf_port_id)) + # Wait for the queue counters to be populated. + verification_success = wait_until(60, 2, 0, lambda: find_queue_count_and_value(duthost, + queue_val, dut_egress_port)[0] >= DEFAULT_PKT_COUNT) egress_queue_count, egress_queue_val = find_queue_count_and_value(duthost, queue_val, dut_egress_port) - # Re-poll DUT if queue value could not be accurately found - if egress_queue_val == -1: - time.sleep(2) - egress_queue_count, egress_queue_val = find_queue_count_and_value(duthost, queue_val, - dut_egress_port) - # Due to protocol packets, egress_queue_count can be greater than expected count. 
- verification_success = egress_queue_count >= DEFAULT_PKT_COUNT - if verification_success: logger.info("SUCCESS: Received expected number of packets on queue {}".format(queue_val)) output_table.append([rotating_dscp, queue_val, egress_queue_count, "SUCCESS", queue_val]) diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 6506bf5039e..efd8f9c9892 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -1231,11 +1231,15 @@ def testQosSaiLossyQueueVoq( if not get_src_dst_asic_and_duts['single_asic_test']: pytest.skip("Lossy Queue Voq test is only supported on cisco-8000 single-asic") if "lossy_queue_voq_1" in LossyVoq: - if 'modular_chassis' in get_src_dst_asic_and_duts['src_dut'].facts and\ - get_src_dst_asic_and_duts['src_dut'].facts["modular_chassis"] == "True": - pytest.skip("LossyQueueVoq: This test is skipped since cisco-8000 T2 " - "doesn't support split-voq.") + if ('modular_chassis' in get_src_dst_asic_and_duts['src_dut'].facts and + get_src_dst_asic_and_duts['src_dut'].facts["modular_chassis"] == "True"): + if get_src_dst_asic_and_duts['src_dut'].facts['platform'] != 'x86_64-88_lc0_36fh-r0': + pytest.skip("LossyQueueVoq: This test is skipped since cisco-8000 T2 " + "doesn't support split-voq.") elif "lossy_queue_voq_2" in LossyVoq: + if get_src_dst_asic_and_duts['src_dut'].facts['platform'] == 'x86_64-88_lc0_36fh-r0': + pytest.skip("LossyQueueVoq: lossy_queue_voq_2 test is not applicable " + "for x86_64-88_lc0_36fh-r0, with split-voq.") if not ('modular_chassis' in get_src_dst_asic_and_duts['src_dut'].facts and get_src_dst_asic_and_duts['src_dut'].facts["modular_chassis"] == "True"): pytest.skip("LossyQueueVoq: lossy_queue_voq_2 test is not applicable " diff --git a/tests/telemetry/conftest.py b/tests/telemetry/conftest.py index 3bb360d5c3b..0574c09f242 100644 --- a/tests/telemetry/conftest.py +++ b/tests/telemetry/conftest.py @@ -175,6 +175,10 @@ def test_eventd_healthy(duthosts, enum_rand_one_per_hwsku_hostname, 
ptfhost, set module = __import__("eventd_events") + duthost.shell("systemctl restart eventd") + + py_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, "eventd"), "eventd not started.") + module.test_event(duthost, gnxi_path, ptfhost, DATA_DIR, None) logger.info("Completed test file: {}".format("eventd_events test completed.")) diff --git a/tests/telemetry/events/event_utils.py b/tests/telemetry/events/event_utils.py index 10a33165741..2299ca52d82 100644 --- a/tests/telemetry/events/event_utils.py +++ b/tests/telemetry/events/event_utils.py @@ -12,6 +12,21 @@ PUBLISHED = 1 +def add_test_watchdog_timeout_service(duthost): + logger.info("Adding mock watchdog.service to systemd") + duthost.copy(src="telemetry/events/events_data/test-watchdog-timeout.service", dest="/etc/systemd/system/") + duthost.shell("systemctl daemon-reload") + duthost.shell("systemctl start test-watchdog-timeout.service") + + +def delete_test_watchdog_timeout_service(duthost): + logger.info("Deleting mock test-watchdog-timeout.service") + duthost.shell("systemctl stop test-watchdog-timeout.service", module_ignore_errors=True) + duthost.shell("rm /etc/systemd/system/test-watchdog-timeout.service", module_ignore_errors=True) + duthost.shell("systemctl daemon-reload") + duthost.shell("systemctl reset-failed") + + def backup_monit_config(duthost): logger.info("Backing up monit config files") duthost.shell("cp -f /etc/monit/monitrc ~/") diff --git a/tests/telemetry/events/events_data/test-watchdog-timeout.service b/tests/telemetry/events/events_data/test-watchdog-timeout.service new file mode 100644 index 00000000000..1e9263cae01 --- /dev/null +++ b/tests/telemetry/events/events_data/test-watchdog-timeout.service @@ -0,0 +1,13 @@ +[Unit] +Description=Test Watchdog Timeout + +[Service] +Type=simple +# This service will sleep for 2 minutes therefore not sending the signal to watchdog within the specified 1 min requirement +# Since SIGABRT will be sent after watchdog times out due to no 
signal, it will crash the bash process and dump core +# Added logic to trap the SIGABRT so that no core dump is dropped. +ExecStart=/bin/bash -c 'trap "" SIGABRT; sleep 120' +WatchdogSec=60s + +[Install] +WantedBy=multi-user.target diff --git a/tests/telemetry/events/host_events.py b/tests/telemetry/events/host_events.py index 83703fbc8d5..f546e567160 100644 --- a/tests/telemetry/events/host_events.py +++ b/tests/telemetry/events/host_events.py @@ -4,8 +4,10 @@ import time from run_events_test import run_test from event_utils import backup_monit_config, customize_monit_config, restore_monit_config +from event_utils import add_test_watchdog_timeout_service, delete_test_watchdog_timeout_service from telemetry_utils import trigger_logger from tests.common.helpers.dut_utils import is_container_running +from tests.common.utilities import wait_until logger = logging.getLogger(__name__) tag = "sonic-events-host" @@ -15,6 +17,9 @@ def test_event(duthost, gnxi_path, ptfhost, data_dir, validate_yang): logger.info("Beginning to test host events") run_test(duthost, gnxi_path, ptfhost, data_dir, validate_yang, trigger_kernel_event, "event_kernel.json", "sonic-events-host:event-kernel", tag, False) + run_test(duthost, gnxi_path, ptfhost, data_dir, validate_yang, kill_critical_process, + "process_exited_unexpectedly.json", "sonic-events-host:process-exited-unexpectedly", + tag, False) backup_monit_config(duthost) customize_monit_config( duthost, @@ -38,6 +43,14 @@ def test_event(duthost, gnxi_path, ptfhost, data_dir, validate_yang): "event_down_ctr.json", "sonic-events-host:event-down-ctr", tag, False) finally: restore_monit_config(duthost) + add_test_watchdog_timeout_service(duthost) + try: + # We need to alot flat 60 seconds for watchdog timeout to fire since the timer is set to 60\ + # With a base limit of 30 seconds, we will use 90 seconds + run_test(duthost, gnxi_path, ptfhost, data_dir, validate_yang, None, + "watchdog_timeout.json", "sonic-events-host:watchdog-timeout", 
tag, False, 90) + finally: + delete_test_watchdog_timeout_service(duthost) def trigger_mem_threshold_exceeded_alert(duthost): @@ -55,26 +68,41 @@ def trigger_kernel_event(duthost): trigger_logger(duthost, "zlib decompression failed, data probably corrupt", "kernel") +def is_container_down(duthost, container): + return not is_container_running(duthost, container) + + def get_running_container(duthost): logger.info("Check if acms or snmp container is running") - container = "acms" - container_running = is_container_running(duthost, container) - if not container_running: - container = "snmp" + if is_container_running(duthost, "acms"): + return "acms" + elif is_container_running(duthost, "snmp"): + return "snmp" else: - return container - container_running = is_container_running(duthost, container) - if not container_running: return "" - return container + + +def get_critical_process(duthost): + logger.info("Check if snmpd/bgpd process is running") + if is_container_running(duthost, "snmp"): + pid = duthost.shell("docker exec snmp pgrep -f sonic_ax_impl")["stdout"] + if pid != "": + return pid, "snmp" + if is_container_running(duthost, "bgp"): + pid = duthost.shell("docker exec bgp pgrep -f bgpd")["stdout"] + if pid != "": + return pid, "bgp" + return "", "" def restart_container(duthost): logger.info("Stopping container for event stopped event") container = get_running_container(duthost) assert container != "", "No available container for testing" - + duthost.shell("systemctl reset-failed {}".format(container)) duthost.shell("systemctl restart {}".format(container)) + container_running = wait_until(100, 10, 0, duthost.is_service_fully_started, container) + assert container_running, "{} not running after restart".format(container) def mask_container(duthost): @@ -89,3 +117,26 @@ def mask_container(duthost): duthost.shell("systemctl unmask {}".format(container)) duthost.shell("systemctl restart {}".format(container)) + + +def kill_critical_process(duthost): + 
logger.info("Killing critical process for exited unexpectedly event") + pid, container = get_critical_process(duthost) + assert pid != "", "No available process for testing" + + change_autorestart = False + autorestart = duthost.shell("show feature autorestart {}".format(container))['stdout_lines'] + if "disabled" in str(autorestart): + change_autorestart = True + duthost.shell("config feature autorestart {} enabled".format(container)) + + duthost.shell("docker exec {} kill -9 {}".format(container, pid), module_ignore_errors=True) + + # Wait until specified container is not running because of critical process exit + wait_until(30, 5, 0, is_container_down, duthost, container) + + if change_autorestart: + duthost.shell("config feature autorestart {} disabled".format(container)) + + duthost.shell("systemctl reset-failed {}".format(container), module_ignore_errors=True) + wait_until(100, 10, 0, duthost.is_service_fully_started, container) diff --git a/tests/telemetry/telemetry_utils.py b/tests/telemetry/telemetry_utils.py index 3f10cb5bed5..efb3b23493b 100644 --- a/tests/telemetry/telemetry_utils.py +++ b/tests/telemetry/telemetry_utils.py @@ -154,3 +154,58 @@ def generate_client_cli(duthost, gnxi_path, method=METHOD_GET, xpath="COUNTERS/E if filter_event_regex != "": cmd += " --filter_event_regex {}".format(filter_event_regex) return cmd + + +def unarchive_telemetry_certs(duthost): + # Move all files within old_certs directory to parent certs directory + path = "/etc/sonic/telemetry/" + archive_dir = path + "old_certs" + cmd = "ls {}".format(archive_dir) + filenames = duthost.shell(cmd)['stdout_lines'] + for filename in filenames: + cmd = "mv {}/{} {}".format(archive_dir, filename, path) + duthost.shell(cmd) + duthost.shell("rm -rf {}".format(archive_dir)) + + +def archive_telemetry_certs(duthost): + # Move all files within certs directory to old_certs directory + path = "/etc/sonic/telemetry/" + archive_dir = path + "old_certs" + cmd = "mkdir -p {}".format(archive_dir) + 
duthost.shell(cmd) + cmd = "ls {}".format(path) + filenames = duthost.shell(cmd)['stdout_lines'] + for filename in filenames: + if filename.endswith(".cer") or filename.endswith(".key"): + cmd = "mv {} {}".format(path + filename, archive_dir) + duthost.shell(cmd) + + +def rotate_telemetry_certs(duthost, localhost): + path = "/etc/sonic/telemetry/" + # Create new certs to rotate + cmd = "openssl req \ + -x509 \ + -sha256 \ + -nodes \ + -newkey rsa:2048 \ + -keyout streamingtelemetryserver.key \ + -subj '/CN=ndastreamingservertest' \ + -out streamingtelemetryserver.cer" + localhost.shell(cmd) + cmd = "openssl req \ + -x509 \ + -sha256 \ + -nodes \ + -newkey rsa:2048 \ + -keyout dsmsroot.key \ + -subj '/CN=ndastreamingclienttest' \ + -out dsmsroot.cer" + localhost.shell(cmd) + + # Rotate certs + duthost.copy(src="streamingtelemetryserver.cer", dest=path) + duthost.copy(src="streamingtelemetryserver.key", dest=path) + duthost.copy(src="dsmsroot.cer", dest=path) + duthost.copy(src="dsmsroot.key", dest=path) diff --git a/tests/telemetry/test_telemetry_cert_rotation.py b/tests/telemetry/test_telemetry_cert_rotation.py new file mode 100644 index 00000000000..8910a049e60 --- /dev/null +++ b/tests/telemetry/test_telemetry_cert_rotation.py @@ -0,0 +1,153 @@ +import logging +import pytest + +from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import wait_until, wait_tcp_connection +from tests.common.helpers.gnmi_utils import GNMIEnvironment +from telemetry_utils import generate_client_cli +from telemetry_utils import archive_telemetry_certs, unarchive_telemetry_certs, rotate_telemetry_certs + +pytestmark = [ + pytest.mark.topology('any') +] + +logger = logging.getLogger(__name__) + +METHOD_GET = "get" +SUBMODE_POLL = 2 + +""" + +Testing cert rotation by telemetry + +1. Test that telemetry will stay up without certs +2. Test that when we serve one successful request, delete certs, second request will not work +3. 
Test that when we have no certs, first request will fail, rotate certs, second request will work +4. Test that when we have certs, request will succeed, rotate certs, second request will also succeed + +""" + + +@pytest.mark.parametrize('setup_streaming_telemetry', [False], indirect=True) +def test_telemetry_not_exit(duthosts, rand_one_dut_hostname, setup_streaming_telemetry, localhost): + """ Test that telemetry server will not exit when certs are missing. We will shutdown telemetry, + remove certs and verify that telemetry is up and running. + """ + logger.info("Testing telemetry server will startup without certs") + + duthost = duthosts[rand_one_dut_hostname] + env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) + + # Shutting down telemetry + duthost.service(name=env.gnmi_container, state="stopped") + + # Remove certs + archive_telemetry_certs(duthost) + + # Bring back telemetry + duthost.shell("systemctl reset-failed %s" % (env.gnmi_container), module_ignore_errors=True) + duthost.service(name=env.gnmi_container, state="restarted") + + # Wait until telemetry is active and running + pytest_assert(wait_until(100, 10, 0, duthost.is_service_fully_started, env.gnmi_container), + "%s not started." % (env.gnmi_container)) + + # Restore certs + unarchive_telemetry_certs(duthost) + + # Wait for telemetry server to listen on port + dut_ip = duthost.mgmt_ip + wait_tcp_connection(localhost, dut_ip, env.gnmi_port, timeout_s=60) + + +def test_telemetry_post_cert_del(duthosts, rand_one_dut_hostname, ptfhost, gnxi_path, localhost): + """ Test that telemetry server with certificates will accept requests. + When certs are deleted, subsequent requests will not work. 
+ """ + logger.info("Testing telemetry server post cert add") + + duthost = duthosts[rand_one_dut_hostname] + env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) + + # Initial request should pass with certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd)['rc'] + assert ret == 0, "Telemetry server request should complete with certs" + + # Remove certs + archive_telemetry_certs(duthost) + + # Requests should fail without certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd, module_ignore_errors=True)['rc'] + assert ret != 0, "Telemetry server request should fail without certs" + + # Restore certs + unarchive_telemetry_certs(duthost) + + # Wait for telemetry server to listen on port + dut_ip = duthost.mgmt_ip + wait_tcp_connection(localhost, dut_ip, env.gnmi_port, timeout_s=60) + + +def test_telemetry_post_cert_add(duthosts, rand_one_dut_hostname, ptfhost, gnxi_path, localhost): + """ Test that telemetry server with no certificates will reject requests. + When certs are rotated, subsequent requests will work. 
+ """ + logger.info("Testing telemetry server post cert add") + + duthost = duthosts[rand_one_dut_hostname] + env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) + + # Remove certs + archive_telemetry_certs(duthost) + + # Initial request should fail without certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd, module_ignore_errors=True)['rc'] + assert ret != 0, "Telemetry server request should fail without certs" + + # Rotate certs + rotate_telemetry_certs(duthost, localhost) + + # Wait for telemetry server to listen on port + dut_ip = duthost.mgmt_ip + wait_tcp_connection(localhost, dut_ip, env.gnmi_port, timeout_s=60) + + # Requests should successfully complete with certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd)['rc'] + assert ret == 0, "Telemetry server request should complete with certs" + + +def test_telemetry_cert_rotate(duthosts, rand_one_dut_hostname, ptfhost, gnxi_path, localhost): + """ Test that telemetry server with certs will serve requests. + When certs are rotated, subsequent requests will work. 
+ """ + logger.info("Testing telemetry server cert rotate") + + duthost = duthosts[rand_one_dut_hostname] + env = GNMIEnvironment(duthost, GNMIEnvironment.TELEMETRY_MODE) + + # Initial request should complete with certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd)['rc'] + assert ret == 0, "Telemetry server request should fail without certs" + + # Rotate certs + rotate_telemetry_certs(duthost, localhost) + + # Wait for telemetry server to listen on port + dut_ip = duthost.mgmt_ip + wait_tcp_connection(localhost, dut_ip, env.gnmi_port, timeout_s=60) + + # Requests should successfully complete with certs + cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_GET, + target="OTHERS", xpath="proc/uptime") + ret = ptfhost.shell(cmd)['rc'] + assert ret == 0, "Telemetry server request should complete with certs"