From a5091bba3901582601e800026d219aa872f1315f Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Tue, 9 May 2023 19:09:27 -0400 Subject: [PATCH 01/35] [sonic_sku_create] remove shell=True, replace exit() with sys.exit() (#2816) #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. `sys.exit` is better than `exit`, considered good to use in production code. Ref: https://stackoverflow.com/questions/6501121/difference-between-exit-and-sys-exit-in-python https://stackoverflow.com/questions/19747371/python-exit-commands-why-so-many-and-when-should-each-be-used #### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) Replace `exit()` by `sys.exit()` #### How to verify it Add UT --- scripts/sonic_sku_create.py | 60 ++-- .../config_db_incorrect_platform.json | 12 + .../config_db_invalid_portname.json | 302 ++++++++++++++++++ tests/sku_create_test.py | 300 +++++++++++++++++ 4 files changed, 644 insertions(+), 30 deletions(-) create mode 100644 tests/sku_create_input/2700_files/config_db_incorrect_platform.json create mode 100644 tests/sku_create_input/2700_files/config_db_invalid_portname.json diff --git a/scripts/sonic_sku_create.py b/scripts/sonic_sku_create.py index e32af358f1..123bfd46cb 100755 --- a/scripts/sonic_sku_create.py +++ b/scripts/sonic_sku_create.py @@ -37,7 +37,7 @@ from tabulate import tabulate from lxml import etree as ET from lxml.etree import QName - +from sonic_py_common.general import check_output_pipe minigraph_ns = "Microsoft.Search.Autopilot.Evolution" minigraph_ns1 = "http://schemas.datacontract.org/2004/07/Microsoft.Search.Autopilot.Evolution" @@ -105,10 +105,10 @@ def sku_def_parser(self, sku_def): # Parsing XML sku definition file to extract Interface speed and 
InterfaceName(alias) |/<#> to be used to analyze split configuration # Rest of the fields are used as placeholders for portconfig_dict [name,lanes,SPEED,ALIAS,index] try: - f = open(str(sku_def),"r") + f = open(str(sku_def), "r") except IOError: print("Couldn't open file: " + str(sku_def), file=sys.stderr) - exit(1) + sys.exit(1) element = ET.parse(f) root = element.getroot() @@ -184,7 +184,7 @@ def check_json_lanes_with_bko(self, data, port_idx): int_port_speed = int(port_speed) else: print(port_str, "does not contain speed key, Exiting...", file=sys.stderr) - exit(1) + sys.exit(1) for i in range(1,self.base_lanes): curr_port_str = "Ethernet{:d}".format(port_idx+i) if curr_port_str in data['PORT']: @@ -193,20 +193,20 @@ def check_json_lanes_with_bko(self, data, port_idx): curr_speed = curr_port_dict.get("speed") else: print(curr_port_str, "does not contain speed key, Exiting...", file=sys.stderr) - exit(1) + sys.exit(1) if port_speed != curr_speed: print(curr_port_str, "speed is different from that of ",port_str,", Exiting...", file=sys.stderr) - exit(1) + sys.exit(1) if "alias" in curr_port_dict: curr_alias = curr_port_dict.get("alias") else: print(curr_port_str, "does not contain alias key, Exiting...", file=sys.stderr) - exit(1) + sys.exit(1) if "lanes" in curr_port_dict: curr_lanes = curr_port_dict.get("lanes") else: print(curr_port_str, "does not contain lanes key, Exiting...", file=sys.stderr) - exit(1) + sys.exit(1) port_bmp |= (1< Date: Tue, 9 May 2023 22:34:04 -0400 Subject: [PATCH 02/35] [show][mlnx] replace shell=True, replace xml (#2700) Signed-off-by: maipbui #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. 
#### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Add UT Manual test ``` admin@***:~$ show platform mlnx issu ISSU is enabled admin@***:~$ show platform mlnx sniffer sdk sniffer is disabled ``` --- show/plugins/mlnx.py | 23 +++++++-------- tests/show_mlnx_test.py | 63 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 12 deletions(-) create mode 100644 tests/show_mlnx_test.py diff --git a/show/plugins/mlnx.py b/show/plugins/mlnx.py index cefe0cbd01..04d6a78b0a 100644 --- a/show/plugins/mlnx.py +++ b/show/plugins/mlnx.py @@ -26,7 +26,8 @@ import sys import subprocess import click - import xml.etree.ElementTree as ET + from shlex import join + from lxml import etree as ET from sonic_py_common import device_info except ImportError as e: raise ImportError("%s - required module not found" % str(e)) @@ -46,9 +47,9 @@ def run_command(command, display_cmd=False, ignore_error=False, print_to_console """Run bash command and print output to stdout """ if display_cmd == True: - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) + click.echo(click.style("Running command: ", fg='cyan') + click.style(join(command), fg='green')) - proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) (out, err) = proc.communicate() if len(out) > 0 and print_to_console: @@ -70,9 +71,9 @@ def mlnx(): # get current status of sniffer from conf file def sniffer_status_get(env_variable_name): enabled = False - command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE) + command = ["docker", "exec", CONTAINER_NAME, "bash", "-c", 'touch {}'.format(SNIFFER_CONF_FILE)] run_command(command) - command = 'docker cp 
{} {}'.format(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE) + command = ['docker', 'cp', SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE] run_command(command) conf_file = open(TMP_SNIFFER_CONF_FILE, 'r') for env_variable_string in conf_file: @@ -80,7 +81,7 @@ def sniffer_status_get(env_variable_name): enabled = True break conf_file.close() - command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE) + command = ['rm', '-rf', TMP_SNIFFER_CONF_FILE] run_command(command) return enabled @@ -97,10 +98,8 @@ def is_issu_status_enabled(): # Get the SAI XML path from sai.profile sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH) - DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}' - - command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path) - sai_profile_content, _ = run_command(command, print_to_console=False) + DOCKER_CAT_COMMAND = ['docker', 'exec', CONTAINER_NAME, 'cat', sai_profile_path] + sai_profile_content, _ = run_command(DOCKER_CAT_COMMAND, print_to_console=False) sai_profile_kvs = {} @@ -117,8 +116,8 @@ def is_issu_status_enabled(): sys.exit(1) # Get ISSU from SAI XML - command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path) - sai_xml_content, _ = run_command(command, print_to_console=False) + DOCKER_CAT_COMMAND = ['docker', 'exec', CONTAINER_NAME, 'cat', sai_xml_path] + sai_xml_content, _ = run_command(DOCKER_CAT_COMMAND, print_to_console=False) try: root = ET.fromstring(sai_xml_content) diff --git a/tests/show_mlnx_test.py b/tests/show_mlnx_test.py new file mode 100644 index 0000000000..63e2aea1e4 --- /dev/null +++ b/tests/show_mlnx_test.py @@ -0,0 +1,63 @@ +import sys +import click +import pytest +import show.plugins.mlnx as show +from unittest.mock import call, patch, mock_open, MagicMock + + +class TestShowMlnx(object): + def setup(self): + print('SETUP') + + @patch('click.style') + def test_run_command(self, mock_click): + cmd0 = ['echo', 'test'] + out, err = 
show.run_command(cmd0, display_cmd=True) + + assert mock_click.call_args_list == [call('Running command: ', fg='cyan'), call(' '.join(cmd0), fg='green')] + assert out == 'test\n' + + cmd1 = [sys.executable, "-c", "import sys; sys.exit(6)"] + with pytest.raises(SystemExit) as e: + show.run_command(cmd1) + assert e.value.code == 6 + + @patch('builtins.open', mock_open(read_data=show.ENV_VARIABLE_SX_SNIFFER)) + @patch('show.plugins.mlnx.run_command') + def test_sniffer_status_get_enable(self, mock_runcmd): + expected_calls = [ + call(["docker", "exec", show.CONTAINER_NAME, "bash", "-c", 'touch {}'.format(show.SNIFFER_CONF_FILE)]), + call(['docker', 'cp', show.SNIFFER_CONF_FILE_IN_CONTAINER, show.TMP_SNIFFER_CONF_FILE]), + call(['rm', '-rf', show.TMP_SNIFFER_CONF_FILE]) + ] + + output = show.sniffer_status_get(show.ENV_VARIABLE_SX_SNIFFER) + assert mock_runcmd.call_args_list == expected_calls + assert output + + @patch('builtins.open', mock_open(read_data='not_enable')) + @patch('show.plugins.mlnx.run_command') + def test_sniffer_status_get_disable(self, mock_runcmd): + expected_calls = [ + call(["docker", "exec", show.CONTAINER_NAME, "bash", "-c", 'touch {}'.format(show.SNIFFER_CONF_FILE)]), + call(['docker', 'cp', show.SNIFFER_CONF_FILE_IN_CONTAINER, show.TMP_SNIFFER_CONF_FILE]), + call(['rm', '-rf', show.TMP_SNIFFER_CONF_FILE]) + ] + + output = show.sniffer_status_get(show.ENV_VARIABLE_SX_SNIFFER) + assert mock_runcmd.call_args_list == expected_calls + assert not output + + @patch('show.plugins.mlnx.run_command') + def test_is_issu_status_enabled_systemexit(self, mock_runcmd): + mock_runcmd.return_value = ('key0=value0\n', '') + expected_calls = ['docker', 'exec', show.CONTAINER_NAME, 'cat', r'/{}/sai.profile'.format(show.HWSKU_PATH)] + + with pytest.raises(SystemExit) as e: + show.is_issu_status_enabled() + assert e.value.code == 1 + mock_runcmd.assert_called_with(expected_calls, print_to_console=False) + + def teardown(self): + print('TEARDOWN') + From 
3fb3258806c25b8d60a255ce0508dcd20018bdc6 Mon Sep 17 00:00:00 2001 From: Arvindsrinivasan Lakshmi Narasimhan <55814491+arlakshm@users.noreply.github.com> Date: Wed, 10 May 2023 13:41:58 -0700 Subject: [PATCH 03/35] Revert "[chassis]: remote cli commands infra for sonic chassis (#2701)" (#2832) This reverts commit 7e24463f88ba64f485e8ae5c33e00ab3b152e073. --- rcli/__init__.py | 0 rcli/linecard.py | 151 ---------- rcli/rexec.py | 44 --- rcli/rshell.py | 38 --- rcli/utils.py | 149 ---------- setup.py | 4 - sonic-utilities-data/bash_completion.d/rexec | 21 -- sonic-utilities-data/bash_completion.d/rshell | 21 -- tests/chassis_modules_test.py | 12 +- tests/mock_tables/asic0/state_db.json | 12 - tests/mock_tables/chassis_state_db.json | 9 - tests/mock_tables/database_config.json | 5 - tests/mock_tables/state_db.json | 4 +- tests/remote_cli_test.py | 260 ------------------ 14 files changed, 8 insertions(+), 722 deletions(-) delete mode 100644 rcli/__init__.py delete mode 100644 rcli/linecard.py delete mode 100644 rcli/rexec.py delete mode 100644 rcli/rshell.py delete mode 100644 rcli/utils.py delete mode 100644 sonic-utilities-data/bash_completion.d/rexec delete mode 100644 sonic-utilities-data/bash_completion.d/rshell delete mode 100644 tests/mock_tables/chassis_state_db.json delete mode 100644 tests/remote_cli_test.py diff --git a/rcli/__init__.py b/rcli/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/rcli/linecard.py b/rcli/linecard.py deleted file mode 100644 index fdc6882ed1..0000000000 --- a/rcli/linecard.py +++ /dev/null @@ -1,151 +0,0 @@ -import click -import os -import paramiko -import sys -import select -import socket -import sys -import termios -import tty - -from .utils import get_linecard_ip -from paramiko.py3compat import u -from paramiko import Channel - -EMPTY_OUTPUTS = ['', '\x1b[?2004l\r'] - -class Linecard: - - def __init__(self, linecard_name, username, password): - """ - Initialize Linecard object and store credentials, 
connection, and channel - - :param linecard_name: The name of the linecard you want to connect to - :param username: The username to use to connect to the linecard - :param password: The linecard password. If password not provided, it - will prompt the user for it - :param use_ssh_keys: Whether or not to use SSH keys to authenticate. - """ - self.ip = get_linecard_ip(linecard_name) - - if not self.ip: - sys.exit(1) - - self.linecard_name = linecard_name - self.username = username - self.password = password - - self.connection = self._connect() - - - def _connect(self): - connection = paramiko.SSHClient() - # if ip address not in known_hosts, ignore known_hosts error - connection.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - try: - connection.connect(self.ip, username=self.username, password=self.password) - except paramiko.ssh_exception.NoValidConnectionsError as e: - connection = None - click.echo(e) - return connection - - def _get_password(self): - """ - Prompts the user for a password, and returns the password - - :param username: The username that we want to get the password for - :type username: str - :return: The password for the username. 
- """ - - return getpass( - "Password for username '{}': ".format(self.username), - # Pass in click stdout stream - this is similar to using click.echo - stream=click.get_text_stream('stdout') - ) - - def _set_tty_params(self): - tty.setraw(sys.stdin.fileno()) - tty.setcbreak(sys.stdin.fileno()) - - def _is_data_to_read(self, read): - if self.channel in read: - return True - return False - - def _is_data_to_write(self, read): - if sys.stdin in read: - return True - return False - - def _write_to_terminal(self, data): - # Write channel output to terminal - sys.stdout.write(data) - sys.stdout.flush() - - def _start_interactive_shell(self): - oldtty = termios.tcgetattr(sys.stdin) - try: - self._set_tty_params() - self.channel.settimeout(0.0) - - while True: - #Continuously wait for commands and execute them - read, write, ex = select.select([self.channel, sys.stdin], [], []) - if self._is_data_to_read(read): - try: - # Get output from channel - x = u(self.channel.recv(1024)) - if len(x) == 0: - # logout message will be displayed - break - self._write_to_terminal(x) - except socket.timeout as e: - click.echo("Connection timed out") - break - if self._is_data_to_write(read): - # If we are able to send input, get the input from stdin - x = sys.stdin.read(1) - if len(x) == 0: - break - # Send the input to the channel - self.channel.send(x) - finally: - # Now that the channel has been exited, return to the previously-saved old tty - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) - pass - - - def start_shell(self) -> None: - """ - Opens a session, gets a pseudo-terminal, invokes a shell, and then - attaches the host shell to the remote shell. 
- """ - # Create shell session - self.channel = self.connection.get_transport().open_session() - self.channel.get_pty() - self.channel.invoke_shell() - # Use Paramiko Interactive script to connect to the shell - self._start_interactive_shell() - # After user exits interactive shell, close the connection - self.connection.close() - - - def execute_cmd(self, command) -> str: - """ - Takes a command as an argument, executes it on the remote shell, and returns the output - - :param command: The command to execute on the remote shell - :return: The output of the command. - """ - # Execute the command and gather errors and output - _, stdout, stderr = self.connection.exec_command(command + "\n") - output = stdout.read().decode('utf-8') - - if stderr: - # Error was present, add message to output - output += stderr.read().decode('utf-8') - - # Close connection and return output - self.connection.close() - return output diff --git a/rcli/rexec.py b/rcli/rexec.py deleted file mode 100644 index fb56df8351..0000000000 --- a/rcli/rexec.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import click -import paramiko -import sys - -from .linecard import Linecard -from rcli import utils as rcli_utils -from sonic_py_common import device_info - -@click.command() -@click.argument('linecard_names', nargs=-1, type=str, required=True) -@click.option('-c', '--command', type=str, required=True) -def cli(linecard_names, command): - """ - Executes a command on one or many linecards - - :param linecard_names: A list of linecard names to execute the command on, - use `all` to execute on all linecards. 
- :param command: The command to execute on the linecard(s) - """ - if not device_info.is_chassis(): - click.echo("This commmand is only supported Chassis") - sys.exit(1) - - username = os.getlogin() - password = rcli_utils.get_password(username) - - if list(linecard_names) == ["all"]: - # Get all linecard names using autocompletion helper - linecard_names = rcli_utils.get_all_linecards(None, None, "") - - # Iterate through each linecard, execute command, and gather output - for linecard_name in linecard_names: - try: - lc = Linecard(linecard_name, username, password) - if lc.connection: - # If connection was created, connection exists. Otherwise, user will see an error message. - click.echo("======== {} output: ========".format(lc.linecard_name)) - click.echo(lc.execute_cmd(command)) - except paramiko.ssh_exception.AuthenticationException: - click.echo("Login failed on '{}' with username '{}'".format(linecard_name, lc.username)) - -if __name__=="__main__": - cli(prog_name='rexec') diff --git a/rcli/rshell.py b/rcli/rshell.py deleted file mode 100644 index decda6cd59..0000000000 --- a/rcli/rshell.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import click -import paramiko -import sys - -from .linecard import Linecard -from sonic_py_common import device_info -from rcli import utils as rcli_utils - - -@click.command() -@click.argument('linecard_name', type=str, autocompletion=rcli_utils.get_all_linecards) -def cli(linecard_name): - """ - Open interactive shell for one linecard - - :param linecard_name: The name of the linecard to connect to - """ - if not device_info.is_chassis(): - click.echo("This commmand is only supported Chassis") - sys.exit(1) - - username = os.getlogin() - password = rcli_utils.get_password(username) - - try: - lc =Linecard(linecard_name, username, password) - if lc.connection: - click.echo("Connecting to {}".format(lc.linecard_name)) - # If connection was created, connection exists. Otherwise, user will see an error message. 
- lc.start_shell() - click.echo("Connection Closed") - except paramiko.ssh_exception.AuthenticationException: - click.echo("Login failed on '{}' with username '{}'".format(linecard_name, lc.username)) - - -if __name__=="__main__": - cli(prog_name='rshell') diff --git a/rcli/utils.py b/rcli/utils.py deleted file mode 100644 index 933043d069..0000000000 --- a/rcli/utils.py +++ /dev/null @@ -1,149 +0,0 @@ -import click -from getpass import getpass -import os -import sys - -from swsscommon.swsscommon import SonicV2Connector - -CHASSIS_MODULE_INFO_TABLE = 'CHASSIS_MODULE_TABLE' -CHASSIS_MODULE_INFO_KEY_TEMPLATE = 'CHASSIS_MODULE {}' -CHASSIS_MODULE_INFO_DESC_FIELD = 'desc' -CHASSIS_MODULE_INFO_SLOT_FIELD = 'slot' -CHASSIS_MODULE_INFO_OPERSTATUS_FIELD = 'oper_status' -CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD = 'admin_status' - -CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' -CHASSIS_MIDPLANE_INFO_IP_FIELD = 'ip_address' -CHASSIS_MIDPLANE_INFO_ACCESS_FIELD = 'access' - -CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE' -CHASSIS_MODULE_HOSTNAME = 'module_hostname' - -def connect_to_chassis_state_db(): - chassis_state_db = SonicV2Connector(host="127.0.0.1") - chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB) - return chassis_state_db - - -def connect_state_db(): - state_db = SonicV2Connector(host="127.0.0.1") - state_db.connect(state_db.STATE_DB) - return state_db - - - -def get_linecard_module_name_from_hostname(linecard_name: str): - - chassis_state_db = connect_to_chassis_state_db() - - keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB , '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*')) - for key in keys: - module_name = key.split('|')[1] - hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME) - if hostname.replace('-', '').lower() == linecard_name.replace('-', '').lower(): - return module_name - - return None - -def get_linecard_ip(linecard_name: str): - """ - Given a linecard name, 
lookup its IP address in the midplane table - - :param linecard_name: The name of the linecard you want to connect to - :type linecard_name: str - :return: IP address of the linecard - """ - # Adapted from `show chassis modules midplane-status` command logic: - # https://github.com/sonic-net/sonic-utilities/blob/master/show/chassis_modules.py - - # if the user passes linecard hostname, then try to get the module name for that linecard - module_name = get_linecard_module_name_from_hostname(linecard_name) - # if the module name cannot be found from host, assume the user has passed module name - if module_name is None: - module_name = linecard_name - module_ip, module_access = get_module_ip_and_access_from_state_db(module_name) - - if not module_ip: - click.echo('Linecard {} not found'.format(linecard_name)) - return None - - if module_access != 'True': - click.echo('Linecard {} not accessible'.format(linecard_name)) - return None - - - return module_ip - -def get_module_ip_and_access_from_state_db(module_name): - state_db = connect_state_db() - data_dict = state_db.get_all( - state_db.STATE_DB, '{}|{}'.format(CHASSIS_MIDPLANE_INFO_TABLE,module_name )) - if data_dict is None: - return None, None - - linecard_ip = data_dict.get(CHASSIS_MIDPLANE_INFO_IP_FIELD, None) - access = data_dict.get(CHASSIS_MIDPLANE_INFO_ACCESS_FIELD, None) - - return linecard_ip, access - - -def get_all_linecards(ctx, args, incomplete) -> list: - """ - Return a list of all accessible linecard names. This function is used to - autocomplete linecard names in the CLI. - - :param ctx: The Click context object that is passed to the command function - :param args: The arguments passed to the Click command - :param incomplete: The string that the user has typed so far - :return: A list of all accessible linecard names. 
- """ - # Adapted from `show chassis modules midplane-status` command logic: - # https://github.com/sonic-net/sonic-utilities/blob/master/show/chassis_modules.py - - - chassis_state_db = connect_to_chassis_state_db() - state_db = connect_state_db() - - linecards = [] - keys = state_db.keys(state_db.STATE_DB,'{}|*'.format(CHASSIS_MIDPLANE_INFO_TABLE)) - for key in keys: - key_list = key.split('|') - if len(key_list) != 2: # error data in DB, log it and ignore - click.echo('Warn: Invalid Key {} in {} table'.format(key, CHASSIS_MIDPLANE_INFO_TABLE )) - continue - module_name = key_list[1] - linecard_ip, access = get_module_ip_and_access_from_state_db(module_name) - if linecard_ip is None: - continue - - if access != "True" : - continue - - # get the hostname for this module - hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, module_name), CHASSIS_MODULE_HOSTNAME) - if hostname: - linecards.append(hostname) - else: - linecards.append(module_name) - - # Return a list of all matched linecards - return [lc for lc in linecards if incomplete in lc] - - -def get_password(username=None): - """ - Prompts the user for a password, and returns the password - - :param username: The username that we want to get the password for - :type username: str - :return: The password for the username. 
- """ - - if username is None: - username =os.getlogin() - - return getpass( - "Password for username '{}': ".format(username), - # Pass in click stdout stream - this is similar to using click.echo - stream=click.get_text_stream('stdout') - ) \ No newline at end of file diff --git a/setup.py b/setup.py index cdeddb8f42..ea0e949ab9 100644 --- a/setup.py +++ b/setup.py @@ -74,7 +74,6 @@ 'pddf_thermalutil', 'pddf_ledutil', 'syslog_util', - 'rcli', 'show', 'show.interfaces', 'show.plugins', @@ -207,8 +206,6 @@ 'pddf_psuutil = pddf_psuutil.main:cli', 'pddf_thermalutil = pddf_thermalutil.main:cli', 'pddf_ledutil = pddf_ledutil.main:cli', - 'rexec = rcli.rexec:cli', - 'rshell = rcli.rshell:cli', 'show = show.main:cli', 'sonic-clear = clear.main:cli', 'sonic-installer = sonic_installer.main:sonic_installer', @@ -237,7 +234,6 @@ 'natsort>=6.2.1', # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2 'netaddr>=0.8.0', 'netifaces>=0.10.7', - 'paramiko==2.11.0', 'pexpect>=4.8.0', 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', diff --git a/sonic-utilities-data/bash_completion.d/rexec b/sonic-utilities-data/bash_completion.d/rexec deleted file mode 100644 index 1199fd0676..0000000000 --- a/sonic-utilities-data/bash_completion.d/rexec +++ /dev/null @@ -1,21 +0,0 @@ -_rexec_completion() { - local IFS=$' -' - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ - COMP_CWORD=$COMP_CWORD \ - _REXEC_COMPLETE=complete $1 ) ) - return 0 -} - -_rexec_completionetup() { - local COMPLETION_OPTIONS="" - local BASH_VERSION_ARR=(${BASH_VERSION//./ }) - # Only BASH version 4.4 and later have the nosort option. 
- if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then - COMPLETION_OPTIONS="-o nosort" - fi - - complete $COMPLETION_OPTIONS -F _rexec_completion rexec -} - -_rexec_completionetup; \ No newline at end of file diff --git a/sonic-utilities-data/bash_completion.d/rshell b/sonic-utilities-data/bash_completion.d/rshell deleted file mode 100644 index 012f754dd7..0000000000 --- a/sonic-utilities-data/bash_completion.d/rshell +++ /dev/null @@ -1,21 +0,0 @@ -_rshell_completion() { - local IFS=$' -' - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ - COMP_CWORD=$COMP_CWORD \ - _RSHELL_COMPLETE=complete $1 ) ) - return 0 -} - -_rshell_completionetup() { - local COMPLETION_OPTIONS="" - local BASH_VERSION_ARR=(${BASH_VERSION//./ }) - # Only BASH version 4.4 and later have the nosort option. - if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then - COMPLETION_OPTIONS="-o nosort" - fi - - complete $COMPLETION_OPTIONS -F _rshell_completion rshell -} - -_rshell_completionetup; \ No newline at end of file diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index fa8cd608dd..e6dbe569d2 100644 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -33,11 +33,11 @@ """ show_chassis_midplane_output="""\ - Name IP-Address Reachability ----------- ------------- -------------- -LINE-CARD0 192.168.1.100 True -LINE-CARD1 192.168.1.2 False -LINE-CARD2 192.168.1.1 True + Name IP-Address Reachability +----------- ------------- -------------- + LINE-CARD0 192.168.1.1 True + LINE-CARD1 192.168.1.2 False +SUPERVISOR0 192.168.1.100 True """ show_chassis_system_ports_output_asic0="""\ @@ -225,7 +225,7 @@ def test_midplane_show_all_count_lines(self): result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["midplane-status"], []) print(result.output) result_lines = result.output.strip('\n').split('\n') - 
modules = ["LINE-CARD0", "LINE-CARD1", "LINE-CARD2"] + modules = ["LINE-CARD0", "LINE-CARD1", "SUPERVISOR0"] for i, module in enumerate(modules): assert module in result_lines[i + warning_lines + header_lines] assert len(result_lines) == warning_lines + header_lines + len(modules) diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 6ae0258be0..559af04826 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -287,18 +287,6 @@ "REMOTE_MOD": "0", "REMOTE_PORT": "93" }, - "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { - "ip_address": "127.0.0.1", - "access": "True" - }, - "CHASSIS_MIDPLANE_TABLE|LINE-CARD1": { - "ip_address": "127.0.0.1", - "access": "True" - }, - "CHASSIS_MIDPLANE_TABLE|LINE-CARD2": { - "ip_address": "127.0.0.1", - "access": "False" - }, "ACL_TABLE_TABLE|DATAACL_5" : { "status": "Active" }, diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json deleted file mode 100644 index 5178c49ca0..0000000000 --- a/tests/mock_tables/chassis_state_db.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { - "module_hostname": "sonic-lc1" - }, - "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { - "module_hostname": "sonic-lc2" - } - -} \ No newline at end of file diff --git a/tests/mock_tables/database_config.json b/tests/mock_tables/database_config.json index f55c0734c2..d12ba05414 100644 --- a/tests/mock_tables/database_config.json +++ b/tests/mock_tables/database_config.json @@ -56,11 +56,6 @@ "id" : 12, "separator": "|", "instance" : "redis" - }, - "CHASSIS_STATE_DB" : { - "id" : 13, - "separator": "|", - "instance" : "redis" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 1d8f46297d..cd1a194ba8 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -935,11 +935,11 @@ "max_queues": "20", "max_priority_groups": "8" }, - 
"CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { + "CHASSIS_MIDPLANE_TABLE|SUPERVISOR0": { "ip_address": "192.168.1.100", "access": "True" }, - "CHASSIS_MIDPLANE_TABLE|LINE-CARD2": { + "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { "ip_address": "192.168.1.1", "access": "True" }, diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py deleted file mode 100644 index 67545dd1b3..0000000000 --- a/tests/remote_cli_test.py +++ /dev/null @@ -1,260 +0,0 @@ -import os -from click.testing import CliRunner -import paramiko -from rcli import rexec -from rcli import rshell -from rcli import linecard -from rcli import utils as rcli_utils -import sys -from io import BytesIO, StringIO -from unittest import mock -import select -import socket -import termios - -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== -hello world -======== LINE-CARD2 output: ======== -hello world -''' -REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... - - Executes a command on one or many linecards - - :param linecard_names: A list of linecard names to execute the command on, - use `all` to execute on all linecards. :param command: The command to - execute on the linecard(s) - -Options: - -c, --command TEXT [required] - --help Show this message and exit. 
-''' - -def mock_exec_command(): - - mock_stdout = BytesIO(b"""hello world""") - mock_stderr = BytesIO() - return '', mock_stdout, None - -def mock_exec_error_cmd(): - mock_stdout = BytesIO() - mock_stderr = BytesIO(b"""Command not found""") - return '', mock_stdout, mock_stderr - -def mock_connection_channel(): - c = mock.MagicMock(return_value="channel") - c.get_pty = mock.MagicMock(return_value='') - c.invoke_shell = mock.MagicMock() - c.recv = mock.MagicMock(side_effect=['abcd', '']) - return c - -def mock_connection_channel_with_timeout(): - c = mock.MagicMock(return_value="channel") - c.get_pty = mock.MagicMock(return_value='') - c.invoke_shell = mock.MagicMock() - c.recv = mock.MagicMock(side_effect=['abcd', socket.timeout(10, 'timeout')]) - return c - -def mock_paramiko_connection(channel): - # Create a mock to return for connection. - conn = mock.MagicMock() - #create a mock return for transport - t = mock.MagicMock() - t.open_session = mock.MagicMock(return_value=channel) - conn.get_transport = mock.MagicMock(return_value=t) - conn.connect = mock.MagicMock() - conn.close = mock.MagicMock() - return conn - -class TestRemoteExec(object): - @classmethod - def setup_class(cls): - print("SETUP") - from .mock_tables import dbconnector - dbconnector.load_database_config() - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - #@mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_command())) - def test_rexec_with_module_name(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) - print(result.output) - 
assert result.exit_code == 0, result.output - assert "hello world" in result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_command())) - def test_rexec_with_hostname(self): - runner = CliRunner() - LINECARD_NAME = "sonic-lc1" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) - print(result.output) - assert result.exit_code == 0, result.output - assert "hello world" in result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_error_cmd())) - def test_rexec_error_with_module_name(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) - print(result.output) - assert result.exit_code == 0, result.output - assert "Command not found" in result.output - - def test_rexec_error(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) - print(result.output) - assert result.exit_code == 1, result.output - assert "This commmand is only supported Chassis" in result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", 
mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) - def test_rexec_all(self): - runner = CliRunner() - LINECARD_NAME = "all" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) - print(result.output) - assert result.exit_code == 0, result.output - assert MULTI_LC_REXEC_OUTPUT == result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) - def test_rexec_invalid_lc(self): - runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) - print(result.output) - assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output - - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) - def test_rexec_unreachable_lc(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD1" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) - print(result.output) - assert result.exit_code == 1, result.output - assert "Linecard LINE-CARD1 not accessible\n" == result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - 
@mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) - @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) - def test_rexec_help(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD1" - result = runner.invoke(rexec.cli, ["--help"]) - print(result.output) - assert result.exit_code == 0, result.output - assert REXEC_HELP == result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', - 22): "None" }))) - @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) - def test_rexec_exception(self): - runner = CliRunner() - LINECARD_NAME = "sonic-lc1" - result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) - print(result.output) - assert result.exit_code == 0, result.output - assert "[Errno None] Unable to connect to port 22 on 192.168.0.1\n" == result.output - - -class TestRemoteCLI(object): - @classmethod - def setup_class(cls): - print("SETUP") - from .mock_tables import dbconnector - dbconnector.load_database_config() - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) - @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) - @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) - def 
test_rcli_with_module_name(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - channel = mock_connection_channel() - - with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ - mock.patch('select.select', mock.MagicMock(return_value=([channel], [], []))): - result = runner.invoke(rshell.cli, [LINECARD_NAME]) - print(result.output) - assert result.exit_code == 0, result.output - assert "abcd" in result.output - - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) - @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) - @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) - def test_rcli_with_module_name_2(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - channel = mock_connection_channel() - - with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ - mock.patch('select.select', mock.MagicMock(side_effect=[([], [], []), ([channel], [], []),([channel], [], [])])): - result = runner.invoke(rshell.cli, [LINECARD_NAME]) - print(result.output) - assert result.exit_code == 0, result.output - assert "Connecting to LINE-CARD0" in result.output - - @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) - @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) - @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) - def test_rcli_with_module_name_3(self): - runner = CliRunner() - LINECARD_NAME = 
"LINE-CARD0" - channel = mock_connection_channel_with_timeout() - - with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ - mock.patch('select.select', mock.MagicMock(return_value=([channel], [], []))): - result = runner.invoke(rshell.cli, [LINECARD_NAME]) - print(result.output) - assert result.exit_code == 0, result.output - assert "Connecting to LINE-CARD0" in result.output - - def test_rcli_error(self): - runner = CliRunner() - LINECARD_NAME = "LINE-CARD0" - result = runner.invoke(rshell.cli, [LINECARD_NAME]) - print(result.output) - assert result.exit_code == 1, result.output - assert "This commmand is only supported Chassis" in result.output \ No newline at end of file From 3a9995b6213d893f60898183160bbdf22efca153 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Thu, 11 May 2023 14:03:51 +0800 Subject: [PATCH 04/35] [config]Support multi-asic Golden Config override with fix (#2825) ADO: 17746282 #### What I did Support multi-asic Golden Config Override with fix based on https://github.com/sonic-net/sonic-utilities/pull/2738 #### How I did it Add ConfigMgmt support for ASIC validation. Modify override config cli to support multi-asic. 
#### How to verify it Unit test: ``` tests/config_override_test.py::TestConfigOverrideMultiasic::test_macsec_override PASSED [ 8%] tests/config_override_test.py::TestConfigOverrideMultiasic::test_device_metadata_table_rm PASSED [ 8%] ``` --- config/config_mgmt.py | 19 +++-- config/main.py | 63 ++++++++------- .../multi_asic_dm_rm.json | 11 +++ .../multi_asic_macsec_ov.json | 23 ++++++ tests/config_override_test.py | 77 ++++++++++++++++++- 5 files changed, 159 insertions(+), 34 deletions(-) create mode 100644 tests/config_override_input/multi_asic_dm_rm.json create mode 100644 tests/config_override_input/multi_asic_macsec_ov.json diff --git a/config/config_mgmt.py b/config/config_mgmt.py index a10393c72c..4e3115bd35 100644 --- a/config/config_mgmt.py +++ b/config/config_mgmt.py @@ -35,7 +35,8 @@ class ConfigMgmt(): to verify config for the commands which are capable of change in config DB. ''' - def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True, sonicYangOptions=0): + def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True, + sonicYangOptions=0, configdb=None): ''' Initialise the class, --read the config, --load in data tree. @@ -44,6 +45,7 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True, debug (bool): verbose mode. allowTablesWithoutYang (bool): allow tables without yang model in config or not. + configdb: configdb to work on. 
Returns: void @@ -54,6 +56,7 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True, self.source = source self.allowTablesWithoutYang = allowTablesWithoutYang self.sonicYangOptions = sonicYangOptions + self.configdb = configdb # logging vars self.SYSLOG_IDENTIFIER = "ConfigMgmt" @@ -194,8 +197,11 @@ def readConfigDB(self): self.sysLog(doPrint=True, msg='Reading data from Redis configDb') # Read from config DB on sonic switch data = dict() - configdb = ConfigDBConnector() - configdb.connect() + if self.configdb is None: + configdb = ConfigDBConnector() + configdb.connect() + else: + configdb = self.configdb sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.db_to_output(configdb.get_config())) self.configdbJsonIn = sonic_cfggen.FormatConverter.to_serialized(data) self.sysLog(syslog.LOG_DEBUG, 'Reading Input from ConfigDB {}'.\ @@ -215,8 +221,11 @@ def writeConfigDB(self, jDiff): ''' self.sysLog(doPrint=True, msg='Writing in Config DB') data = dict() - configdb = ConfigDBConnector() - configdb.connect(False) + if self.configdb is None: + configdb = ConfigDBConnector() + configdb.connect(False) + else: + configdb = self.configdb sonic_cfggen.deep_update(data, sonic_cfggen.FormatConverter.to_deserialized(jDiff)) self.sysLog(msg="Write in DB: {}".format(data)) configdb.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) diff --git a/config/main.py b/config/main.py index cfe862af52..5b9bd30013 100644 --- a/config/main.py +++ b/config/main.py @@ -1849,36 +1849,45 @@ def override_config_table(db, input_config_db, dry_run): fg='magenta') sys.exit(1) - config_db = db.cfgdb - - # Read config from configDB - current_config = config_db.get_config() - # Serialize to the same format as json input - sonic_cfggen.FormatConverter.to_serialized(current_config) - - updated_config = update_config(current_config, config_input) + cfgdb_clients = db.cfgdb_clients + + for ns, config_db in cfgdb_clients.items(): + # Read config from configDB + 
current_config = config_db.get_config() + # Serialize to the same format as json input + sonic_cfggen.FormatConverter.to_serialized(current_config) + + if multi_asic.is_multi_asic(): + # Golden Config will use "localhost" to represent host name + if ns == DEFAULT_NAMESPACE: + ns_config_input = config_input["localhost"] + else: + ns_config_input = config_input[ns] + else: + ns_config_input = config_input + updated_config = update_config(current_config, ns_config_input) - yang_enabled = device_info.is_yang_config_validation_enabled(config_db) - if yang_enabled: - # The ConfigMgmt will load YANG and running - # config during initialization. - try: - cm = ConfigMgmt() - cm.validateConfigData() - except Exception as ex: - click.secho("Failed to validate running config. Error: {}".format(ex), fg="magenta") - sys.exit(1) + yang_enabled = device_info.is_yang_config_validation_enabled(config_db) + if yang_enabled: + # The ConfigMgmt will load YANG and running + # config during initialization. + try: + cm = ConfigMgmt(configdb=config_db) + cm.validateConfigData() + except Exception as ex: + click.secho("Failed to validate running config. 
Error: {}".format(ex), fg="magenta") + sys.exit(1) - # Validate input config - validate_config_by_cm(cm, config_input, "config_input") - # Validate updated whole config - validate_config_by_cm(cm, updated_config, "updated_config") + # Validate input config + validate_config_by_cm(cm, ns_config_input, "config_input") + # Validate updated whole config + validate_config_by_cm(cm, updated_config, "updated_config") - if dry_run: - print(json.dumps(updated_config, sort_keys=True, - indent=4, cls=minigraph_encoder)) - else: - override_config_db(config_db, config_input) + if dry_run: + print(json.dumps(updated_config, sort_keys=True, + indent=4, cls=minigraph_encoder)) + else: + override_config_db(config_db, ns_config_input) def validate_config_by_cm(cm, config_json, jname): diff --git a/tests/config_override_input/multi_asic_dm_rm.json b/tests/config_override_input/multi_asic_dm_rm.json new file mode 100644 index 0000000000..a4c0dd5fa7 --- /dev/null +++ b/tests/config_override_input/multi_asic_dm_rm.json @@ -0,0 +1,11 @@ +{ + "localhost": { + "DEVICE_METADATA": {} + }, + "asic0": { + "DEVICE_METADATA": {} + }, + "asic1": { + "DEVICE_METADATA": {} + } +} diff --git a/tests/config_override_input/multi_asic_macsec_ov.json b/tests/config_override_input/multi_asic_macsec_ov.json new file mode 100644 index 0000000000..ba86f6ef60 --- /dev/null +++ b/tests/config_override_input/multi_asic_macsec_ov.json @@ -0,0 +1,23 @@ +{ + "localhost": { + "MACSEC_PROFILE": { + "profile": { + "key": "value" + } + } + }, + "asic0": { + "MACSEC_PROFILE": { + "profile": { + "key": "value" + } + } + }, + "asic1": { + "MACSEC_PROFILE": { + "profile": { + "key": "value" + } + } + } +} diff --git a/tests/config_override_test.py b/tests/config_override_test.py index 1b058ace13..ca14ae75bb 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -1,6 +1,7 @@ import os import json import filecmp +import importlib import config.main as config from click.testing import CliRunner @@ 
-20,6 +21,8 @@ RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") +MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") +MULTI_ASIC_DEVICE_METADATA_RM = os.path.join(DATA_DIR, "multi_asic_dm_rm.json") # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -173,7 +176,7 @@ def test_yang_verification_enabled(self): def is_yang_config_validation_enabled_side_effect(filename): return True - def config_mgmt_side_effect(): + def config_mgmt_side_effect(configdb): return config_mgmt.ConfigMgmt(source=CONFIG_DB_JSON_FILE) db = Db() @@ -232,7 +235,7 @@ def check_yang_verification_failure(self, db, config, running_config, def read_json_file_side_effect(filename): return golden_config - def config_mgmt_side_effect(): + def config_mgmt_side_effect(configdb): return config_mgmt.ConfigMgmt(source=CONFIG_DB_JSON_FILE) # ConfigMgmt will call ConfigDBConnector to load default config_db.json. 
@@ -257,3 +260,73 @@ def teardown_class(cls): print("TEARDOWN") os.environ["UTILITIES_UNIT_TESTING"] = "0" return + + +class TestConfigOverrideMultiasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + return + + def test_macsec_override(self): + def read_json_file_side_effect(filename): + with open(MULTI_ASIC_MACSEC_OV, "r") as f: + macsec_profile = json.load(f) + return macsec_profile + db = Db() + cfgdb_clients = db.cfgdb_clients + + # The profile_content was copied from MULTI_ASIC_MACSEC_OV, where all + # ns sharing the same content: {"profile": {"key": "value"}} + profile_content = {"profile": {"key": "value"}} + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + assert result.exit_code == 0 + + for ns, config_db in cfgdb_clients.items(): + assert config_db.get_config()['MACSEC_PROFILE'] == profile_content + + def test_device_metadata_table_rm(self): + def read_json_file_side_effect(filename): + with open(MULTI_ASIC_DEVICE_METADATA_RM, "r") as f: + device_metadata = json.load(f) + return device_metadata + db = Db() + cfgdb_clients = db.cfgdb_clients + + for ns, config_db in cfgdb_clients.items(): + assert 'DEVICE_METADATA' in config_db.get_config() + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + assert result.exit_code == 0 + + for ns, config_db in cfgdb_clients.items(): 
+ assert 'DEVICE_METADATA' not in config_db.get_config() + + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + return From 1e73632d2f61817a1bcbb79fd595863597c4c304 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Fri, 12 May 2023 18:16:17 -0700 Subject: [PATCH 05/35] [test]: add UT coverage for GCU (#2818) Add GCU UT coverage to ensure each step of patch sorting is valid --- .../gcu_feature_patch_application_test.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index 9a52a04732..3f744e20ca 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -72,6 +72,12 @@ def test_feature_patch_application_failure(self): with self.subTest(name=test_case_name): self.run_single_failure_case_applier(data[test_case_name]) + def create_strict_patch_sorter(self, config): + config_wrapper = self.config_wrapper + config_wrapper.get_config_db_as_json = MagicMock(return_value=config) + patch_wrapper = PatchWrapper(config_wrapper) + return ps.StrictPatchSorter(config_wrapper, patch_wrapper) + def create_patch_applier(self, config): global running_config running_config = copy.deepcopy(config) @@ -86,14 +92,30 @@ def create_patch_applier(self, config): @patch("generic_config_updater.change_applier.set_config") def run_single_success_case_applier(self, data, mock_set, mock_db): current_config = data["current_config"] - mock_set.side_effect = set_entry expected_config = 
data["expected_config"] patch = jsonpatch.JsonPatch(data["patch"]) + + # Test patch applier + mock_set.side_effect = set_entry patch_applier = self.create_patch_applier(current_config) patch_applier.apply(patch) result_config = patch_applier.config_wrapper.get_config_db_as_json() self.assertEqual(expected_config, result_config) + + # Test steps in change applier + sorter = self.create_strict_patch_sorter(current_config) + actual_changes = sorter.sort(patch) + target_config = patch.apply(current_config) + simulated_config = current_config + + for change in actual_changes: + simulated_config = change.apply(simulated_config) + is_valid, error = self.config_wrapper.validate_config_db_config(simulated_config) + self.assertTrue(is_valid, f"Change will produce invalid config. Error: {error}") + + self.assertEqual(target_config, simulated_config) + self.assertEqual(simulated_config, expected_config) @patch("generic_config_updater.change_applier.get_config_db") def run_single_failure_case_applier(self, data, mock_db): From aeb0dbc1d20f3c94e26109105842f0a1409627ce Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Sat, 13 May 2023 09:32:14 +0800 Subject: [PATCH 06/35] Fix the invalid variable issue when set-fips in uboot (#2834) What I did Reproduce the issue: /home/admin# sonic-installer set-fips Command: /usr/bin/fw_setenv linuxargs net.ifnames=0 loopfstype=squashfs loop=image-20220531.27/fs.squashfs systemd.unified_cgroup_hierarchy=0 varlog_size=4096 loglevel=4 sonic_fips=1 Error: illegal character '=' in variable name "loopfstype=squashfs" Work item tracking Microsoft ADO (number only): 22333116 How I did it Add the double quotation marks when calling the command. 
How to verify it It works fine when calling the following command: /usr/bin/fw_setenv linuxargs "net.ifnames=0 loopfstype=squashfs loop=image-20220531.27/fs.squashfs systemd.unified_cgroup_hierarchy=0 varlog_size=4096 loglevel=4 sonic_fips=1" --- sonic_installer/bootloader/uboot.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index e4818e53a9..9420d35644 100644 --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -89,7 +89,8 @@ def set_fips(self, image, enable): cmdline = out.strip() cmdline = re.sub('^linuxargs=', '', cmdline) cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips - run_command('/usr/bin/fw_setenv linuxargs ' + cmdline) + cmdline = '"' + cmdline + '"' + run_command('/usr/bin/fw_setenv linuxargs ' + cmdline ) click.echo('Done') def get_fips(self, image): From 9e510a835543aaaa2b29e7b878ef514254a2ca3a Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Mon, 15 May 2023 10:44:45 -0700 Subject: [PATCH 07/35] [chassis][voq[Add "config fabric port ..." commands and tests. (#2730) Added "config fabric port ..." commands and the tests. This change added following config commands and the test for them. config fabric port isolate #portId# config fabric port unisolate #portId# The above two commands can be used to manually isolate and unisolate a fabric link. 
config fabric port monitor error threshold #crcCells# #rxCells# It sets a fabric link monitoring error threshold config fabric port monitor poll threshold isolation #pollnumber# It sets the number of consecutive polls in which the threshold needs to be detected to isolate a link config fabric port monitor poll threshold recovery #pollnumber# It sets the number of consecutive polls in which no error is detected to unisolate a link --- config/fabric.py | 247 +++++++++++++++++++++++++++++++ config/main.py | 2 + doc/Command-Reference.md | 67 +++++++++ tests/config_fabric_test.py | 95 ++++++++++++ tests/mock_tables/config_db.json | 21 +++ 5 files changed, 432 insertions(+) create mode 100644 config/fabric.py create mode 100644 tests/config_fabric_test.py diff --git a/config/fabric.py b/config/fabric.py new file mode 100644 index 0000000000..a3870589ae --- /dev/null +++ b/config/fabric.py @@ -0,0 +1,247 @@ +import click +import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util +from sonic_py_common import multi_asic +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector + +# +# 'config fabric ...' +# +@click.group(cls=clicommon.AbbreviationGroup) +def fabric(): + """FABRIC-related configuration tasks""" + pass + +# +# 'config fabric port ...' 
+# +@fabric.group(cls=clicommon.AbbreviationGroup) +def port(): + """FABRIC PORT configuration tasks""" + pass + +# +# 'config fabric port isolate [ -n ]' +# +@port.command() +@click.argument('portid', metavar='', required=True) +@multi_asic_util.multi_asic_click_option_namespace +def isolate(portid, namespace): + """FABRIC PORT isolate """ + + ctx = click.get_current_context() + + if not portid.isdigit(): + ctx.fail("Invalid portid") + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail('Must specify asic') + + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Connect to state database + state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) + state_db.connect(state_db.STATE_DB, False) + + # check if the port is actually in use + portName = f'PORT{portid}' + portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName) + if "REMOTE_PORT" not in portStateData: + ctx.fail(f"Port {portid} is not in use") + + # Make sure configuration data exists + portName = f'Fabric{portid}' + portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName) + if not bool(portConfigData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': True}) + +# +# 'config fabric port unisolate [ -n ]' +# +@port.command() +@click.argument('portid', metavar='', required=True) +@multi_asic_util.multi_asic_click_option_namespace +def unisolate(portid, namespace): + """FABRIC PORT unisolate """ + + ctx = click.get_current_context() + + if not portid.isdigit(): + ctx.fail("Invalid portid") + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail('Must specify asic') + + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + 
config_db.connect() + + # Connect to state database + state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) + state_db.connect(state_db.STATE_DB, False) + + # check if the port is actually in use + portName = f'PORT{portid}' + portStateData = state_db.get_all(state_db.STATE_DB, "FABRIC_PORT_TABLE|" + portName) + if "REMOTE_PORT" not in portStateData: + ctx.fail(f"Port {portid} is not in use") + + # Make sure configuration data exists + portName = f'Fabric{portid}' + portConfigData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_PORT|" + portName) + if not bool(portConfigData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_PORT", portName, {'isolateStatus': False}) + +# +# 'config fabric port monitor ...' +# +@port.group(cls=clicommon.AbbreviationGroup) +def monitor(): + """FABRIC PORT MONITOR configuration tasks""" + pass + +# +# 'config fabric port monitor error ...' +# +@monitor.group(cls=clicommon.AbbreviationGroup) +def error(): + """FABRIC PORT MONITOR ERROR configuration tasks""" + pass + +# +# 'config fabric port monitor error threshold ' +# +@error.command('threshold') +@click.argument('crcCells', metavar='', required=True, type=int) +@click.argument('rxcells', metavar='', required=True, type=int) +@multi_asic_util.multi_asic_click_option_namespace +def error_threshold(crccells, rxcells, namespace): + """FABRIC PORT MONITOR ERROR THRESHOLD configuration tasks""" + + ctx = click.get_current_context() + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail('Must specify asic') + + # Check the values + if crccells < 1 or crccells > 1000: + ctx.fail("crcCells must be in range 1...1000") + if rxcells < 10000 or rxcells > 100000000: + ctx.fail("rxCells must be in range 10000...100000000") + + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Connect to state 
database + state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) + state_db.connect(state_db.STATE_DB, False) + + # Make sure configuration data exists + monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA") + if not bool(monitorData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", + {'monErrThreshCrcCells': crccells, 'monErrThreshRxCells': rxcells}) + +# +# 'config fabric port monitor poll ...' +# +@monitor.group(cls=clicommon.AbbreviationGroup) +def poll(): + """FABRIC PORT MONITOR POLL configuration tasks""" + pass + +# +# 'config fabric port monitor poll threshold ...' +# +@poll.group(cls=clicommon.AbbreviationGroup, name='threshold') +def poll_threshold(): + """FABRIC PORT MONITOR POLL THRESHOLD configuration tasks""" + pass + +# +# 'config fabric port monitor poll threshold isolation ' +# +@poll_threshold.command() +@click.argument('pollcount', metavar='', required=True, type=int) +@multi_asic_util.multi_asic_click_option_namespace +def isolation(pollcount, namespace): + """FABRIC PORT MONITOR POLL THRESHOLD configuration tasks""" + + ctx = click.get_current_context() + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail('Must specify asic') + + if pollcount < 1 or pollcount > 10: + ctx.fail("pollCount must be in range 1...10") + + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Connect to state database + state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) + state_db.connect(state_db.STATE_DB, False) + + # Make sure configuration data exists + monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA") + if not bool(monitorData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + 
config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", + {"monPollThreshIsolation": pollcount}) + + +# +# 'config fabric port monitor poll threshold recovery ' +# +@poll_threshold.command() +@click.argument('pollcount', metavar='', required=True, type=int) +@multi_asic_util.multi_asic_click_option_namespace +def recovery(pollcount, namespace): + """FABRIC PORT MONITOR POLL THRESHOLD configuration tasks""" + + ctx = click.get_current_context() + + n_asics = multi_asic.get_num_asics() + if n_asics > 1 and namespace is None: + ctx.fail('Must specify asic') + + if pollcount < 1 or pollcount > 10: + ctx.fail("pollCount must be in range 1...10") + + # Connect to config database + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + # Connect to state database + state_db = SonicV2Connector(use_unix_socket_path=True, namespace=namespace) + state_db.connect(state_db.STATE_DB, False) + + # Make sure configuration data exists + monitorData = config_db.get_all(config_db.CONFIG_DB, "FABRIC_MONITOR|FABRIC_MONITOR_DATA") + if not bool(monitorData): + ctx.fail("Fabric monitor configuration data not present") + + # Update entry + config_db.mod_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA", + {"monPollThreshRecovery": pollcount}) + + diff --git a/config/main.py b/config/main.py index 5b9bd30013..abef5397af 100644 --- a/config/main.py +++ b/config/main.py @@ -42,6 +42,7 @@ from . import chassis_modules from . import console from . import feature +from . import fabric from . import flow_counters from . import kdump from . 
import kube @@ -1181,6 +1182,7 @@ def config(ctx): config.add_command(aaa.radius) config.add_command(chassis_modules.chassis) config.add_command(console.console) +config.add_command(fabric.fabric) config.add_command(feature.feature) config.add_command(flow_counters.flowcnt_route) config.add_command(kdump.kdump) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 86902cd7e7..03c61f1bd4 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -53,6 +53,8 @@ * [ECN](#ecn) * [ECN show commands](#ecn-show-commands) * [ECN config commands](#ecn-config-commands) +* [Fabric](#fabric) + * [Fabric config commands](#fabric-config-commands) * [Feature](#feature) * [Feature show commands](#feature-show-commands) * [Feature config commands](#feature-config-commands) @@ -3287,6 +3289,71 @@ The list of the WRED profile fields that are configurable is listed in the below Go Back To [Beginning of the document](#) or [Beginning of this section](#ecn) +## Fabric + +This section explains all Fabric commands that are supported in SONiC. + +### Fabric config commands + +**config fabric port isolate ** +**config fabric port unisolate ** + +The above two commands can be used to manually isolate and unisolate a fabric link. 
+ +- Usage: + ``` + config fabric port isolate [OPTIONS] + config fabric port unisolate [OPTIONS] + ``` + +- Example: + ``` + admin@sonic:~$ config fabric port isolate 0 -n asic0 + admin@sonic:~$ config fabric port unisolate 0 -n asic0 + ``` + +**config fabric port monitor error threshold ** + +This command sets a fabric link monitoring error threshold + +- Usage: + ``` + config fabric port monitor error threshold [OPTIONS] + ``` + +- Example: + ``` + admin@sonic:~$ config fabric port monitor error threshold 2 61035156 -n asic0 + ``` + +**config fabric port monitor poll threshold isolation ** + +This command sets the number of consecutive polls in which the threshold needs to be detected to isolate a link + +- Usage: + ``` + config fabric port monitor poll threshold isolation [OPTIONS] + ``` + +- Example: + ``` + admin@sonic:~$ config fabric port monitor poll threshold isolation 2 -n asic0 + ``` + +**config fabric port monitor poll threshold recovery ** + +This command sets the number of consecutive polls in which no error is detected to unisolate a link + +- Usage: + ``` + config fabric port monitor poll threshold recovery [OPTIONS] + ``` + +- Example: + ``` + admin@sonic:~$ config fabric port monitor poll threshold recovery 5 -n asic0 + ``` + ## Feature SONiC includes a capability in which Feature state can be enabled/disabled diff --git a/tests/config_fabric_test.py b/tests/config_fabric_test.py new file mode 100644 index 0000000000..1f56ea416a --- /dev/null +++ b/tests/config_fabric_test.py @@ -0,0 +1,95 @@ +import click +import config.main as config +import operator +import os +import pytest +import sys + +from click.testing import CliRunner +from utilities_common.db import Db + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, modules_path) + +@pytest.fixture(scope='module') +def ctx(scope='module'): + db = Db() + obj = 
{'config_db':db.cfgdb, 'namespace': ''} + yield obj + +class TestConfigFabric(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def basic_check(self, command_name, para_list, ctx): + # This function issues command of "config fabric xxxx", + # and returns the result of the command. + runner = CliRunner() + result = runner.invoke(config.config.commands["fabric"].commands[command_name], para_list, obj = ctx) + print(result.output) + return result + + def test_config_isolation(self, ctx): + # Issue command "config fabric port isolate 0", + # check if the result is expected. + result = self.basic_check("port", ["isolate", "0"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric port isolate 1", + # check if the result has the error message as port 1 is not in use. + result = self.basic_check("port", ["isolate", "1"], ctx) + assert "Port 1 is not in use" in result.output + + # Issue command "config fabric port unisolate 0", + # check if the result is expected. + result = self.basic_check("port", ["unisolate", "0"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric port unisolate 1", + # check if the result has the error message as port 1 is not in use. + result = self.basic_check("port", ["unisolate", "1"], ctx) + assert "Port 1 is not in use" in result.output + + def test_config_fabric_monitor_threshold(self, ctx): + # Issue command "config fabric port monitor error threshold <#> <#>" + # with an out of range number, check if the result has the error message. 
+ result = self.basic_check("port", ["monitor", "error", "threshold", "1", "2000"], ctx) + assert "rxCells must be in range 10000...100000000" in result.output + + result = self.basic_check("port", ["monitor", "error", "threshold", "10000", "20000"], ctx) + assert "crcCells must be in range 1...1000" in result.output + + # Issue command "config fabric port monitor error threshold <#> <#>" + # with a number in the range, check if the result is expected. + result = self.basic_check("port", ["monitor", "error", "threshold", "1", "20000"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric port monitor poll threshold isolation <#>" + # with an out of range number, check if the result has the error message. + result = self.basic_check("port", ["monitor", "poll", "threshold", "isolation", "15"], ctx) + assert "pollCount must be in range 1...10" in result.output + + # Issue command "config fabric port monitor poll threshold isolation <#>" + # with a number in the range, check if the result is expected. + result = self.basic_check("port", ["monitor", "poll", "threshold", "isolation", "3"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) + + # Issue command "config fabric port monitor poll threshold recovery <#>" + # with an out of range number, check if the result has the error message. + result = self.basic_check("port", ["monitor", "poll", "threshold", "recovery", "15"], ctx) + assert "pollCount must be in range 1...10" in result.output + + # Issue command "config fabric port monitor poll threshold recovery <#>" + # with a number in the range, check if the result is expected. 
+ result = self.basic_check("port", ["monitor", "poll", "threshold", "recovery", "8"], ctx) + expect_result = 0 + assert operator.eq(result.exit_code, expect_result) diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 2b40668883..986da98a9e 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -2656,5 +2656,26 @@ "dst_port": "Ethernet44", "src_port": "Ethernet40,Ethernet48", "direction": "RX" + }, + "FABRIC_MONITOR|FABRIC_MONITOR_DATA": { + "monErrThreshCrcCells": "1", + "monErrThreshRxCells": "61035156", + "monPollThreshIsolation": "1", + "monPollThreshRecovery": "8" + }, + "FABRIC_PORT|Fabric0": { + "alias": "Fabric0", + "isolateStatus": "False", + "lanes": "0" + }, + "FABRIC_PORT|Fabric1": { + "alias": "Fabric1", + "isolateStatus": "False", + "lanes": "1" + }, + "FABRIC_PORT|Fabric2": { + "alias": "Fabric2", + "isolateStatus": "False", + "lanes": "2" } } From 33d665c4cf31bc8ea201439c6fe6083a07605390 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Mon, 15 May 2023 16:50:32 -0400 Subject: [PATCH 08/35] replace shell=True, replace xml, and replace exit() (#2664) Signed-off-by: maipbui #### What I did The [xml.etree.ElementTree](https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree) module is not secure against maliciously constructed data. `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. `sys.exit` is better than `exit`, considered good to use in production code. Ref: https://stackoverflow.com/questions/6501121/difference-between-exit-and-sys-exit-in-python https://stackoverflow.com/questions/19747371/python-exit-commands-why-so-many-and-when-should-each-be-used #### How I did it Remove xml. Use [lxml](https://pypi.org/project/lxml/) XML parsers package that prevent potentially malicious operation. 
`subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) Replace `exit()` by `sys.exit()` #### How to verify it Pass UT Manual test --- pfcwd/main.py | 16 ++++----- scripts/boot_part | 28 ++++++++++----- scripts/check_db_integrity.py | 6 ++-- scripts/configlet | 5 ++- scripts/disk_check.py | 7 ++-- scripts/dropconfig | 8 ++--- scripts/dump_nat_entries.py | 6 ++-- scripts/ipintutil | 10 +++--- scripts/lldpshow | 15 +++++--- scripts/storyteller | 25 +++++++------ scripts/vnet_route_check.py | 6 ++-- tests/disk_check_test.py | 4 +-- tests/dropconfig_test.py | 51 ++++++++++++++++++++++++++ tests/filter_fdb_entries_test.py | 23 ------------ tests/intfutil_test.py | 10 +++--- tests/lldp_test.py | 9 +++++ tests/pfcwd_test.py | 61 +++++++++++++++++++++++++++++++- 17 files changed, 204 insertions(+), 86 deletions(-) create mode 100644 tests/dropconfig_test.py diff --git a/pfcwd/main.py b/pfcwd/main.py index 76fa31b4fb..7813bbd759 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -243,7 +243,7 @@ def start(self, action, restoration_time, ports, detection_time): click.echo("Failed to run command, invalid options:") for opt in invalid_ports: click.echo(opt) - exit(1) + sys.exit(1) self.start_cmd(action, restoration_time, ports, detection_time) @@ -263,7 +263,7 @@ def verify_pfc_enable_status_per_port(self, port, pfcwd_info): @multi_asic_util.run_on_multi_asic def start_cmd(self, action, restoration_time, ports, detection_time): if os.geteuid() != 0: - exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") all_ports = get_all_ports( self.db, self.multi_asic.current_namespace, @@ -299,7 +299,7 @@ def start_cmd(self, action, restoration_time, ports, detection_time): @multi_asic_util.run_on_multi_asic def interval(self, poll_interval): if os.geteuid() != 0: 
- exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") pfcwd_info = {} if poll_interval is not None: pfcwd_table = self.config_db.get_table(CONFIG_DB_PFC_WD_TABLE_NAME) @@ -331,7 +331,7 @@ def interval(self, poll_interval): poll_interval, entry_min_str ), err=True ) - exit(1) + sys.exit(1) pfcwd_info['POLL_INTERVAL'] = poll_interval self.config_db.mod_entry( @@ -341,7 +341,7 @@ def interval(self, poll_interval): @multi_asic_util.run_on_multi_asic def stop(self, ports): if os.geteuid() != 0: - exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") all_ports = get_all_ports( self.db, self.multi_asic.current_namespace, @@ -359,7 +359,7 @@ def stop(self, ports): @multi_asic_util.run_on_multi_asic def start_default(self): if os.geteuid() != 0: - exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") enable = self.config_db.get_entry('DEVICE_METADATA', 'localhost').get( 'default_pfcwd_status' ) @@ -394,7 +394,7 @@ def start_default(self): @multi_asic_util.run_on_multi_asic def counter_poll(self, counter_poll): if os.geteuid() != 0: - exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") pfcwd_info = {} pfcwd_info['FLEX_COUNTER_STATUS'] = counter_poll self.config_db.mod_entry("FLEX_COUNTER_TABLE", "PFCWD", pfcwd_info) @@ -402,7 +402,7 @@ def counter_poll(self, counter_poll): @multi_asic_util.run_on_multi_asic def big_red_switch(self, big_red_switch): if os.geteuid() != 0: - exit("Root privileges are required for this operation") + sys.exit("Root privileges are required for this operation") pfcwd_info = {} if big_red_switch is not None: pfcwd_info['BIG_RED_SWITCH'] = big_red_switch diff --git a/scripts/boot_part b/scripts/boot_part index f41950e034..d59f3a94ed 100755 --- a/scripts/boot_part +++ b/scripts/boot_part @@ -4,10 
+4,10 @@ import re import os import sys -import commands import argparse import logging import tempfile +from sonic_py_common.general import getstatusoutput_noshell logging.basicConfig(level=logging.WARN) logger = logging.getLogger(__name__) @@ -20,7 +20,7 @@ re_hex = r'[0-9A-F]' ## String - the standard output of the command, may be empty string def runcmd(cmd): logger.info('runcmd: {0}'.format(cmd)) - rc, out = commands.getstatusoutput(cmd) + rc, out = getstatusoutput_noshell(cmd) if rc == 0: return out else: @@ -29,7 +29,7 @@ def print_partitions(blkdev): assert blkdev - out = runcmd('sudo lsblk -r -o PARTLABEL,NAME') + out = runcmd(['sudo', 'lsblk', '-r', '-o', 'PARTLABEL,NAME']) ## Parse command output and print found_table = False for line in out.splitlines(): @@ -56,7 +56,7 @@ ## Get the current boot partition index def get_boot_partition(blkdev): - out = runcmd('cat /proc/mounts') + out = runcmd(['cat', '/proc/mounts']) if out is None: return None ## Parse command output and return the current boot partition index @@ -76,16 +76,26 @@ def set_boot_partition(blkdev, index): devnode = blkdev + str(index) mntpath = tempfile.mkdtemp() try: - out = runcmd('sudo mount {0} {1}'.format(devnode, mntpath)) + out = runcmd(['sudo', 'mount', devnode, mntpath]) logger.info('mount out={0}'.format(out)) if out is None: return ## Set GRUB bootable - out = runcmd('sudo grub-install --boot-directory="{0}" --recheck "{1}"'.format(mntpath, blkdev)) + out = runcmd(['sudo', 'grub-install', '--boot-directory='+mntpath, "--recheck", blkdev]) return out is not None finally: ## Cleanup - out = runcmd('sudo fuser -km {0} || sudo umount {0}'.format(mntpath)) - logger.info('fuser out={0}'.format(out)) + cmd1 = ['sudo', 'fuser', '-km', mntpath] + rc1, out1 = getstatusoutput_noshell(cmd1) + if rc1 != 0: + logger.error('Failed to run: {0}\n{1}'.format(cmd1, out1)) + cmd2 = ['sudo', 'umount', mntpath] + rc2, out2 = getstatusoutput_noshell(cmd2) 
+ if rc2 == 0: + logger.info('Running command: {0}\n{1}'.format(' '.join(cmd2), out2)) + else: + logger.error('Failed to run: {0}\n{1}'.format(cmd2, out2)) + else: + logger.info('Running command: {0}\n{1}'.format(' '.join(cmd1), out1)) os.rmdir(mntpath) def main(): @@ -100,7 +110,7 @@ def main(): logger.setLevel(logging.INFO) ## Find ONIE partition and get the block device containing ONIE - out = runcmd("sudo blkid") + out = runcmd(["sudo", "blkid"]) if not out: return -1 for line in out.splitlines(): m = re.match(r'/dev/(\w+)\d+: LABEL="ONIE-BOOT"', line) diff --git a/scripts/check_db_integrity.py b/scripts/check_db_integrity.py index 3a994897b4..57486038d6 100755 --- a/scripts/check_db_integrity.py +++ b/scripts/check_db_integrity.py @@ -36,9 +36,9 @@ def main(): for db_name, schema in DB_SCHEMA.items(): db_dump_file = "/tmp/{}.json".format(db_name) - dump_db_cmd = "sonic-db-dump -n 'COUNTERS_DB' -y > {}".format(db_dump_file) - p = subprocess.Popen(dump_db_cmd, shell=True, text=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + dump_db_cmd = ["sonic-db-dump", "-n", 'COUNTERS_DB', "-y"] + with open(db_dump_file, 'w') as f: + p = subprocess.Popen(dump_db_cmd, text=True, stdout=f, stderr=subprocess.PIPE) (_, err) = p.communicate() rc = p.wait() if rc != 0: diff --git a/scripts/configlet b/scripts/configlet index bc9ba1cf98..217cf1934d 100755 --- a/scripts/configlet +++ b/scripts/configlet @@ -1,4 +1,7 @@ #!/usr/bin/env python3 + +import sys + """ JSON based configlet update A tool to update CONFIG-DB with JSON diffs that can update/delete redis-DB. 
@@ -195,7 +198,7 @@ def main(): if not do_act: print("Expect an action update/delete or for debug parse/test\n") parser.print_help() - exit(-1) + sys.exit(-1) for json_file in args.json: with open(json_file, 'r') as stream: diff --git a/scripts/disk_check.py b/scripts/disk_check.py index 0f5f882400..de1557ca2a 100644 --- a/scripts/disk_check.py +++ b/scripts/disk_check.py @@ -83,7 +83,7 @@ def test_writable(dirs): def run_cmd(cmd): - proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE) + proc = subprocess.run(cmd, shell=False, stdout=subprocess.PIPE) ret = proc.returncode if ret: log_err("failed: ret={} cmd={}".format(ret, cmd)) @@ -120,9 +120,8 @@ def do_mnt(dirs): os.mkdir(d_upper) os.mkdir(d_work) - ret = run_cmd("mount -t overlay overlay_{} -o lowerdir={}," - "upperdir={},workdir={} {}".format( - d_name, d, d_upper, d_work, d)) + ret = run_cmd(["mount", "-t", "overlay", "overlay_{}".format(d_name),\ + "-o", "lowerdir={},upperdir={},workdir={}".format(d, d_upper, d_work), d]) if ret: break diff --git a/scripts/dropconfig b/scripts/dropconfig index b7a86043a5..180c6166c6 100755 --- a/scripts/dropconfig +++ b/scripts/dropconfig @@ -375,25 +375,25 @@ Examples: reasons) except InvalidArgumentError as err: print('Encountered error trying to install counter: {}'.format(err.message)) - exit(1) + sys.exit(1) elif command == 'uninstall': try: dconfig.delete_counter(name) except InvalidArgumentError as err: print('Encountered error trying to uninstall counter: {}'.format(err.message)) - exit(1) + sys.exit(1) elif command == 'add': try: dconfig.add_reasons(name, reasons) except InvalidArgumentError as err: print('Encountered error trying to add reasons: {}'.format(err.message)) - exit(1) + sys.exit(1) elif command == 'remove': try: dconfig.remove_reasons(name, reasons) except InvalidArgumentError as err: print('Encountered error trying to remove reasons: {}'.format(err.message)) - exit(1) + sys.exit(1) elif command == 'show_config': 
dconfig.print_counter_config(group) elif command == 'show_capabilities': diff --git a/scripts/dump_nat_entries.py b/scripts/dump_nat_entries.py index 3ab99f245a..6a2bff5d17 100644 --- a/scripts/dump_nat_entries.py +++ b/scripts/dump_nat_entries.py @@ -8,8 +8,10 @@ import subprocess def main(): - ctdumpcmd = 'conntrack -L -j > /host/warmboot/nat/nat_entries.dump' - p = subprocess.Popen(ctdumpcmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + ctdumpcmd = ['conntrack', '-L', '-j'] + file = '/host/warmboot/nat/nat_entries.dump' + with open(file, 'w') as f: + p = subprocess.Popen(ctdumpcmd, text=True, stdout=f, stderr=subprocess.PIPE) (output, err) = p.communicate() rc = p.wait() diff --git a/scripts/ipintutil b/scripts/ipintutil index c61c622a87..5535bce7e7 100755 --- a/scripts/ipintutil +++ b/scripts/ipintutil @@ -72,13 +72,12 @@ def get_if_admin_state(iface, namespace): """ Given an interface name, return its admin state reported by the kernel """ - cmd = "cat /sys/class/net/{0}/flags".format(iface) + cmd = ["cat", "/sys/class/net/{0}/flags".format(iface)] if namespace != constants.DEFAULT_NAMESPACE: - cmd = "sudo ip netns exec {} {}".format(namespace, cmd) + cmd = ["sudo", "ip", "netns", "exec", namespace] + cmd try: proc = subprocess.Popen( cmd, - shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, text=True) @@ -105,13 +104,12 @@ def get_if_oper_state(iface, namespace): """ Given an interface name, return its oper state reported by the kernel. 
""" - cmd = "cat /sys/class/net/{0}/carrier".format(iface) + cmd = ["cat", "/sys/class/net/{0}/carrier".format(iface)] if namespace != constants.DEFAULT_NAMESPACE: - cmd = "sudo ip netns exec {} {}".format(namespace, cmd) + cmd = ["sudo", "ip", "netns", "exec", namespace] + cmd try: proc = subprocess.Popen( cmd, - shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, text=True) diff --git a/scripts/lldpshow b/scripts/lldpshow index c30f4c5f3f..e09176cf3c 100755 --- a/scripts/lldpshow +++ b/scripts/lldpshow @@ -23,7 +23,7 @@ import argparse import re import subprocess import sys -import xml.etree.ElementTree as ET +from lxml import etree as ET from sonic_py_common import device_info from swsscommon.swsscommon import ConfigDBConnector @@ -80,9 +80,14 @@ class Lldpshow(object): lldp_interface_list = lldp_port if lldp_port is not None else self.lldp_interface[lldp_instace_num] # In detail mode we will pass interface list (only front ports) and get O/P as plain text # and in table format we will get xml output - lldp_cmd = 'sudo docker exec -i lldp{} lldpctl '.format(self.lldp_instance[lldp_instace_num]) + ( - '-f xml' if not lldp_detail_info else lldp_interface_list) - p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, shell=True, text=True) + if not lldp_detail_info: + lldp_args = ['-f', 'xml'] + elif lldp_interface_list == '': + lldp_args = [] + else: + lldp_args = [lldp_interface_list] + lldp_cmd = ['sudo', 'docker', 'exec', '-i', 'lldp{}'.format(self.lldp_instance[lldp_instace_num]), 'lldpctl'] + lldp_args + p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, text=True) (output, err) = p.communicate() ## Wait for end of command. 
Get return returncode ## returncode = p.wait() @@ -121,7 +126,7 @@ class Lldpshow(object): if lldp_detail_info: return for lldpraw in self.lldpraw: - neis = ET.fromstring(lldpraw) + neis = ET.fromstring(lldpraw.encode()) intfs = neis.findall('interface') for intf in intfs: l_intf = intf.attrib['name'] diff --git a/scripts/storyteller b/scripts/storyteller index 5d1420673f..1fb78e5f6a 100755 --- a/scripts/storyteller +++ b/scripts/storyteller @@ -11,6 +11,7 @@ import subprocess import sys from shlex import quote +from sonic_py_common.general import getstatusoutput_noshell_pipe regex_dict = { 'acl' : r'acl\|ACL\|Acl', @@ -28,7 +29,7 @@ reference_file = '/tmp/storyteller_time_reference' def exec_cmd(cmd): # Use universal_newlines (instead of text) so that this tool can work with any python versions. - out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, universal_newlines=True) + out = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) stdout, stderr = out.communicate() return out.returncode, stdout, stderr @@ -36,23 +37,27 @@ def exec_cmd(cmd): def build_options(after=0, before=0, context=0): options = [] if after: - options.append('-A {}'.format(after)) + options += ['-A', str(after)] if before: - options.append('-B {}'.format(before)) + options += ['-B', str(before)] if context: - options.append('-C {}'.format(context)) + options += ['-C', str(context)] - return ' '.join(x for x in options) + return options def find_log(logpath, log, regex, after=0, before=0, context=0, field=0): options = build_options(after, before, context) if field <= 0: - cmd = 'find -L {}/{}* -newer {} | xargs ls -rt | xargs zgrep -a {} "{}"'.format(logpath, log, reference_file, options, regex) + cmd0 = ['find', logpath, "-name", "{}*".format(log), "-newer", reference_file] + cmd1 = ["xargs", "ls", "-rt"] + cmd2 = ["xargs", "zgrep", "-a"] + options + [regex] else: - cmd = 'find -L {0}/{1}* -newer {2} | sort -rn 
-t . -k {3},{3} | xargs zgrep -a {4} "{5}"'.format(logpath, log, reference_file, field, options, regex) + cmd0 = ['find', logpath, "-name", "{}*".format(log), "-newer", reference_file] + cmd1 = ["sort", "-rn", "-t", ".", "-k", "{0},{0}".format(field)] + cmd2 = ["xargs", "zgrep", "-a"] + options + [regex] - _, out, _ = exec_cmd(cmd) + _, out = getstatusoutput_noshell_pipe(cmd0, cmd1, cmd2) ''' Opportunity to improve: output (out) can be split to lines and send to a filter to @@ -71,12 +76,12 @@ def build_regex(category): def configure_time_filter(since): - ret_code, _, _ = exec_cmd('date --date {}'.format(since)) + ret_code, _, _ = exec_cmd(['date', '--date', since]) if ret_code: print('invalid date "{}"'.format(since)) sys.exit(1) - exec_cmd('touch --date "{}" {}'.format(since, reference_file)) + exec_cmd(['touch', '--date', since, reference_file]) def main(): diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py index dcd56af186..d925427d40 100755 --- a/scripts/vnet_route_check.py +++ b/scripts/vnet_route_check.py @@ -1,9 +1,9 @@ #!/usr/bin/env python -import os import sys import json import syslog +import subprocess from swsscommon import swsscommon ''' vnet_route_check.py: tool that verifies VNET routes consistancy between SONiC and vendor SDK DBs. 
@@ -340,7 +340,7 @@ def get_sdk_vnet_routes_diff(routes): ''' routes_diff = {} - res = os.system('docker exec syncd test -f /usr/bin/vnet_route_check.py') + res = subprocess.call(['docker', 'exec', 'syncd', 'test', '-f', '/usr/bin/vnet_route_check.py']) if res != 0: return routes_diff @@ -348,7 +348,7 @@ def get_sdk_vnet_routes_diff(routes): vnet_routes = routes[vnet_name]["routes"] vnet_vrf_oid = routes[vnet_name]["vrf_oid"] - res = os.system('docker exec syncd "/usr/bin/vnet_route_check.py {} {}"'.format(vnet_vrf_oid, vnet_routes)) + res = subprocess.call(['docker', 'exec', 'syncd', "/usr/bin/vnet_route_check.py", vnet_vrf_oid, vnet_routes]) if res: routes_diff[vnet_name] = {} routes_diff[vnet_name]['routes'] = res diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index ce4faad900..82b8b16ff6 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -27,7 +27,7 @@ "workdir": "/tmp/tmpy", "mounts": "overlay_tmpx blahblah", "err": "/tmpx is not read-write|READ-ONLY: Mounted ['/tmpx'] to make Read-Write", - "cmds": ['mount -t overlay overlay_tmpx -o lowerdir=/tmpx,upperdir=/tmp/tmpx/tmpx,workdir=/tmp/tmpy/tmpx /tmpx'] + "cmds": [['mount', '-t', 'overlay', 'overlay_tmpx', '-o', 'lowerdir=/tmpx,upperdir=/tmp/tmpx/tmpx,workdir=/tmp/tmpy/tmpx', '/tmpx']] }, "3": { "desc": "Not good as /tmpx is not read-write; mount fail as create of upper fails", @@ -94,7 +94,7 @@ def __init__(self, proc_upd = None): def mock_subproc_run(cmd, shell, stdout): global cmds - assert shell == True + assert shell == False assert stdout == subprocess.PIPE upd = (current_tc["proc"][len(cmds)] diff --git a/tests/dropconfig_test.py b/tests/dropconfig_test.py new file mode 100644 index 0000000000..1c2dc4b678 --- /dev/null +++ b/tests/dropconfig_test.py @@ -0,0 +1,51 @@ +import os +import pytest +from unittest.mock import call, patch, MagicMock +from utilities_common.general import load_module_from_source + +test_path = os.path.dirname(os.path.abspath(__file__)) 
+modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") + +dropconfig_path = os.path.join(scripts_path, 'dropconfig') +dropconfig = load_module_from_source('dropconfig', dropconfig_path) + +class TestDropConfig(object): + def setup(self): + print('SETUP') + + @patch('builtins.print') + @patch('sys.argv', ['dropconfig', '-c', 'install']) + def test_install_error(self, mock_print): + with pytest.raises(SystemExit) as e: + dropconfig.main() + mock_print.assert_called_once_with('Encountered error trying to install counter: Counter name not provided') + assert e.value.code == 1 + + @patch('builtins.print') + @patch('sys.argv', ['dropconfig', '-c', 'uninstall']) + def test_delete_error(self, mock_print): + with pytest.raises(SystemExit) as e: + dropconfig.main() + mock_print.assert_called_once_with('Encountered error trying to uninstall counter: No counter name provided') + assert e.value.code == 1 + + @patch('builtins.print') + @patch('sys.argv', ['dropconfig', '-c', 'add']) + def test_add_error(self, mock_print): + with pytest.raises(SystemExit) as e: + dropconfig.main() + mock_print.assert_called_once_with('Encountered error trying to add reasons: No counter name provided') + assert e.value.code == 1 + + @patch('builtins.print') + @patch('sys.argv', ['dropconfig', '-c', 'remove']) + def test_remove_error(self, mock_print): + with pytest.raises(SystemExit) as e: + dropconfig.main() + mock_print.assert_called_once_with('Encountered error trying to remove reasons: No counter name provided') + assert e.value.code == 1 + + def teardown(self): + print('TEARDOWN') + diff --git a/tests/filter_fdb_entries_test.py b/tests/filter_fdb_entries_test.py index 13cb8a44ca..dc73e9c683 100644 --- a/tests/filter_fdb_entries_test.py +++ b/tests/filter_fdb_entries_test.py @@ -77,29 +77,6 @@ def __tearDown(self): os.remove(file) os.remove(self.CONFIG_DB_FILENAME) - def __runCommand(self, cmds): - """ - Runs command 'cmds' on host - - Args: - 
cmds(list): command to be run on localhost - - Returns: - stdout(str): stdout gathered during command execution - stderr(str): stderr gathered during command execution - returncode(int): command exit code - """ - process = subprocess.Popen( - cmds, - shell=False, - text=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = process.communicate() - - return stdout, stderr, process.returncode - def __getFdbEntriesMap(self, filename): """ Generate map for FDB entries diff --git a/tests/intfutil_test.py b/tests/intfutil_test.py index 988c5329fc..ef37199bfe 100644 --- a/tests/intfutil_test.py +++ b/tests/intfutil_test.py @@ -133,7 +133,7 @@ def test_intf_status(self): assert result.output == show_interface_status_output # Test 'intfutil status' - output = subprocess.check_output('intfutil -c status', stderr=subprocess.STDOUT, shell=True, text=True) + output = subprocess.check_output(['intfutil', '-c', 'status'], stderr=subprocess.STDOUT, text=True) print(output) assert result.output == show_interface_status_output @@ -216,7 +216,7 @@ def test_subintf_status(self): self.assertEqual(result.output.strip(), expected_output) # Test 'intfutil status subport' - output = subprocess.check_output('intfutil -c status -i subport', stderr=subprocess.STDOUT, shell=True, text=True) + output = subprocess.check_output(['intfutil', '-c', 'status', '-i', 'subport'], stderr=subprocess.STDOUT, text=True) print(output, file=sys.stderr) self.assertEqual(output.strip(), expected_output) @@ -241,7 +241,7 @@ def test_single_subintf_status(self): self.assertEqual(result.output.strip(), expected_output) # Test 'intfutil status Ethernet0.10' - output = subprocess.check_output('intfutil -c status -i Ethernet0.10', stderr=subprocess.STDOUT, shell=True, text=True) + output = subprocess.check_output(['intfutil', '-c', 'status', '-i', 'Ethernet0.10'], stderr=subprocess.STDOUT, text=True) print(output, file=sys.stderr) self.assertEqual(output.strip(), expected_output) @@ -251,7 
+251,7 @@ def test_single_subintf_status(self): " Eth36.10 10M 9100 100 up 802.1q-encapsulation" ) # Test 'intfutil status Eth36.10' - output = subprocess.check_output('intfutil -c status -i Eth36.10', stderr=subprocess.STDOUT, shell=True, text=True) + output = subprocess.check_output(['intfutil', '-c', 'status', '-i', 'Eth36.10'], stderr=subprocess.STDOUT, text=True) print(output, file=sys.stderr) self.assertEqual(output.strip(), expected_output) @@ -261,7 +261,7 @@ def test_single_subintf_status(self): " Po0001.10 40G 9100 100 up 802.1q-encapsulation" ) # Test 'intfutil status Po0001.10' - output = subprocess.check_output('intfutil -c status -i Po0001.10', stderr=subprocess.STDOUT, shell=True, text=True) + output = subprocess.check_output(['intfutil', '-c', 'status', '-i', 'Po0001.10'], stderr=subprocess.STDOUT, text=True) print(output, file=sys.stderr) self.assertEqual(output.strip(), expected_output) diff --git a/tests/lldp_test.py b/tests/lldp_test.py index a70c676a02..89177338e0 100644 --- a/tests/lldp_test.py +++ b/tests/lldp_test.py @@ -74,6 +74,15 @@ def test_show_lldp_2_macs_same_phy_interface(self): output_summary = lldp.get_summary_output(lldp_detail_info=False) assert output_summary == expected_2MACs_Ethernet0_output + def test_get_info(self): + lldp = lldpshow.Lldpshow() + lldp.lldp_instance = [''] + lldp.lldpraw = expected_lldpctl_xml_output + lldp.get_info(lldp_detail_info=True, lldp_port='Ethernet0') + lldp.parse_info(lldp_detail_info=True) + output = lldp.get_summary_output(lldp_detail_info=True) + assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/pfcwd_test.py b/tests/pfcwd_test.py index 4cb95cf8db..2735cd09df 100644 --- a/tests/pfcwd_test.py +++ b/tests/pfcwd_test.py @@ -1,7 +1,7 @@ import importlib import os import sys -from unittest.mock import patch +from unittest.mock import patch, MagicMock from click.testing import CliRunner @@ -275,6 +275,65 
@@ def setup_class(cls): import pfcwd.main importlib.reload(pfcwd.main) + @patch('pfcwd.main.os.geteuid', MagicMock(return_value=8)) + def test_pfcwd_start_nonroot(self): + import pfcwd.main as pfcwd + runner = CliRunner() + result = runner.invoke( + pfcwd.cli.commands["start"], + [ + "--action", "drop", "--restoration-time", "601", + "all", "602" + ], + ) + print(result.output) + assert result.exit_code == 1 + assert result.output == 'Root privileges are required for this operation\n' + + @patch('pfcwd.main.os.geteuid', MagicMock(return_value=8)) + def test_pfcwd_stop_nonroot(self): + import pfcwd.main as pfcwd + runner = CliRunner() + result = runner.invoke( + pfcwd.cli.commands['stop'], + ) + print(result.output) + assert result.exit_code == 1 + assert result.output == 'Root privileges are required for this operation\n' + + @patch('pfcwd.main.os.geteuid', MagicMock(return_value=8)) + def test_pfcwd_start_default_nonroot(self): + import pfcwd.main as pfcwd + runner = CliRunner() + result = runner.invoke( + pfcwd.cli.commands['start_default'], + ) + print(result.output) + assert result.exit_code == 1 + assert result.output == 'Root privileges are required for this operation\n' + + @patch('pfcwd.main.os.geteuid', MagicMock(return_value=8)) + def test_pfcwd_counter_poll_nonroot(self): + import pfcwd.main as pfcwd + runner = CliRunner() + result = runner.invoke( + pfcwd.cli.commands['counter_poll'], ['enable'], + ) + print(result.output) + assert result.exit_code == 1 + assert result.output == 'Root privileges are required for this operation\n' + + @patch('pfcwd.main.os.geteuid', MagicMock(return_value=8)) + def test_pfcwd_big_red_switch_nonroot(self): + import pfcwd.main as pfcwd + runner = CliRunner() + result = runner.invoke( + pfcwd.cli.commands['big_red_switch'], ['enable'], + ) + print(result.output) + assert result.exit_code == 1 + assert result.output == 'Root privileges are required for this operation\n' + def test_pfcwd_stats_all(self): import pfcwd.main as 
pfcwd print(pfcwd.__file__) From e6aacd3735d66cecdfb538089f30596985b7d224 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Tue, 16 May 2023 19:26:57 -0700 Subject: [PATCH 09/35] Update TRANSCEIVER_INFO table after CDB FW upgrade (#2837) * Update TRANSCEIVER_INFO table after CDB FW upgrade Signed-off-by: Mihir Patel * Added testcases to improve code coverage --------- Signed-off-by: Mihir Patel --- sfputil/main.py | 17 ++++++++++++++++- tests/sfputil_test.py | 41 +++++++++++++++++++++++++++++++++++------ 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/sfputil/main.py b/sfputil/main.py index 53116e1206..3af370d5b4 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1201,6 +1201,18 @@ def reset(port_name): i += 1 +def update_firmware_info_to_state_db(port_name): + physical_port = logical_port_to_physical_port_index(port_name) + + namespaces = multi_asic.get_front_end_namespaces() + for namespace in namespaces: + state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + if state_db is not None: + state_db.connect(state_db.STATE_DB) + active_firmware, inactive_firmware = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "active_firmware", active_firmware) + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + # 'firmware' subgroup @cli.group() def firmware(): @@ -1271,7 +1283,7 @@ def is_fw_switch_done(port_name): if fw_info['status'] == True: (ImageA, ImageARunning, ImageACommitted, ImageAInvalid, - ImageB, ImageBRunning, ImageBCommitted, ImageBInvalid) = fw_info['result'] + ImageB, ImageBRunning, ImageBCommitted, ImageBInvalid, _, _) = fw_info['result'] if (ImageARunning == 1) and (ImageAInvalid == 1): # ImageA is running, but also invalid. 
click.echo("FW info error : ImageA shows running, but also shows invalid!") @@ -1382,6 +1394,7 @@ def download_firmware(port_name, filepath): sfp.set_optoe_write_max(1) status = api.cdb_firmware_download_complete() + update_firmware_info_to_state_db(port_name) click.echo('CDB: firmware download complete') return status @@ -1409,6 +1422,7 @@ def run(port_name, mode): click.echo('Failed to run firmware in mode={}! CDB status: {}'.format(mode, status)) sys.exit(EXIT_FAIL) + update_firmware_info_to_state_db(port_name) click.echo("Firmware run in mode={} success".format(mode)) # 'commit' subcommand @@ -1430,6 +1444,7 @@ def commit(port_name): click.echo('Failed to commit firmware! CDB status: {}'.format(status)) sys.exit(EXIT_FAIL) + update_firmware_info_to_state_db(port_name) click.echo("Firmware commit successful") # 'upgrade' subcommand diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index e4e55b897d..bbdd124516 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -710,12 +710,12 @@ def test_run_firmwre(self, mock_chassis): @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @pytest.mark.parametrize("mock_response, expected", [ ({'status': False, 'result': None} , -1), - ({'status': True, 'result': ("1.0.1", 1, 1, 0, "1.0.2", 0, 0, 0)} , -1), - ({'status': True, 'result': ("1.0.1", 0, 0, 0, "1.0.2", 1, 1, 0)} , -1), - ({'status': True, 'result': ("1.0.1", 1, 0, 0, "1.0.2", 0, 1, 0)} , 1), - ({'status': True, 'result': ("1.0.1", 0, 1, 0, "1.0.2", 1, 0, 0)} , 1), - ({'status': True, 'result': ("1.0.1", 1, 0, 1, "1.0.2", 0, 1, 0)} , -1), - ({'status': True, 'result': ("1.0.1", 0, 1, 0, "1.0.2", 1, 0, 1)} , -1), + ({'status': True, 'result': ("1.0.1", 1, 1, 0, "1.0.2", 0, 0, 0, "1.0.1", "1.0.2")} , -1), + ({'status': True, 'result': ("1.0.1", 0, 0, 0, "1.0.2", 1, 1, 0, "1.0.2", "1.0.1")} , -1), + ({'status': True, 'result': ("1.0.1", 1, 0, 0, "1.0.2", 0, 1, 0, "1.0.1", "1.0.2")} , 1), + ({'status': True, 'result': 
("1.0.1", 0, 1, 0, "1.0.2", 1, 0, 0, "1.0.2", "1.0.1")} , 1), + ({'status': True, 'result': ("1.0.1", 1, 0, 1, "1.0.2", 0, 1, 0, "1.0.1", "1.0.2")} , -1), + ({'status': True, 'result': ("1.0.1", 0, 1, 0, "1.0.2", 1, 0, 1, "1.0.2", "1.0.1")} , -1), # "is_fw_switch_done" function will waiting until timeout under below condition, so that this test will spend around 1min. ({'status': False, 'result': 0} , -1), @@ -790,3 +790,32 @@ def test_firmware_download_RJ45(self): result = runner.invoke(sfputil.cli.commands['firmware'].commands['download'], ["Ethernet0", "a.b"]) assert result.output == 'This functionality is not applicable for RJ45 port Ethernet0.\n' assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=True)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.run_firmware', MagicMock(return_value=1)) + @patch('sfputil.main.update_firmware_info_to_state_db', MagicMock()) + def test_firmware_run_cli(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['run'], ["Ethernet0"]) + assert result.exit_code == 0 + + @patch('sfputil.main.is_sfp_present', MagicMock(return_value=True)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.commit_firmware', MagicMock(return_value=1)) + @patch('sfputil.main.update_firmware_info_to_state_db', MagicMock()) + def test_firmware_commit_cli(self): + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['firmware'].commands['commit'], ["Ethernet0"]) + assert result.exit_code == 0 + + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sonic_py_common.multi_asic.get_front_end_namespaces', MagicMock(return_value=[''])) + @patch('sfputil.main.SonicV2Connector', MagicMock()) + @patch('sfputil.main.platform_chassis') + def test_update_firmware_info_to_state_db(self, mock_chassis): + mock_sfp = MagicMock() 
+ mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_transceiver_info_firmware_versions.return_value = ['a.b.c', 'd.e.f'] + + sfputil.update_firmware_info_to_state_db("Ethernet0") From 3d89589ff8aa17ad25743f06ba8bc2f536056fde Mon Sep 17 00:00:00 2001 From: cytsao1 <111393130+cytsao1@users.noreply.github.com> Date: Fri, 19 May 2023 19:17:32 -0700 Subject: [PATCH 10/35] Update pcieutil error message on loading common pcie module (#2786) * Update pcieutil load module error message * Add pcieutil test for load module warning to not print to output * Update pcieutil import test * Update pcieutil import test * Fix pcieutil import test --- pcieutil/main.py | 2 +- tests/pcieutil_test.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/pcieutil/main.py b/pcieutil/main.py index ad6a1ebfa4..0fef193598 100644 --- a/pcieutil/main.py +++ b/pcieutil/main.py @@ -54,7 +54,7 @@ def load_platform_pcieutil(): from sonic_platform.pcie import Pcie platform_pcieutil = Pcie(platform_path) except ImportError as e: - log.log_warning("Failed to load platform Pcie module. Error : {}, fallback to load Pcie common utility.".format(str(e)), True) + log.log_warning("Failed to load platform Pcie module. Warning : {}, fallback to load Pcie common utility.".format(str(e))) try: from sonic_platform_base.sonic_pcie.pcie_common import PcieUtil platform_pcieutil = PcieUtil(platform_path) diff --git a/tests/pcieutil_test.py b/tests/pcieutil_test.py index cee1feec88..acac60f8d5 100644 --- a/tests/pcieutil_test.py +++ b/tests/pcieutil_test.py @@ -3,6 +3,7 @@ from unittest import mock from click.testing import CliRunner +from io import StringIO test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -156,6 +157,8 @@ +---------------------+-----------+ """ +pcieutil_load_module_warning_msg = "Failed to load platform Pcie module. 
Warning : No module named 'sonic_platform.pcie', fallback to load Pcie common utility." + class TestPcieUtil(object): @classmethod def setup_class(cls): @@ -199,6 +202,16 @@ def test_aer_option_device(self): result = runner.invoke(pcieutil.cli.commands["pcie-aer"].commands["correctable"], ["-d", "0:1.0"]) assert result.output == pcieutil_pcie_aer_correctable_dev_output + def test_load_pcie_module_warning(self): + stdout = sys.stdout + sys.stdout = result = StringIO() + try: + pcieutil.load_platform_pcieutil() + except ImportError: + pass + sys.stdout = stdout + assert pcieutil_load_module_warning_msg not in result.getvalue() + @classmethod def teardown_class(cls): print("TEARDOWN") From b4f4e63ed7310a1e8684a68eb4a33cc4a559bd6d Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Tue, 23 May 2023 17:01:04 +0300 Subject: [PATCH 11/35] Revert "Revert frr route check (#2761)" (#2762) This reverts commit 79a21ce. DEPENDS ON: sonic-net/sonic-buildimage#12853 What I did Reverted changes back How I did it Reverted changes back How to verify it UT --- scripts/route_check.py | 122 ++++++++++++++++++++++++++++--- tests/mock_tables/config_db.json | 3 +- tests/route_check_test.py | 17 ++++- tests/route_check_test_data.py | 122 ++++++++++++++++++++++++++++++- 4 files changed, 248 insertions(+), 16 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 75806cf928..a85222cee3 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -11,11 +11,11 @@ How: NOTE: The flow from APPL-DB to ASIC-DB takes non zero milliseconds. 1) Initiate subscribe for ASIC-DB updates. - 2) Read APPL-DB & ASIC-DB + 2) Read APPL-DB & ASIC-DB 3) Get the diff. 
- 4) If any diff, + 4) If any diff, 4.1) Collect subscribe messages for a second - 4.2) check diff against the subscribe messages + 4.2) check diff against the subscribe messages 5) Rule out local interfaces & default routes 6) If still outstanding diffs, report failure. @@ -29,7 +29,7 @@ down to ensure failure. Analyze the reported failures to match expected. You may use the exit code to verify the result as success or not. - + """ @@ -45,7 +45,9 @@ import time import signal import traceback +import subprocess +from ipaddress import ip_network from swsscommon import swsscommon from utilities_common import chassis @@ -71,6 +73,9 @@ PRINT_MSG_LEN_MAX = 1000 +FRR_CHECK_RETRIES = 3 +FRR_WAIT_TIME = 15 + class Level(Enum): ERR = 'ERR' INFO = 'INFO' @@ -141,7 +146,7 @@ def add_prefix(ip): ip = ip + PREFIX_SEPARATOR + "32" else: ip = ip + PREFIX_SEPARATOR + "128" - return ip + return str(ip_network(ip)) def add_prefix_ifnot(ip): @@ -150,7 +155,7 @@ def add_prefix_ifnot(ip): :param ip: IP to add prefix as string. :return ip with prefix """ - return ip if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip) + return str(ip_network(ip)) if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip) def is_local(ip): @@ -293,7 +298,7 @@ def get_routes(): def get_route_entries(): """ - helper to read present route entries from ASIC-DB and + helper to read present route entries from ASIC-DB and as well initiate selector for ASIC-DB:ASIC-state updates. 
:return (selector, subscriber, ) """ @@ -309,7 +314,7 @@ def get_route_entries(): res, e = checkout_rt_entry(k) if res: rt.append(e) - + print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4)) selector = swsscommon.Select() @@ -317,6 +322,31 @@ def get_route_entries(): return (selector, subs, sorted(rt)) +def is_suppress_fib_pending_enabled(): + """ + Returns True if FIB suppression is enabled, False otherwise + """ + cfg_db = swsscommon.ConfigDBConnector() + cfg_db.connect() + + state = cfg_db.get_entry('DEVICE_METADATA', 'localhost').get('suppress-fib-pending') + + return state == 'enabled' + + +def get_frr_routes(): + """ + Read routes from zebra through CLI command + :return frr routes dictionary + """ + + output = subprocess.check_output('show ip route json', shell=True) + routes = json.loads(output) + output = subprocess.check_output('show ipv6 route json', shell=True) + routes.update(json.loads(output)) + return routes + + def get_interfaces(): """ helper to read interface table from APPL-DB. @@ -354,7 +384,7 @@ def filter_out_local_interfaces(keys): chassis_local_intfs = chassis.get_chassis_local_interfaces() local_if_lst.update(set(chassis_local_intfs)) - + db = swsscommon.DBConnector(APPL_DB_NAME, 0) tbl = swsscommon.Table(db, 'ROUTE_TABLE') @@ -493,6 +523,61 @@ def filter_out_standalone_tunnel_routes(routes): return updated_routes +def check_frr_pending_routes(): + """ + Check FRR routes for offload flag presence by executing "show ip route json" + Returns a list of routes that have no offload flag. + """ + + missed_rt = [] + + retries = FRR_CHECK_RETRIES + for i in range(retries): + missed_rt = [] + frr_routes = get_frr_routes() + + for _, entries in frr_routes.items(): + for entry in entries: + if entry['protocol'] != 'bgp': + continue + + # TODO: Also handle VRF routes. 
Currently this script does not check for VRF routes so it would be incorrect for us + # to assume they are installed in ASIC_DB, so we don't handle them. + if entry['vrfName'] != 'default': + continue + + if not entry.get('offloaded', False): + missed_rt.append(entry) + + if not missed_rt: + break + + time.sleep(FRR_WAIT_TIME) + + return missed_rt + + +def mitigate_installed_not_offloaded_frr_routes(missed_frr_rt, rt_appl): + """ + Mitigate installed but not offloaded FRR routes. + + In case route exists in APPL_DB, this function will manually send a notification to fpmsyncd + to trigger the flow that sends offload flag to zebra. + + It is designed to mitigate a problem when orchagent fails to send notification about installed route to fpmsyncd + or fpmsyncd not being able to read the notification or in case zebra fails to receive offload update due to variety of reasons. + All of the above mentioned cases must be considered as a bug, but even in that case we will report an error in the log but + given that this script ensures the route is installed in the hardware it will automitigate such a bug. + """ + db = swsscommon.DBConnector('APPL_STATE_DB', 0) + response_producer = swsscommon.NotificationProducer(db, f'{APPL_DB_NAME}_{swsscommon.APP_ROUTE_TABLE_NAME}_RESPONSE_CHANNEL') + for entry in [entry for entry in missed_frr_rt if entry['prefix'] in rt_appl]: + fvs = swsscommon.FieldValuePairs([('err_str', 'SWSS_RC_SUCCESS'), ('protocol', entry['protocol'])]) + response_producer.send('SWSS_RC_SUCCESS', entry['prefix'], fvs) + + print_message(syslog.LOG_ERR, f'Mitigated route {entry["prefix"]}') + + def get_soc_ips(config_db): mux_table = config_db.get_table('MUX_CABLE') soc_ips = [] @@ -536,7 +621,7 @@ def check_routes(): """ The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. 
- Checkout routes in ASIC-DB to match APPL-DB, discounting local & + Checkout routes in ASIC-DB to match APPL-DB, discounting local & default routes. In case of missed / unexpected entries in ASIC, it might be due to update latency between APPL & ASIC DBs. So collect ASIC-DB subscribe updates for a second, and checkout if you see SET @@ -545,12 +630,16 @@ def check_routes(): If there are still some unjustifiable diffs, between APPL & ASIC DB, related to routes report failure, else all good. + If there are FRR routes that aren't marked offloaded but all APPL & ASIC DB + routes are in sync report failure and perform a mitigation action. + :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. """ intf_appl_miss = [] rt_appl_miss = [] rt_asic_miss = [] + rt_frr_miss = [] results = {} adds = [] @@ -599,11 +688,22 @@ def check_routes(): if rt_asic_miss: results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + rt_frr_miss = check_frr_pending_routes() + + if rt_frr_miss: + results["missed_FRR_routes"] = rt_frr_miss + if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above") print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) + + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR but all routes in APPL_DB and ASIC_DB are in sync") + if is_suppress_fib_pending_enabled(): + mitigate_installed_not_offloaded_frr_routes(rt_frr_miss, rt_appl) + return -1, results else: print_message(syslog.LOG_INFO, "All good!") @@ -649,7 +749,7 @@ def main(): return ret, res else: return ret, res - + if __name__ == "__main__": diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 986da98a9e..5cf11f9f66 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -870,7 +870,8 @@ "mac": "1d:34:db:16:a6:00", "platform": "x86_64-mlnx_msn3800-r0", "peer_switch": "sonic-switch", - "type": "ToRRouter" + "type": "ToRRouter", + "suppress-fib-pending": "enabled" }, "SNMP_COMMUNITY|msft": { "TYPE": "RO" diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 85e6a64a95..118e9eab56 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -7,7 +7,7 @@ import time from sonic_py_common import device_info from unittest.mock import MagicMock, patch -from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD +from tests.route_check_test_data import APPL_DB, ARGS, ASIC_DB, CONFIG_DB, DEFAULT_CONFIG_DB, DESCR, OP_DEL, OP_SET, PRE, RESULT, RET, TEST_DATA, UPD, FRR_ROUTES import pytest @@ -239,6 +239,7 @@ def setup(self): def init(self): route_check.UNIT_TESTING = 1 + route_check.FRR_WAIT_TIME = 0 @pytest.fixture def force_hang(self): @@ -258,7 +259,8 @@ def mock_dbs(self): patch("route_check.swsscommon.Table") as mock_table, \ patch("route_check.swsscommon.Select") as mock_sel, \ 
patch("route_check.swsscommon.SubscriberStateTable") as mock_subs, \ - patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db): + patch("route_check.swsscommon.ConfigDBConnector", return_value=mock_config_db), \ + patch("route_check.swsscommon.NotificationProducer"): device_info.get_platform = MagicMock(return_value='unittest') set_mock(mock_table, mock_conn, mock_sel, mock_subs, mock_config_db) yield @@ -272,7 +274,16 @@ def test_route_check(self, mock_dbs, test_num): set_test_case_data(ct_data) logger.info("Running test case {}: {}".format(test_num, ct_data[DESCR])) - with patch('sys.argv', ct_data[ARGS].split()): + with patch('sys.argv', ct_data[ARGS].split()), \ + patch('route_check.subprocess.check_output') as mock_check_output: + + routes = ct_data.get(FRR_ROUTES, {}) + + def side_effect(*args, **kwargs): + return json.dumps(routes) + + mock_check_output.side_effect = side_effect + ret, res = route_check.main() expect_ret = ct_data[RET] if RET in ct_data else 0 expect_res = ct_data[RESULT] if RESULT in ct_data else None diff --git a/tests/route_check_test_data.py b/tests/route_check_test_data.py index 9e4cd3a009..7ed1eee41f 100644 --- a/tests/route_check_test_data.py +++ b/tests/route_check_test_data.py @@ -6,6 +6,7 @@ CONFIG_DB = 4 PRE = "pre-value" UPD = "update" +FRR_ROUTES = "frr-routes" RESULT = "res" OP_SET = "SET" @@ -359,5 +360,124 @@ } } } - } + }, + "10": { + DESCR: "basic good one, check FRR routes", + ARGS: "route_check -m INFO -i 1000", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + 
"2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + FRR_ROUTES: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + }, + "11": { + DESCR: "failure test case, missing FRR routes", + ARGS: "route_check -m INFO -i 1000", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + "0.0.0.0/0" : { "ifname": "portchannel0" }, + "10.10.196.12/31" : { "ifname": "portchannel0" }, + }, + INTF_TABLE: { + "PortChannel1013:10.10.196.24/31": {}, + "PortChannel1023:2603:10b0:503:df4::5d/126": {}, + "PortChannel1024": {} + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "10.10.196.12/31" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "10.10.196.24/32" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "2603:10b0:503:df4::5d/128" + RT_ENTRY_KEY_SUFFIX: {}, + RT_ENTRY_KEY_PREFIX + "0.0.0.0/0" + RT_ENTRY_KEY_SUFFIX: {} + } + }, + }, + FRR_ROUTES: { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "vrfName": "default", + "protocol": "bgp", + "offloaded": "true", + }, + ], + "10.10.196.12/31": [ + { + "prefix": "10.10.196.12/31", + "vrfName": "default", + "protocol": "bgp", + }, + ], + "10.10.196.24/31": [ + { + "protocol": "connected", + }, + ], + }, + RESULT: { + "missed_FRR_routes": [ + {"prefix": "10.10.196.12/31", "vrfName": "default", "protocol": "bgp"} + ], + }, + RET: -1, + }, + "10": { + DESCR: "basic good one with IPv6 address", + ARGS: "route_check -m INFO -i 1000", + PRE: { + APPL_DB: { + ROUTE_TABLE: { + }, + INTF_TABLE: { + "PortChannel1013:2000:31:0:0::1/64": {}, + } + }, + ASIC_DB: { + RT_ENTRY_TABLE: { + RT_ENTRY_KEY_PREFIX + "2000:31::1/128" + RT_ENTRY_KEY_SUFFIX: {}, + } + } + } + }, } From 
f258e2a3e1b705c7b05a8a42489b9219fd9a5967 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Wed, 24 May 2023 15:32:48 -0700 Subject: [PATCH 12/35] [GCU] Complete RDMA Platform Validation Checks (#2791) --- .../field_operation_validators.py | 117 ++++++++++++++- .../gcu_field_operation_validators.conf.json | 119 ++++++++++++++- generic_config_updater/gu_common.py | 6 +- .../field_operation_validator_test.py | 142 ++++++++++++++++++ .../gcu_feature_patch_application_test.py | 5 +- .../generic_config_updater/gu_common_test.py | 56 ------- 6 files changed, 379 insertions(+), 66 deletions(-) create mode 100644 tests/generic_config_updater/field_operation_validator_test.py diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py index 84cc48547f..883944c282 100644 --- a/generic_config_updater/field_operation_validators.py +++ b/generic_config_updater/field_operation_validators.py @@ -1,10 +1,117 @@ -from sonic_py_common import device_info +import os import re +import json +import jsonpointer +import subprocess +from sonic_py_common import device_info +from .gu_common import GenericConfigUpdaterError -def rdma_config_update_validator(): - version_info = device_info.get_sonic_version_info() - asic_type = version_info.get('asic_type') - if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'): +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +GCU_TABLE_MOD_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" + +def get_asic_name(): + asic = "unknown" + + if os.path.exists(GCU_TABLE_MOD_CONF_FILE): + with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU table modification validators config file not found") + + asic_mapping = gcu_field_operation_conf["helper_data"]["rdma_config_update_validator"] + + if 
device_info.get_sonic_version_info()['asic_type'] == 'cisco-8000': + asic = "cisco-8000" + elif device_info.get_sonic_version_info()['asic_type'] == 'mellanox': + GET_HWSKU_CMD = "sonic-cfggen -d -v DEVICE_METADATA.localhost.hwsku" + spc1_hwskus = asic_mapping["mellanox_asics"]["spc1"] + proc = subprocess.Popen(GET_HWSKU_CMD, shell=True, universal_newlines=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + hwsku = output.rstrip('\n') + if hwsku.lower() in [spc1_hwsku.lower() for spc1_hwsku in spc1_hwskus]: + asic = "spc1" + elif device_info.get_sonic_version_info()['asic_type'] == 'broadcom': + command = ["sudo", "lspci"] + proc = subprocess.Popen(command, universal_newlines=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + broadcom_asics = asic_mapping["broadcom_asics"] + for asic_shorthand, asic_descriptions in broadcom_asics.items(): + if asic != "unknown": + break + for asic_description in asic_descriptions: + if asic_description in output: + asic = asic_shorthand + break + + return asic + + +def rdma_config_update_validator(patch_element): + asic = get_asic_name() + if asic == "unknown": return False + version_info = device_info.get_sonic_version_info() + build_version = version_info.get('build_version') + version_substrings = build_version.split('.') + branch_version = None + + for substring in version_substrings: + if substring.isdigit() and re.match(r'^\d{8}$', substring): + branch_version = substring + + path = patch_element["path"] + table = jsonpointer.JsonPointer(path).parts[0] + + # Helper function to return relevant cleaned paths, consdiers case where the jsonpatch value is a dict + # For paths like /PFC_WD/Ethernet112/action, remove Ethernet112 from the path so that we can clearly determine the relevant field (i.e. 
action, not Ethernet112) + def _get_fields_in_patch(): + cleaned_fields = [] + + field_elements = jsonpointer.JsonPointer(path).parts[1:] + cleaned_field_elements = [elem for elem in field_elements if not any(char.isdigit() for char in elem)] + cleaned_field = '/'.join(cleaned_field_elements).lower() + + + if 'value' in patch_element.keys() and isinstance(patch_element['value'], dict): + for key in patch_element['value']: + cleaned_fields.append(cleaned_field+ '/' + key) + else: + cleaned_fields.append(cleaned_field) + + return cleaned_fields + + if os.path.exists(GCU_TABLE_MOD_CONF_FILE): + with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU table modification validators config file not found") + + tables = gcu_field_operation_conf["tables"] + scenarios = tables[table]["validator_data"]["rdma_config_update_validator"] + + cleaned_fields = _get_fields_in_patch() + for cleaned_field in cleaned_fields: + scenario = None + for key in scenarios.keys(): + if cleaned_field in scenarios[key]["fields"]: + scenario = scenarios[key] + break + + if scenario is None: + return False + + if scenario["platforms"][asic] == "": + return False + + if patch_element['op'] not in scenario["operations"]: + return False + + if branch_version is not None: + if asic in scenario["platforms"]: + if branch_version < scenario["platforms"][asic]: + return False + else: + return False + return True diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index f12a14d8eb..d89c5ae1aa 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -10,11 +10,128 @@ "e.g. 
'show.acl.test_acl'", "", "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed", + "", + "validator_data provides data relevant to each validator", "" ], + "helper_data": { + "rdma_config_update_validator": { + "mellanox_asics": { + "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-D48C8" ] + }, + "broadcom_asics": { + "th": ["Broadcom Limited Device b960", "Broadcom Limited Broadcom BCM56960"], + "th2": ["Broadcom Limited Device b971"], + "td2": ["Broadcom Limited Device b850", "Broadcom Limited Broadcom BCM56850"], + "td3": ["Broadcom Limited Device b870", "Broadcom Inc. and subsidiaries Device b870"] + } + } + }, "tables": { "PFC_WD": { - "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ] + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "PFCWD enable/disable": { + "fields": [ + "restoration_time", + "detection_time", + "action", + "global/poll_interval" + ], + "operations": ["remove", "add", "replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "20201200" + } + } + } + } + }, + "BUFFER_POOL": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "Shared/headroom pool size changes": { + "fields": [ + "ingress_lossless_pool/xoff", + "ingress_lossless_pool/size", + "egress_lossy_pool/size" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20191100", + "td2": "", + "th": "20221100", + "th2": "20221100", + "td3": "20221100", + "cisco-8000": "" + } + } + } + } + }, + 
"BUFFER_PROFILE": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "Dynamic threshold tuning": { + "fields": [ + "dynamic_th" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "" + } + }, + "PG headroom modification": { + "fields": [ + "xoff" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20191100", + "td2": "", + "th": "20221100", + "th2": "20221100", + "td3": "20221100", + "cisco-8000": "" + } + } + } + } + }, + "WRED_PROFILE": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "ECN tuning": { + "fields": [ + "azure_lossless/green_min_threshold", + "azure_lossless/green_max_threshold", + "azure_lossless/green_drop_probability" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "" + } + } + } + } } } } diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index e8c66fcbbe..a6cb8de094 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -166,7 +166,7 @@ def validate_field_operation(self, old_config, target_config): if any(op['op'] == operation and field == op['path'] for op in patch): raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field)) - def _invoke_validating_function(cmd): + def _invoke_validating_function(cmd, jsonpatch_element): # cmd is in the format as . 
method_name = cmd.split(".")[-1] module_name = ".".join(cmd.split(".")[0:-1]) @@ -174,7 +174,7 @@ def _invoke_validating_function(cmd): raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name)) module = importlib.import_module(module_name, package=None) method_to_call = getattr(module, method_name) - return method_to_call() + return method_to_call(jsonpatch_element) if os.path.exists(GCU_FIELD_OP_CONF_FILE): with open(GCU_FIELD_OP_CONF_FILE, "r") as s: @@ -194,7 +194,7 @@ def _invoke_validating_function(cmd): validating_functions.update(tables.get(table, {}).get("field_operation_validators", [])) for function in validating_functions: - if not _invoke_validating_function(function): + if not _invoke_validating_function(function, element): raise IllegalPatchOperationError("Modification of {} table is illegal- validating function {} returned False".format(table, function)) diff --git a/tests/generic_config_updater/field_operation_validator_test.py b/tests/generic_config_updater/field_operation_validator_test.py new file mode 100644 index 0000000000..3b08b31d89 --- /dev/null +++ b/tests/generic_config_updater/field_operation_validator_test.py @@ -0,0 +1,142 @@ +import io +import unittest +import mock +import json +import subprocess +import generic_config_updater +import generic_config_updater.field_operation_validators as fov +import generic_config_updater.gu_common as gu_common + +from unittest.mock import MagicMock, Mock, mock_open +from mock import patch +from sonic_py_common.device_info import get_hwsku, get_sonic_version_info + + +class TestValidateFieldOperation(unittest.TestCase): + + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="unknown")) + def test_rdma_config_update_validator_unknown_asic(self): + patch_element = {"path": 
"/PFC_WD/Ethernet4/restoration_time", "op": "replace", "value": "234234"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="td3")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "ingress_lossless_pool/size", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"td3": "20221100"}}}}}}}')) + def test_rdma_config_update_validator_td3_asic_invalid_version(self): + patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "replace", "value": "234234"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_valid_version(self): + patch_element = {"path": "/PFC_WD/Ethernet8/detection_time", "op": "remove"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == True + + @patch("sonic_py_common.device_info.get_sonic_version_info", 
mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_invalid_op(self): + patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "remove"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_other_field(self): + patch_element = {"path": "/PFC_WD/Ethernet8/other_field", "op": "add", "value": "sample_value"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + def test_validate_field_operation_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + + def 
test_validate_field_operation_legal__rm_loopback1(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + config_wrapper.validate_field_operation(old_config, target_config) + + def test_validate_field_operation_illegal__rm_loopback0(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + +class TestGetAsicName(unittest.TestCase): + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_spc1(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'mellanox'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Mellanox-SN2700-D48C8", 0] + self.assertEqual(fov.get_asic_name(), "spc1") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_th(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b960", 0] + self.assertEqual(fov.get_asic_name(), "th") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_th2(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = 
{'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b971", 0] + self.assertEqual(fov.get_asic_name(), "th2") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_td2(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b850", 0] + self.assertEqual(fov.get_asic_name(), "td2") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_td3(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b870", 0] + self.assertEqual(fov.get_asic_name(), "td3") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_cisco(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'cisco-8000'} + self.assertEqual(fov.get_asic_name(), "cisco-8000") diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index 3f744e20ca..db625e8cd1 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -1,6 +1,7 @@ import jsonpatch import unittest import copy +import mock from unittest.mock import MagicMock, Mock from mock import patch @@ -31,7 +32,8 @@ def get_running_config(): class TestFeaturePatchApplication(unittest.TestCase): def setUp(self): self.config_wrapper = ConfigWrapper() - + + 
@patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", mock.Mock(return_value=True)) def test_feature_patch_application_success(self): # Format of the JSON file containing the test-cases: # @@ -52,6 +54,7 @@ def test_feature_patch_application_success(self): with self.subTest(name=test_case_name): self.run_single_success_case_applier(data[test_case_name]) + @patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", mock.Mock(return_value=True)) def test_feature_patch_application_failure(self): # Fromat of the JSON file containing the test-cases: # diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a319a25ead..a2a776c0bb 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -71,62 +71,6 @@ def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"})) - def test_validate_field_operation_legal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}} - config_wrapper = gu_common.ConfigWrapper() - config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "invalid-asic", "build_version": "SONiC.20181131"})) - def 
test_validate_field_modification_illegal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - - def test_validate_field_operation_legal__rm_loopback1(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__rm_loopback0(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - def test_ctor__default_values_set(self): config_wrapper = gu_common.ConfigWrapper() From d5544b4afbf40624cde02107ba64fed2048782f6 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Thu, 25 May 2023 10:54:08 +0800 Subject: [PATCH 13/35] [config] Generate sysinfo as needed when override config (#2836) ADO: 17921518 What I did The generated Golden Config will not have knowledge of configs that are produced in run time, such as mac and platform. Generate that info in Override Config if missing. How I did it Reuse the mac and platform in existing device runnning config and generate that if missing. 
How to verify it Unit test --- config/main.py | 43 +++++++++++++ .../multi_asic_dm_gen_sysinfo.json | 55 ++++++++++++++++ tests/config_override_test.py | 63 +++++++++++++++++++ 3 files changed, 161 insertions(+) create mode 100644 tests/config_override_input/multi_asic_dm_gen_sysinfo.json diff --git a/config/main.py b/config/main.py index abef5397af..f6bec33f8f 100644 --- a/config/main.py +++ b/config/main.py @@ -1824,6 +1824,47 @@ def override_config_by(golden_config_path): return +# This funtion is to generate sysinfo if that is missing in config_input. +# It will keep the same with sysinfo in cur_config if sysinfo exists. +# Otherwise it will modify config_input with generated sysinfo. +def generate_sysinfo(cur_config, config_input, ns=None): + # Generate required sysinfo for Golden Config. + device_metadata = config_input.get('DEVICE_METADATA') + + if not device_metadata or 'localhost' not in device_metadata: + return + + mac = None + platform = None + cur_device_metadata = cur_config.get('DEVICE_METADATA') + + # Reuse current config's mac and platform. 
Generate if absent + if cur_device_metadata is not None: + mac = cur_device_metadata.get('localhost', {}).get('mac') + platform = cur_device_metadata.get('localhost', {}).get('platform') + + if not mac: + if ns: + asic_role = device_metadata.get('localhost', {}).get('sub_role') + switch_type = device_metadata.get('localhost', {}).get('switch_type') + + if ((switch_type is not None and switch_type.lower() == "chassis-packet") or + (asic_role is not None and asic_role.lower() == "backend")): + mac = device_info.get_system_mac(namespace=ns) + else: + mac = device_info.get_system_mac() + else: + mac = device_info.get_system_mac() + + if not platform: + platform = device_info.get_platform() + + device_metadata['localhost']['mac'] = mac + device_metadata['localhost']['platform'] = platform + + return + + # # 'override-config-table' command ('config override-config-table ...') # @@ -1865,6 +1906,8 @@ def override_config_table(db, input_config_db, dry_run): ns_config_input = config_input["localhost"] else: ns_config_input = config_input[ns] + # Generate sysinfo if missing in ns_config_input + generate_sysinfo(current_config, ns_config_input, ns) else: ns_config_input = config_input updated_config = update_config(current_config, ns_config_input) diff --git a/tests/config_override_input/multi_asic_dm_gen_sysinfo.json b/tests/config_override_input/multi_asic_dm_gen_sysinfo.json new file mode 100644 index 0000000000..2cb92bbe8d --- /dev/null +++ b/tests/config_override_input/multi_asic_dm_gen_sysinfo.json @@ -0,0 +1,55 @@ +{ + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": 
"asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } +} diff --git a/tests/config_override_test.py b/tests/config_override_test.py index ca14ae75bb..3df8cea562 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -23,6 +23,7 @@ FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") MULTI_ASIC_DEVICE_METADATA_RM = os.path.join(DATA_DIR, "multi_asic_dm_rm.json") +MULTI_ASIC_DEVICE_METADATA_GEN_SYSINFO = os.path.join(DATA_DIR, "multi_asic_dm_gen_sysinfo.json") # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -318,6 +319,68 @@ def read_json_file_side_effect(filename): for ns, config_db in cfgdb_clients.items(): assert 'DEVICE_METADATA' not in config_db.get_config() + def test_device_metadata_keep_sysinfo(self): + def read_json_file_side_effect(filename): + with open(MULTI_ASIC_DEVICE_METADATA_GEN_SYSINFO, "r") as f: + device_metadata = json.load(f) + return device_metadata + db = Db() + cfgdb_clients = db.cfgdb_clients + + # Save original sysinfo in dict, compare later to see if it is override + orig_sysinfo = {} + for ns, config_db in cfgdb_clients.items(): + platform = config_db.get_config()['DEVICE_METADATA']['localhost'].get('platform') + mac = config_db.get_config()['DEVICE_METADATA']['localhost'].get('mac') + orig_sysinfo[ns] = {} + orig_sysinfo[ns]['platform'] = platform + orig_sysinfo[ns]['mac'] = mac + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + assert result.exit_code == 0 + + for ns, config_db in cfgdb_clients.items(): + platform = config_db.get_config()['DEVICE_METADATA']['localhost'].get('platform') + mac = config_db.get_config()['DEVICE_METADATA']['localhost'].get('mac') + assert platform == orig_sysinfo[ns]['platform'] + assert mac == orig_sysinfo[ns]['mac'] + + def test_device_metadata_gen_sysinfo(self): + def read_json_file_side_effect(filename): + with open(MULTI_ASIC_DEVICE_METADATA_GEN_SYSINFO, "r") as f: + device_metadata = json.load(f) + return device_metadata + db = Db() + cfgdb_clients = db.cfgdb_clients + + # Remove original sysinfo and check if use generated ones + for ns, config_db in cfgdb_clients.items(): + metadata = config_db.get_config()['DEVICE_METADATA']['localhost'] + metadata.pop('platform', None) + metadata.pop('mac', None) + 
config_db.set_entry('DEVICE_METADATA', 'localhost', metadata) + + with mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('sonic_py_common.device_info.get_platform', + return_value="multi_asic"),\ + mock.patch('sonic_py_common.device_info.get_system_mac', + return_value="11:22:33:44:55:66"): + runner = CliRunner() + result = runner.invoke(config.config.commands["override-config-table"], + ['golden_config_db.json'], obj=db) + assert result.exit_code == 0 + + for ns, config_db in cfgdb_clients.items(): + platform = config_db.get_config()['DEVICE_METADATA']['localhost'].get('platform') + mac = config_db.get_config()['DEVICE_METADATA']['localhost'].get('mac') + assert platform == "multi_asic" + assert mac == "11:22:33:44:55:66" + @classmethod def teardown_class(cls): From db61efcaffe232c0995da1514487fe9e5f6a4ce7 Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Mon, 29 May 2023 13:30:45 +0800 Subject: [PATCH 14/35] [vlan][dhcp_relay] Clear dhcpv6 relay counter while deleting vlan (#2852) What I did Fix this issue: sonic-net/sonic-buildimage#15047 Show dhcp_relay ipv6 counter will display vlan which has been deleted. 
How I did it Remove related info in state_db while deleting a vlan How to verify it Add unit test Build utilities and run cmd to verify Signed-off-by: Yaqiang Zhu --- config/vlan.py | 11 +++++++++++ tests/vlan_test.py | 10 ++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/config/vlan.py b/config/vlan.py index 0540739208..e1ae1f02eb 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -1,6 +1,7 @@ import click import utilities_common.cli as clicommon import utilities_common.dhcp_relay_util as dhcp_relay_util +from swsscommon.swsscommon import SonicV2Connector from jsonpatch import JsonPatchConflict from time import sleep @@ -65,6 +66,14 @@ def is_dhcpv6_relay_config_exist(db, vlan_name): return True +def delete_state_db_entry(entry_name): + state_db = SonicV2Connector() + state_db.connect(state_db.STATE_DB) + exists = state_db.exists(state_db.STATE_DB, 'DHCPv6_COUNTER_TABLE|{}'.format(entry_name)) + if exists: + state_db.delete(state_db.STATE_DB, 'DHCPv6_COUNTER_TABLE|{}'.format(entry_name)) + + @vlan.command('del') @click.argument('vid', metavar='', required=True, type=int) @click.option('--no_restart_dhcp_relay', is_flag=True, type=click.BOOL, required=False, default=False, @@ -109,6 +118,8 @@ def del_vlan(db, vid, no_restart_dhcp_relay): # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, None) + delete_state_db_entry(vlan) + if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): # set dhcpv6_relay table set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) diff --git a/tests/vlan_test.py b/tests/vlan_test.py index ce1271024b..56ac18383c 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -391,10 +391,12 @@ def test_config_vlan_del_vlan(self, mock_restart_dhcp_relay_service): print(result.output) assert result.exit_code == 0 - result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1000"], obj=db) - print(result.exit_code) - print(result.output) - assert result.exit_code == 0 
+ with mock.patch("config.vlan.delete_state_db_entry") as delete_state_db_entry: + result = runner.invoke(config.config.commands["vlan"].commands["del"], ["1000"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + delete_state_db_entry.assert_called_once_with("Vlan1000") # show output result = runner.invoke(show.cli.commands["vlan"].commands["brief"], [], obj=db) From 4fead8960a4cc4a11a0f3ba94ae43e23ed3e7b38 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 29 May 2023 14:06:54 +0300 Subject: [PATCH 15/35] [sonic-package-manager] fix CLI plugin compatibility issue (#2842) - What I did Overcome a CLI plugin compatibility issue - How I did it DHCP relay extension expects the spm to name its plugin "dhcp-relay". This change keeps that format if there is just one CLI plugin. Signed-off-by: Stepan Blyschak --- sonic_package_manager/manager.py | 6 +++++- tests/sonic_package_manager/test_manager.py | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py index 770641a27a..7ed2be0a9e 100644 --- a/sonic_package_manager/manager.py +++ b/sonic_package_manager/manager.py @@ -171,7 +171,11 @@ def get_cli_plugin_path(package: Package, index: int, command: str) -> str: Path generated for this package. 
""" - plugin_module_file = f'{package.name}_{index}.py' + if index == 0: + plugin_module_file = f'{package.name}.py' + else: + plugin_module_file = f'{package.name}_{index}.py' + return os.path.join(get_cli_plugin_directory(command), plugin_module_file) diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py index bc439e9d9a..ad365f946d 100644 --- a/tests/sonic_package_manager/test_manager.py +++ b/tests/sonic_package_manager/test_manager.py @@ -165,7 +165,7 @@ def test_installation_cli_plugin(package_manager, fake_metadata_resolver, anythi with patch('sonic_package_manager.manager.get_cli_plugin_directory') as get_dir_mock: get_dir_mock.return_value = '/' package_manager.install('test-package') - package_manager.docker.extract.assert_called_once_with(anything, '/cli/plugin.py', '/test-package_0.py') + package_manager.docker.extract.assert_called_once_with(anything, '/cli/plugin.py', '/test-package.py') def test_installation_multiple_cli_plugin(package_manager, fake_metadata_resolver, mock_feature_registry, anything): @@ -178,7 +178,7 @@ def test_installation_multiple_cli_plugin(package_manager, fake_metadata_resolve package_manager.install('test-package') package_manager.docker.extract.assert_has_calls( [ - call(anything, '/cli/plugin.py', '/test-package_0.py'), + call(anything, '/cli/plugin.py', '/test-package.py'), call(anything, '/cli/plugin2.py', '/test-package_1.py'), ], any_order=True, @@ -188,7 +188,7 @@ def test_installation_multiple_cli_plugin(package_manager, fake_metadata_resolve package_manager.uninstall('test-package', force=True) remove_mock.assert_has_calls( [ - call('/test-package_0.py'), + call('/test-package.py'), call('/test-package_1.py'), ], any_order=True, From 69abbc3cf13d53e16707f5abdba76d756f1f5783 Mon Sep 17 00:00:00 2001 From: StormLiangMS <89824293+StormLiangMS@users.noreply.github.com> Date: Tue, 30 May 2023 22:27:30 +0800 Subject: [PATCH 16/35] Revert "[GCU] Complete RDMA Platform 
Validation Checks (#2791)" (#2854) This reverts commit f258e2a3e1b705c7b05a8a42489b9219fd9a5967. --- .../field_operation_validators.py | 117 +-------------- .../gcu_field_operation_validators.conf.json | 119 +-------------- generic_config_updater/gu_common.py | 6 +- .../field_operation_validator_test.py | 142 ------------------ .../gcu_feature_patch_application_test.py | 5 +- .../generic_config_updater/gu_common_test.py | 56 +++++++ 6 files changed, 66 insertions(+), 379 deletions(-) delete mode 100644 tests/generic_config_updater/field_operation_validator_test.py diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py index 883944c282..84cc48547f 100644 --- a/generic_config_updater/field_operation_validators.py +++ b/generic_config_updater/field_operation_validators.py @@ -1,117 +1,10 @@ -import os -import re -import json -import jsonpointer -import subprocess from sonic_py_common import device_info -from .gu_common import GenericConfigUpdaterError - - -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -GCU_TABLE_MOD_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" - -def get_asic_name(): - asic = "unknown" - - if os.path.exists(GCU_TABLE_MOD_CONF_FILE): - with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: - gcu_field_operation_conf = json.load(s) - else: - raise GenericConfigUpdaterError("GCU table modification validators config file not found") - - asic_mapping = gcu_field_operation_conf["helper_data"]["rdma_config_update_validator"] - - if device_info.get_sonic_version_info()['asic_type'] == 'cisco-8000': - asic = "cisco-8000" - elif device_info.get_sonic_version_info()['asic_type'] == 'mellanox': - GET_HWSKU_CMD = "sonic-cfggen -d -v DEVICE_METADATA.localhost.hwsku" - spc1_hwskus = asic_mapping["mellanox_asics"]["spc1"] - proc = subprocess.Popen(GET_HWSKU_CMD, shell=True, universal_newlines=True, stdout=subprocess.PIPE) - output, err = proc.communicate() - hwsku = 
output.rstrip('\n') - if hwsku.lower() in [spc1_hwsku.lower() for spc1_hwsku in spc1_hwskus]: - asic = "spc1" - elif device_info.get_sonic_version_info()['asic_type'] == 'broadcom': - command = ["sudo", "lspci"] - proc = subprocess.Popen(command, universal_newlines=True, stdout=subprocess.PIPE) - output, err = proc.communicate() - broadcom_asics = asic_mapping["broadcom_asics"] - for asic_shorthand, asic_descriptions in broadcom_asics.items(): - if asic != "unknown": - break - for asic_description in asic_descriptions: - if asic_description in output: - asic = asic_shorthand - break - - return asic - +import re -def rdma_config_update_validator(patch_element): - asic = get_asic_name() - if asic == "unknown": - return False +def rdma_config_update_validator(): version_info = device_info.get_sonic_version_info() - build_version = version_info.get('build_version') - version_substrings = build_version.split('.') - branch_version = None - - for substring in version_substrings: - if substring.isdigit() and re.match(r'^\d{8}$', substring): - branch_version = substring - - path = patch_element["path"] - table = jsonpointer.JsonPointer(path).parts[0] - - # Helper function to return relevant cleaned paths, consdiers case where the jsonpatch value is a dict - # For paths like /PFC_WD/Ethernet112/action, remove Ethernet112 from the path so that we can clearly determine the relevant field (i.e. 
action, not Ethernet112) - def _get_fields_in_patch(): - cleaned_fields = [] - - field_elements = jsonpointer.JsonPointer(path).parts[1:] - cleaned_field_elements = [elem for elem in field_elements if not any(char.isdigit() for char in elem)] - cleaned_field = '/'.join(cleaned_field_elements).lower() - - - if 'value' in patch_element.keys() and isinstance(patch_element['value'], dict): - for key in patch_element['value']: - cleaned_fields.append(cleaned_field+ '/' + key) - else: - cleaned_fields.append(cleaned_field) - - return cleaned_fields - - if os.path.exists(GCU_TABLE_MOD_CONF_FILE): - with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: - gcu_field_operation_conf = json.load(s) - else: - raise GenericConfigUpdaterError("GCU table modification validators config file not found") - - tables = gcu_field_operation_conf["tables"] - scenarios = tables[table]["validator_data"]["rdma_config_update_validator"] - - cleaned_fields = _get_fields_in_patch() - for cleaned_field in cleaned_fields: - scenario = None - for key in scenarios.keys(): - if cleaned_field in scenarios[key]["fields"]: - scenario = scenarios[key] - break - - if scenario is None: - return False - - if scenario["platforms"][asic] == "": - return False - - if patch_element['op'] not in scenario["operations"]: - return False - - if branch_version is not None: - if asic in scenario["platforms"]: - if branch_version < scenario["platforms"][asic]: - return False - else: - return False + asic_type = version_info.get('asic_type') + if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'): + return False return True diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index d89c5ae1aa..f12a14d8eb 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -10,128 +10,11 @@ "e.g. 
'show.acl.test_acl'", "", "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed", - "", - "validator_data provides data relevant to each validator", "" ], - "helper_data": { - "rdma_config_update_validator": { - "mellanox_asics": { - "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-D48C8" ] - }, - "broadcom_asics": { - "th": ["Broadcom Limited Device b960", "Broadcom Limited Broadcom BCM56960"], - "th2": ["Broadcom Limited Device b971"], - "td2": ["Broadcom Limited Device b850", "Broadcom Limited Broadcom BCM56850"], - "td3": ["Broadcom Limited Device b870", "Broadcom Inc. and subsidiaries Device b870"] - } - } - }, "tables": { "PFC_WD": { - "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], - "validator_data": { - "rdma_config_update_validator": { - "PFCWD enable/disable": { - "fields": [ - "restoration_time", - "detection_time", - "action", - "global/poll_interval" - ], - "operations": ["remove", "add", "replace"], - "platforms": { - "spc1": "20181100", - "td2": "20181100", - "th": "20181100", - "th2": "20181100", - "td3": "20201200", - "cisco-8000": "20201200" - } - } - } - } - }, - "BUFFER_POOL": { - "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], - "validator_data": { - "rdma_config_update_validator": { - "Shared/headroom pool size changes": { - "fields": [ - "ingress_lossless_pool/xoff", - "ingress_lossless_pool/size", - "egress_lossy_pool/size" - ], - "operations": ["replace"], - "platforms": { - "spc1": "20191100", - "td2": "", - "th": "20221100", - "th2": "20221100", - "td3": "20221100", - "cisco-8000": "" - } - } - } - } - }, - "BUFFER_PROFILE": { - "field_operation_validators": [ 
"generic_config_updater.field_operation_validators.rdma_config_update_validator" ], - "validator_data": { - "rdma_config_update_validator": { - "Dynamic threshold tuning": { - "fields": [ - "dynamic_th" - ], - "operations": ["replace"], - "platforms": { - "spc1": "20181100", - "td2": "20181100", - "th": "20181100", - "th2": "20181100", - "td3": "20201200", - "cisco-8000": "" - } - }, - "PG headroom modification": { - "fields": [ - "xoff" - ], - "operations": ["replace"], - "platforms": { - "spc1": "20191100", - "td2": "", - "th": "20221100", - "th2": "20221100", - "td3": "20221100", - "cisco-8000": "" - } - } - } - } - }, - "WRED_PROFILE": { - "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], - "validator_data": { - "rdma_config_update_validator": { - "ECN tuning": { - "fields": [ - "azure_lossless/green_min_threshold", - "azure_lossless/green_max_threshold", - "azure_lossless/green_drop_probability" - ], - "operations": ["replace"], - "platforms": { - "spc1": "20181100", - "td2": "20181100", - "th": "20181100", - "th2": "20181100", - "td3": "20201200", - "cisco-8000": "" - } - } - } - } + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ] } } } diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index a6cb8de094..e8c66fcbbe 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -166,7 +166,7 @@ def validate_field_operation(self, old_config, target_config): if any(op['op'] == operation and field == op['path'] for op in patch): raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field)) - def _invoke_validating_function(cmd, jsonpatch_element): + def _invoke_validating_function(cmd): # cmd is in the format as . 
method_name = cmd.split(".")[-1] module_name = ".".join(cmd.split(".")[0:-1]) @@ -174,7 +174,7 @@ def _invoke_validating_function(cmd, jsonpatch_element): raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name)) module = importlib.import_module(module_name, package=None) method_to_call = getattr(module, method_name) - return method_to_call(jsonpatch_element) + return method_to_call() if os.path.exists(GCU_FIELD_OP_CONF_FILE): with open(GCU_FIELD_OP_CONF_FILE, "r") as s: @@ -194,7 +194,7 @@ def _invoke_validating_function(cmd, jsonpatch_element): validating_functions.update(tables.get(table, {}).get("field_operation_validators", [])) for function in validating_functions: - if not _invoke_validating_function(function, element): + if not _invoke_validating_function(function): raise IllegalPatchOperationError("Modification of {} table is illegal- validating function {} returned False".format(table, function)) diff --git a/tests/generic_config_updater/field_operation_validator_test.py b/tests/generic_config_updater/field_operation_validator_test.py deleted file mode 100644 index 3b08b31d89..0000000000 --- a/tests/generic_config_updater/field_operation_validator_test.py +++ /dev/null @@ -1,142 +0,0 @@ -import io -import unittest -import mock -import json -import subprocess -import generic_config_updater -import generic_config_updater.field_operation_validators as fov -import generic_config_updater.gu_common as gu_common - -from unittest.mock import MagicMock, Mock, mock_open -from mock import patch -from sonic_py_common.device_info import get_hwsku, get_sonic_version_info - - -class TestValidateFieldOperation(unittest.TestCase): - - @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="unknown")) - def test_rdma_config_update_validator_unknown_asic(self): - 
patch_element = {"path": "/PFC_WD/Ethernet4/restoration_time", "op": "replace", "value": "234234"} - assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False - - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) - @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="td3")) - @patch("os.path.exists", mock.Mock(return_value=True)) - @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "ingress_lossless_pool/size", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"td3": "20221100"}}}}}}}')) - def test_rdma_config_update_validator_td3_asic_invalid_version(self): - patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "replace", "value": "234234"} - assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False - - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) - @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) - @patch("os.path.exists", mock.Mock(return_value=True)) - @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) - def test_rdma_config_update_validator_spc_asic_valid_version(self): - patch_element = {"path": "/PFC_WD/Ethernet8/detection_time", "op": "remove"} - assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == True - - 
@patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) - @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) - @patch("os.path.exists", mock.Mock(return_value=True)) - @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"spc1": "20181100"}}}}}}}')) - def test_rdma_config_update_validator_spc_asic_invalid_op(self): - patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "remove"} - assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False - - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) - @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) - @patch("os.path.exists", mock.Mock(return_value=True)) - @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) - def test_rdma_config_update_validator_spc_asic_other_field(self): - patch_element = {"path": "/PFC_WD/Ethernet8/other_field", "op": "add", "value": "sample_value"} - assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False - - def test_validate_field_operation_illegal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, 
config_wrapper.validate_field_operation, old_config, target_config) - - def test_validate_field_operation_legal__rm_loopback1(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__rm_loopback0(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - -class TestGetAsicName(unittest.TestCase): - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_spc1(self, mock_popen, mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'mellanox'} - mock_popen.return_value = mock.Mock() - mock_popen.return_value.communicate.return_value = ["Mellanox-SN2700-D48C8", 0] - self.assertEqual(fov.get_asic_name(), "spc1") - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_th(self, mock_popen, mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} - mock_popen.return_value = mock.Mock() - mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b960", 0] - self.assertEqual(fov.get_asic_name(), "th") - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_th2(self, mock_popen, 
mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} - mock_popen.return_value = mock.Mock() - mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b971", 0] - self.assertEqual(fov.get_asic_name(), "th2") - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_td2(self, mock_popen, mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} - mock_popen.return_value = mock.Mock() - mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b850", 0] - self.assertEqual(fov.get_asic_name(), "td2") - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_td3(self, mock_popen, mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} - mock_popen.return_value = mock.Mock() - mock_popen.return_value.communicate.return_value = ["Broadcom Limited Device b870", 0] - self.assertEqual(fov.get_asic_name(), "td3") - - @patch('sonic_py_common.device_info.get_sonic_version_info') - @patch('subprocess.Popen') - def test_get_asic_cisco(self, mock_popen, mock_get_sonic_version_info): - mock_get_sonic_version_info.return_value = {'asic_type': 'cisco-8000'} - self.assertEqual(fov.get_asic_name(), "cisco-8000") diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index db625e8cd1..3f744e20ca 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -1,7 +1,6 @@ import jsonpatch import unittest import copy -import mock from unittest.mock import MagicMock, Mock from mock import patch @@ -32,8 +31,7 @@ def get_running_config(): class TestFeaturePatchApplication(unittest.TestCase): def setUp(self): 
self.config_wrapper = ConfigWrapper() - - @patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", mock.Mock(return_value=True)) + def test_feature_patch_application_success(self): # Format of the JSON file containing the test-cases: # @@ -54,7 +52,6 @@ def test_feature_patch_application_success(self): with self.subTest(name=test_case_name): self.run_single_success_case_applier(data[test_case_name]) - @patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", mock.Mock(return_value=True)) def test_feature_patch_application_failure(self): # Fromat of the JSON file containing the test-cases: # diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a2a776c0bb..a319a25ead 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -71,6 +71,62 @@ def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"})) + def test_validate_field_operation_legal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}} + config_wrapper = gu_common.ConfigWrapper() + config_wrapper.validate_field_operation(old_config, target_config) + + def test_validate_field_operation_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": 
"invalid-asic", "build_version": "SONiC.20181131"})) + def test_validate_field_modification_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + + def test_validate_field_operation_legal__rm_loopback1(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + config_wrapper.validate_field_operation(old_config, target_config) + + def test_validate_field_operation_illegal__rm_loopback0(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + def test_ctor__default_values_set(self): config_wrapper = gu_common.ConfigWrapper() From b5c1032573d7db53b6b9f896250f1d8867374ec0 Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Tue, 30 May 2023 10:15:28 -0700 Subject: [PATCH 17/35] [db-migrator] Fix hwsku match for 6100 and add errors when hwsku is None (#2821) * Fix hwsku match for 6100 and add errors when hwsku is None * Asic type fix --- scripts/db_migrator.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index f1bc404d47..cc9506f606 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ 
-76,11 +76,14 @@ def __init__(self, namespace, socket=None): self.loglevelDB.connect(self.loglevelDB.LOGLEVEL_DB) version_info = device_info.get_sonic_version_info() - asic_type = version_info.get('asic_type') - self.asic_type = asic_type + self.asic_type = version_info.get('asic_type') + if not self.asic_type: + log.log_error("ASIC type information not obtained. DB migration will not be reliable") self.hwsku = device_info.get_hwsku() + if not self.hwsku: + log.log_error("HWSKU information not obtained. DB migration will not be reliable") - if asic_type == "mellanox": + if self.asic_type == "mellanox": from mellanox_buffer_migrator import MellanoxBufferMigrator self.mellanox_buffer_migrator = MellanoxBufferMigrator(self.configDB, self.appDB, self.stateDB) @@ -989,7 +992,7 @@ def common_migration_ops(self): # removed together with calling to migrate_copp_table function. if self.asic_type != "mellanox": self.migrate_copp_table() - if self.asic_type == "broadcom" and 'Force10-S6100' in self.hwsku: + if self.asic_type == "broadcom" and 'Force10-S6100' in str(self.hwsku): self.migrate_mgmt_ports_on_s6100() else: log.log_notice("Asic Type: {}, Hwsku: {}".format(self.asic_type, self.hwsku)) From b2c29b0b0c072bbabc15f36626d5bb7651a37e0a Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Wed, 31 May 2023 14:26:41 +0800 Subject: [PATCH 18/35] [config] Generate sysinfo in single asic (#2856) What I did It is a bug introduced from #2836. Need to generate sysinfo for single asic. How I did it Reuse the mac and platform in existing device runnning config and generate that if missing. 
How to verify it Unit test --- config/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index f6bec33f8f..03feffaf6d 100644 --- a/config/main.py +++ b/config/main.py @@ -1906,10 +1906,10 @@ def override_config_table(db, input_config_db, dry_run): ns_config_input = config_input["localhost"] else: ns_config_input = config_input[ns] - # Generate sysinfo if missing in ns_config_input - generate_sysinfo(current_config, ns_config_input, ns) else: ns_config_input = config_input + # Generate sysinfo if missing in ns_config_input + generate_sysinfo(current_config, ns_config_input, ns) updated_config = update_config(current_config, ns_config_input) yang_enabled = device_info.is_yang_config_validation_enabled(config_db) From 6e0ee3e7f20a97cbb58b92a148f08ef5bad8c1f7 Mon Sep 17 00:00:00 2001 From: Oleksandr Ivantsiv Date: Wed, 31 May 2023 09:36:23 +0200 Subject: [PATCH 19/35] [CRM][DASH] Extend CRM utility to support DASH resources. (#2800) - What I did Extend CRM utility to support DASH resources. - How I did it Add "dash" sub-group to "crm config" and "crm show" commands. "dash" sub-group will be available for the user only if SONiC runs on the DPU. - How to verify it Compile sonic-utilities package. The tests will run automatically. To cover dash-related functionality new test tests/crm_dash_test.py was added. Code coverage is the following: crm/dash_config.py - 99% crm/dash_show.py - 98% crm/main.py - 92% - Previous command output (if the output of a command-line utility has changed) The existing commands are not affected. 
- New command output (if the output of a command-line utility has changed) crm config thresholds dash [vnet|eni|eni-ether-address] type [percentage|used|count] thresholds dash [vnet|eni|eni-ether-address] [low|high] thresholds dash [ipv4|ipv6] [inbound|outbound] routing type [percentage|used|count] thresholds dash [ipv4|ipv6] [inbound|outbound] routing [low|high] thresholds dash [ipv4|ipv6] pa-validation [percentage|used|count] thresholds dash [ipv4|ipv6] pa-validation [low|high] thresholds dash [ipv4|ipv6] outbound ca-to-pa [percentage|used|count] thresholds dash [ipv4|ipv6] outbound ca-to-pa [low|high] thresholds dash [ipv4|ipv6] acl group [percentage|used|count] thresholds dash [ipv4|ipv6] acl group [low|high] thresholds dash [ipv4|ipv6] acl rule [percentage|used|count] thresholds dash [ipv4|ipv6] acl rule [low|high] crm show [resources|thresholds] dash [vnet|eni|eni-ether-address] [resources|thresholds] dash [ipv4|ipv6] [inbound|outbound] routing [resources|thresholds] dash [ipv4|ipv6] pa-validation [resources|thresholds] dash [ipv4|ipv6] outbound ca-to-pa [resources|thresholds] dash [ipv4|ipv6] acl group [resources|thresholds] dash [ipv4|ipv6] acl rule show thresholds all show resources all Signed-off-by: Oleksandr Ivantsiv --- crm/dash_config.py | 155 ++++++++++++++++++++++++++++++++ crm/dash_show.py | 119 ++++++++++++++++++++++++ crm/main.py | 150 +++++++++++++++++++++++++++---- tests/crm_dash/config_db.json | 55 ++++++++++++ tests/crm_dash/counters_db.json | 54 +++++++++++ tests/crm_dash_test.py | 150 +++++++++++++++++++++++++++++++ 6 files changed, 666 insertions(+), 17 deletions(-) create mode 100644 crm/dash_config.py create mode 100644 crm/dash_show.py create mode 100644 tests/crm_dash/config_db.json create mode 100644 tests/crm_dash/counters_db.json create mode 100644 tests/crm_dash_test.py diff --git a/crm/dash_config.py b/crm/dash_config.py new file mode 100644 index 0000000000..5412d66c51 --- /dev/null +++ b/crm/dash_config.py @@ -0,0 +1,155 @@ 
+import click + +def get_attr_full_name(ctx, threshold): + attr = 'dash_' + + if ctx.obj["crm"].addr_family: + attr += ctx.obj["crm"].addr_family + '_' + + if ctx.obj["crm"].direction: + attr += ctx.obj["crm"].direction + '_' + + attr += ctx.obj["crm"].res_type + '_' + threshold + return attr + +@click.command('type') +@click.argument('value', type=click.Choice(['percentage', 'used', 'free'])) +@click.pass_context +def config_dash_type(ctx, value): + """CRM threshold type configuration""" + ctx.obj["crm"].config(get_attr_full_name(ctx, 'threshold_type'), value) + +@click.command('low') +@click.argument('value', type=click.INT) +@click.pass_context +def config_dash_low(ctx, value): + """CRM low threshold configuration""" + ctx.obj["crm"].config(get_attr_full_name(ctx, 'low_threshold'), value) + +@click.command('high') +@click.argument('value', type=click.INT) +@click.pass_context +def config_dash_high(ctx, value): + """CRM high threshold configuration""" + ctx.obj["crm"].config(get_attr_full_name(ctx, 'high_threshold'), value) + +def group_add_thresholds(group): + group.add_command(config_dash_type) + group.add_command(config_dash_low) + group.add_command(config_dash_high) + +@click.group('dash') +@click.pass_context +def config_dash(ctx): + """CRM configuration for DASH resource""" + pass + +@config_dash.group('ipv4') +@click.pass_context +def config_dash_ipv4(ctx): + """DASH CRM resource IPv4 address family""" + ctx.obj["crm"].addr_family = 'ipv4' + +@config_dash.group('ipv6') +@click.pass_context +def config_dash_ipv6(ctx): + """DASH CRM resource IPv6 address family""" + ctx.obj["crm"].addr_family = 'ipv6' + +@click.group('inbound') +@click.pass_context +def config_dash_inbound(ctx): + """DASH CRM inbound direction resource""" + ctx.obj["crm"].direction = 'inbound' + +config_dash_ipv4.add_command(config_dash_inbound) +config_dash_ipv6.add_command(config_dash_inbound) + +@click.group('outbound') +@click.pass_context +def config_dash_outbound(ctx): + """DASH CRM 
outbound direction resource""" + ctx.obj["crm"].direction = 'outbound' + +config_dash_ipv4.add_command(config_dash_outbound) +config_dash_ipv6.add_command(config_dash_outbound) + +@config_dash.group('eni') +@click.pass_context +def config_dash_eni(ctx): + """CRM configuration for DASH ENI resource""" + ctx.obj["crm"].res_type = 'eni' + +group_add_thresholds(config_dash_eni) + +@config_dash.group('eni-ether-address') +@click.pass_context +def config_dash_eni_ether_address_map(ctx): + """CRM configuration for DASH ENI ETHER address map entry""" + ctx.obj["crm"].res_type = 'eni_ether_address_map' + +group_add_thresholds(config_dash_eni_ether_address_map) + +@config_dash.group('vnet') +@click.pass_context +def config_dash_vnet(ctx): + """CRM configuration for DASH VNET resource""" + ctx.obj["crm"].res_type = 'vnet' + +group_add_thresholds(config_dash_vnet) + +@click.group('routing') +@click.pass_context +def config_dash_routing(ctx): + """CRM configuration for DASH inbound routes""" + ctx.obj["crm"].res_type = 'routing' + +group_add_thresholds(config_dash_routing) +config_dash_inbound.add_command(config_dash_routing) +config_dash_outbound.add_command(config_dash_routing) + +@click.group('pa-validation') +@click.pass_context +def config_dash_pa_validation(ctx): + """CRM configuration for DASH PA validation entries""" + ctx.obj["crm"].res_type = 'pa_validation' + +group_add_thresholds(config_dash_pa_validation) +config_dash_ipv4.add_command(config_dash_pa_validation) +config_dash_ipv6.add_command(config_dash_pa_validation) + +@click.group('ca-to-pa') +@click.pass_context +def config_dash_ca_to_pa(ctx): + """CRM configuration for DASH CA to PA entries""" + ctx.obj["crm"].res_type = 'ca_to_pa' + +group_add_thresholds(config_dash_ca_to_pa) +config_dash_outbound.add_command(config_dash_ca_to_pa) + +@click.group('acl') +@click.pass_context +def config_dash_acl(ctx): + """DASH CRM ACL resource""" + +config_dash_ipv4.add_command(config_dash_acl) 
+config_dash_ipv6.add_command(config_dash_acl) + +@click.group('group') +@click.pass_context +def config_dash_acl_group(ctx): + """CRM configuration for DASH ACL group entries""" + ctx.obj["crm"].res_type = 'acl_group' + +group_add_thresholds(config_dash_acl_group) +config_dash_acl.add_command(config_dash_acl_group) + +@click.group('rule') +@click.pass_context +def config_dash_acl_rule(ctx): + """CRM configuration for DASH ACL rule entries""" + ctx.obj["crm"].res_type = 'acl_rule' + +group_add_thresholds(config_dash_acl_rule) +config_dash_acl.add_command(config_dash_acl_rule) + diff --git a/crm/dash_show.py b/crm/dash_show.py new file mode 100644 index 0000000000..6fa59dc580 --- /dev/null +++ b/crm/dash_show.py @@ -0,0 +1,119 @@ +import click + +def show_resource(ctx, resource): + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds(resource) + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources(resource) + +@click.group('dash') +@click.pass_context +def show_dash(ctx): + """Show CRM information for DASH""" + pass + +@show_dash.group('ipv4') +@click.pass_context +def show_dash_ipv4(ctx): + """Show CRM information for IPv4 address family""" + ctx.obj["crm"].addr_family = 'ipv4' + +@show_dash.group('ipv6') +@click.pass_context +def show_dash_ipv6(ctx): + """Show CRM information for IPv6 address family""" + ctx.obj["crm"].addr_family = 'ipv6' + +@click.group('inbound') +@click.pass_context +def show_dash_inbound(ctx): + """Show CRM information for inbound direction""" + ctx.obj["crm"].direction = 'inbound' + +show_dash_ipv4.add_command(show_dash_inbound) +show_dash_ipv6.add_command(show_dash_inbound) + +@click.group('outbound') +@click.pass_context +def show_dash_outbound(ctx): + """Show CRM information for outbound direction""" + ctx.obj["crm"].direction = 'outbound' + +show_dash_ipv4.add_command(show_dash_outbound) +show_dash_ipv6.add_command(show_dash_outbound) + +@show_dash.command('vnet') +@click.pass_context +def 
show_dash_vnet(ctx): + """Show CRM information for VNETs""" + show_resource(ctx, 'dash_vnet') + +@show_dash.command('eni') +@click.pass_context +def show_dash_eni(ctx): + """Show CRM information for ENIs""" + show_resource(ctx, 'dash_eni') + +@show_dash.command('eni-ether-address') +@click.pass_context +def show_dash_eni_ether_address_map(ctx): + """Show CRM information for ENI ETHER address map entries""" + show_resource(ctx, 'dash_eni_ether_address_map') + +@click.command('routing') +@click.pass_context +def show_dash_routing(ctx): + """Show CRM information for inbound routes""" + resource = f'dash_{ctx.obj["crm"].addr_family}_{ctx.obj["crm"].direction}_routing' + show_resource(ctx, resource) + +show_dash_inbound.add_command(show_dash_routing) +show_dash_outbound.add_command(show_dash_routing) + +@click.command('pa-validation') +@click.pass_context +def show_dash_pa_validation(ctx): + """Show CRM information for PA validation entries""" + resource = f'dash_{ctx.obj["crm"].addr_family}_pa_validation' + show_resource(ctx, resource) + +show_dash_ipv4.add_command(show_dash_pa_validation) +show_dash_ipv6.add_command(show_dash_pa_validation) + +@click.command('ca-to-pa') +@click.pass_context +def show_dash_ca_to_pa(ctx): + """Show CRM information for CA to PA entries""" + resource = f'dash_{ctx.obj["crm"].addr_family}_{ctx.obj["crm"].direction}_ca_to_pa' + show_resource(ctx, resource) + +show_dash_outbound.add_command(show_dash_ca_to_pa) + +@click.group('acl') +@click.pass_context +def show_dash_acl(ctx): + """Show CRM information for ACL resources""" + +show_dash_ipv4.add_command(show_dash_acl) +show_dash_ipv6.add_command(show_dash_acl) + +@click.command('group') +@click.pass_context +def show_dash_acl_group(ctx): + """Show CRM information for ACL group entries""" + resource = f'dash_{ctx.obj["crm"].addr_family}_acl_group' + show_resource(ctx, resource) + +show_dash_acl.add_command(show_dash_acl_group) + +@click.command('rule') +@click.pass_context +def 
show_dash_acl_rule(ctx): + """Show CRM information for ACL rule entries""" + resource = f'dash_{ctx.obj["crm"].addr_family}_acl_rule' + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds(resource) + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_acl_group_resources(resource) + +show_dash_acl.add_command(show_dash_acl_rule) diff --git a/crm/main.py b/crm/main.py index 9b0d06e89a..998ff23fc4 100644 --- a/crm/main.py +++ b/crm/main.py @@ -3,11 +3,38 @@ import click from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate - from sonic_py_common import multi_asic from utilities_common.general import load_db_config from utilities_common import multi_asic as multi_asic_util + +from sonic_py_common import device_info + +from .dash_config import config_dash +from .dash_show import show_dash + + +platform_info = device_info.get_platform_info() + + class Crm: + + thresholds = ( + "ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", + "nexthop_group_member", "nexthop_group", "acl_table", "acl_group", "acl_entry", + "acl_counter", "fdb_entry", "ipmc_entry", "snat_entry", "dnat_entry", "mpls_inseg", + "mpls_nexthop","srv6_nexthop", "srv6_my_sid_entry" + ) + + resources = ( + "ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", + "nexthop_group_member", "nexthop_group", "fdb_entry", "ipmc_entry", "snat_entry", "dnat_entry", + "mpls_inseg", "mpls_nexthop","srv6_nexthop", "srv6_my_sid_entry" + ) + + acl_resources = ( + "acl_table", "acl_group", "acl_entry", "acl_counter" + ) + def __init__(self, db=None): self.cli_mode = None self.addr_family = None @@ -16,6 +43,12 @@ def __init__(self, db=None): self.cfgdb = db self.multi_asic = multi_asic_util.MultiAsic() + def get_thresholds_list(self): + return list(self.thresholds) + + def get_resources_list(self): + return list(self.resources) + @multi_asic_util.run_on_multi_asic def 
config(self, attr, val): """ @@ -53,7 +86,6 @@ def show_thresholds(self, resource): """ CRM Handler to display thresholds information. """ - configdb = self.cfgdb if configdb is None: # Get the namespace list @@ -69,10 +101,7 @@ def show_thresholds(self, resource): if crm_info: if resource == 'all': - for res in ["ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", - "nexthop_group_member", "nexthop_group", "acl_table", "acl_group", "acl_entry", - "acl_counter", "fdb_entry", "ipmc_entry", "snat_entry", "dnat_entry", "mpls_inseg", - "mpls_nexthop","srv6_nexthop", "srv6_my_sid_entry"]: + for res in self.get_thresholds_list(): try: data.append([res, crm_info[res + "_threshold_type"], crm_info[res + "_low_threshold"], crm_info[res + "_high_threshold"]]) except KeyError: @@ -98,9 +127,7 @@ def get_resources(self, resource): if crm_stats: if resource == 'all': - for res in ["ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", - "nexthop_group_member", "nexthop_group", "fdb_entry", "ipmc_entry", "snat_entry", "dnat_entry", - "mpls_inseg", "mpls_nexthop","srv6_nexthop", "srv6_my_sid_entry"]: + for res in self.get_resources_list(): if 'crm_stats_' + res + "_used" in crm_stats.keys() and 'crm_stats_' + res + "_available" in crm_stats.keys(): data.append([res, crm_stats['crm_stats_' + res + "_used"], crm_stats['crm_stats_' + res + "_available"]]) else: @@ -205,6 +232,83 @@ def show_acl_table_resources(self): click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) click.echo() + def show_all_thresholds(self): + self.show_thresholds('all') + + def show_all_resources(self): + self.show_resources('all') + self.show_acl_resources() + self.show_acl_table_resources() + + +class DashCrm(Crm): + + dash_resources = ( + "dash_vnet", "dash_eni", "dash_eni_ether_address_map", "dash_ipv4_inbound_routing", "dash_ipv6_inbound_routing", + "dash_ipv4_outbound_routing", 
"dash_ipv6_outbound_routing", "dash_ipv4_pa_validation", "dash_ipv6_pa_validation", + "dash_ipv4_outbound_ca_to_pa", "dash_ipv6_outbound_ca_to_pa", "dash_ipv4_acl_group","dash_ipv6_acl_group" + ) + + dash_acl_group_resources = ( + "dash_ipv4_acl_rule", "dash_ipv6_acl_rule" + ) + + dash_thresholds = dash_resources + dash_acl_group_resources + + def __init__(self, *args, **kwargs): + self.direction = None + + super().__init__(*args, *kwargs) + + def get_thresholds_list(self): + thresholds = super().get_thresholds_list() + thresholds.extend(self.dash_thresholds) + return list(thresholds) + + def get_resources_list(self): + resources = super().get_resources_list() + resources.extend(self.dash_resources) + return list(resources) + + def show_all_resources(self): + super().show_all_resources() + self.show_acl_group_resources() + + def get_dash_acl_group_resources(self, resource=None): + # Retrieve all ACL table keys from CRM:ACL_TABLE_STATS + crm_acl_keys = self.db.keys(self.db.COUNTERS_DB, 'CRM:DASH_ACL_GROUP_STATS*') + data = [] + + for key in crm_acl_keys: + id = key.replace('CRM:DASH_ACL_GROUP_STATS:', '') + + crm_stats = self.db.get_all(self.db.COUNTERS_DB, key) + + query = [resource] if resource else self.dash_acl_group_resources + for res in query: + used = f'crm_stats_{res}_used' + available = f'crm_stats_{res}_available' + if used in crm_stats and available in crm_stats: + data.append([id, res, crm_stats[used], crm_stats[available]]) + + return data + + @multi_asic_util.run_on_multi_asic + def show_acl_group_resources(self, resource=None): + if self.multi_asic.is_multi_asic: + click.echo('\nError! 
Could not get CRM configuration.\n') + return + + header = ("DASH ACL Group ID", "Resource Name", "Used Count", "Available Count") + + data = [] + data = self.get_dash_acl_group_resources(resource) + + click.echo() + click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) + click.echo() + + @click.group() @click.pass_context def cli(ctx): @@ -217,8 +321,13 @@ def cli(ctx): # Load database config files load_db_config() + if device_info.get_platform_info().get('switch_type') == "dpu": + crm = DashCrm(db) + else: + crm = Crm(db) + context = { - "crm": Crm(db) + "crm": crm } ctx.obj = context @@ -290,7 +399,7 @@ def nexthop(ctx): """CRM configuration for nexthop resource""" ctx.obj["crm"].res_type = 'nexthop' -@route.command() +@click.command() @click.argument('value', type=click.Choice(['percentage', 'used', 'free'])) @click.pass_context def type(ctx, value): @@ -304,7 +413,7 @@ def type(ctx, value): ctx.obj["crm"].config(attr, value) -@route.command() +@click.command() @click.argument('value', type=click.INT) @click.pass_context def low(ctx, value): @@ -318,7 +427,7 @@ def low(ctx, value): ctx.obj["crm"].config(attr, value) -@route.command() +@click.command() @click.argument('value', type=click.INT) @click.pass_context def high(ctx, value): @@ -332,6 +441,9 @@ def high(ctx, value): ctx.obj["crm"].config(attr, value) +route.add_command(type) +route.add_command(low) +route.add_command(high) neighbor.add_command(type) neighbor.add_command(low) neighbor.add_command(high) @@ -480,6 +592,9 @@ def srv6_my_sid_entry(ctx): srv6_my_sid_entry.add_command(low) srv6_my_sid_entry.add_command(high) +if device_info.get_platform_info().get('switch_type') == "dpu": + thresholds.add_command(config_dash) + @cli.group() @click.pass_context def show(ctx): @@ -509,11 +624,9 @@ def thresholds(ctx): def all(ctx): """Show CRM information for all resources""" if ctx.obj["crm"].cli_mode == 'thresholds': - ctx.obj["crm"].show_thresholds('all') + 
ctx.obj["crm"].show_all_thresholds() elif ctx.obj["crm"].cli_mode == 'resources': - ctx.obj["crm"].show_resources('all') - ctx.obj["crm"].show_acl_resources() - ctx.obj["crm"].show_acl_table_resources() + ctx.obj["crm"].show_all_resources() @resources.group() @click.pass_context @@ -695,6 +808,9 @@ def srv6_my_sid_entry(ctx): thresholds.add_command(srv6_nexthop) thresholds.add_command(srv6_my_sid_entry) +if device_info.get_platform_info().get('switch_type') == "dpu": + resources.add_command(show_dash) + thresholds.add_command(show_dash) if __name__ == '__main__': cli() diff --git a/tests/crm_dash/config_db.json b/tests/crm_dash/config_db.json new file mode 100644 index 0000000000..f166b0b4f7 --- /dev/null +++ b/tests/crm_dash/config_db.json @@ -0,0 +1,55 @@ +{ + "CRM|Config": { + "expireat": 1680191314.928466, + "ttl": -0.001, + "type": "hash", + "value": { + "dash_eni_ether_address_map_high_threshold": "85", + "dash_eni_ether_address_map_low_threshold": "70", + "dash_eni_ether_address_map_threshold_type": "percentage", + "dash_eni_high_threshold": "85", + "dash_eni_low_threshold": "70", + "dash_eni_threshold_type": "percentage", + "dash_ipv4_acl_group_high_threshold": "85", + "dash_ipv4_acl_group_low_threshold": "70", + "dash_ipv4_acl_group_threshold_type": "percentage", + "dash_ipv4_acl_rule_high_threshold": "85", + "dash_ipv4_acl_rule_low_threshold": "70", + "dash_ipv4_acl_rule_threshold_type": "percentage", + "dash_ipv4_inbound_routing_high_threshold": "85", + "dash_ipv4_inbound_routing_low_threshold": "70", + "dash_ipv4_inbound_routing_threshold_type": "percentage", + "dash_ipv4_outbound_ca_to_pa_high_threshold": "85", + "dash_ipv4_outbound_ca_to_pa_low_threshold": "70", + "dash_ipv4_outbound_ca_to_pa_threshold_type": "percentage", + "dash_ipv4_outbound_routing_high_threshold": "85", + "dash_ipv4_outbound_routing_low_threshold": "70", + "dash_ipv4_outbound_routing_threshold_type": "percentage", + "dash_ipv4_pa_validation_high_threshold": "85", + 
"dash_ipv4_pa_validation_low_threshold": "70", + "dash_ipv4_pa_validation_threshold_type": "percentage", + "dash_ipv6_acl_group_high_threshold": "85", + "dash_ipv6_acl_group_low_threshold": "70", + "dash_ipv6_acl_group_threshold_type": "percentage", + "dash_ipv6_acl_rule_high_threshold": "85", + "dash_ipv6_acl_rule_low_threshold": "70", + "dash_ipv6_acl_rule_threshold_type": "percentage", + "dash_ipv6_inbound_routing_high_threshold": "85", + "dash_ipv6_inbound_routing_low_threshold": "70", + "dash_ipv6_inbound_routing_threshold_type": "percentage", + "dash_ipv6_outbound_ca_to_pa_high_threshold": "85", + "dash_ipv6_outbound_ca_to_pa_low_threshold": "70", + "dash_ipv6_outbound_ca_to_pa_threshold_type": "percentage", + "dash_ipv6_outbound_routing_high_threshold": "85", + "dash_ipv6_outbound_routing_low_threshold": "70", + "dash_ipv6_outbound_routing_threshold_type": "percentage", + "dash_ipv6_pa_validation_high_threshold": "85", + "dash_ipv6_pa_validation_low_threshold": "70", + "dash_ipv6_pa_validation_threshold_type": "percentage", + "dash_vnet_high_threshold": "85", + "dash_vnet_low_threshold": "70", + "dash_vnet_threshold_type": "percentage" + } + } +} + diff --git a/tests/crm_dash/counters_db.json b/tests/crm_dash/counters_db.json new file mode 100644 index 0000000000..b0576a72d1 --- /dev/null +++ b/tests/crm_dash/counters_db.json @@ -0,0 +1,54 @@ +{ + "CRM:DASH_ACL_GROUP_STATS:0x6a00000000002d": { + "expireat": 1680172664.591134, + "ttl": -0.001, + "type": "hash", + "value": { + "crm_stats_dash_ipv4_acl_rule_available": "200000000", + "crm_stats_dash_ipv4_acl_rule_used": "100" + } + }, + "CRM:DASH_ACL_GROUP_STATS:0x6a00000000009d": { + "expireat": 1680172664.5912013, + "ttl": -0.001, + "type": "hash", + "value": { + "crm_stats_dash_ipv6_acl_rule_available": "200000000", + "crm_stats_dash_ipv6_acl_rule_used": "1000" + } + }, + "CRM:STATS": { + "expireat": 1680172664.5911696, + "ttl": -0.001, + "type": "hash", + "value": { + "crm_stats_dash_eni_available": 
"1000000", + "crm_stats_dash_eni_ether_address_map_available": "1000000", + "crm_stats_dash_eni_ether_address_map_used": "9", + "crm_stats_dash_eni_used": "9", + "crm_stats_dash_ipv4_acl_group_available": "200000000", + "crm_stats_dash_ipv4_acl_group_used": "27", + "crm_stats_dash_ipv4_inbound_routing_available": "200000000", + "crm_stats_dash_ipv4_inbound_routing_used": "9", + "crm_stats_dash_ipv4_outbound_ca_to_pa_available": "1000000", + "crm_stats_dash_ipv4_outbound_ca_to_pa_used": "0", + "crm_stats_dash_ipv4_outbound_routing_available": "1000000", + "crm_stats_dash_ipv4_outbound_routing_used": "9", + "crm_stats_dash_ipv4_pa_validation_available": "1000000", + "crm_stats_dash_ipv4_pa_validation_used": "0", + "crm_stats_dash_ipv6_acl_group_available": "200000000", + "crm_stats_dash_ipv6_acl_group_used": "0", + "crm_stats_dash_ipv6_inbound_routing_available": "200000000", + "crm_stats_dash_ipv6_inbound_routing_used": "0", + "crm_stats_dash_ipv6_outbound_ca_to_pa_available": "1000000", + "crm_stats_dash_ipv6_outbound_ca_to_pa_used": "0", + "crm_stats_dash_ipv6_outbound_routing_available": "1000000", + "crm_stats_dash_ipv6_outbound_routing_used": "0", + "crm_stats_dash_ipv6_pa_validation_available": "1000000", + "crm_stats_dash_ipv6_pa_validation_used": "0", + "crm_stats_dash_vnet_available": "200000000", + "crm_stats_dash_vnet_used": "2" + } + } +} + diff --git a/tests/crm_dash_test.py b/tests/crm_dash_test.py new file mode 100644 index 0000000000..23b89658c7 --- /dev/null +++ b/tests/crm_dash_test.py @@ -0,0 +1,150 @@ +import os +import sys +import mock +import pytest +from importlib import reload +from tabulate import tabulate + +from click.testing import CliRunner +from utilities_common.db import Db +from .mock_tables import dbconnector + +import crm.main as crm + + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "crm_dash") + + +@mock.patch('sonic_py_common.device_info.get_platform_info', 
mock.MagicMock(return_value={"switch_type": "dpu"})) +class TestCrmDash(object): + + dash_thresholds = [ + ("dash_vnet", ('dash', 'vnet')), + ("dash_eni", ('dash', 'eni')), + ("dash_eni_ether_address_map", ('dash', 'eni-ether-address')), + ("dash_ipv4_inbound_routing", ('dash', 'ipv4', 'inbound', 'routing')), + ("dash_ipv6_inbound_routing", ('dash', 'ipv6', 'inbound', 'routing')), + ("dash_ipv4_outbound_routing", ('dash', 'ipv4', 'outbound', 'routing')), + ("dash_ipv6_outbound_routing", ('dash', 'ipv6', 'outbound', 'routing')), + ("dash_ipv4_pa_validation", ('dash', 'ipv4', 'pa-validation')), + ("dash_ipv6_pa_validation", ('dash', 'ipv6', 'pa-validation')), + ("dash_ipv4_outbound_ca_to_pa", ('dash', 'ipv4', 'outbound', 'ca-to-pa')), + ("dash_ipv6_outbound_ca_to_pa", ('dash', 'ipv6', 'outbound', 'ca-to-pa')), + ("dash_ipv4_acl_group", ('dash', 'ipv4', 'acl', 'group')), + ("dash_ipv6_acl_group", ('dash', 'ipv6', 'acl', 'group')), + ("dash_ipv4_acl_rule", ('dash', 'ipv4', 'acl', 'rule')), + ("dash_ipv6_acl_rule", ('dash', 'ipv6', 'acl', 'rule')), + ] + + dash_resources = [ + ("dash_vnet", ('dash', 'vnet'), (2, 200000000)), + ("dash_eni", ('dash', 'eni'), (9, 1000000)), + ("dash_eni_ether_address_map", ('dash', 'eni-ether-address'), (9, 1000000)), + ("dash_ipv4_inbound_routing", ('dash', 'ipv4', 'inbound', 'routing'), (9, 200000000)), + ("dash_ipv4_outbound_routing", ('dash', 'ipv4', 'outbound', 'routing'), (9, 1000000)), + ("dash_ipv6_inbound_routing", ('dash', 'ipv6', 'inbound', 'routing'), (0, 200000000)), + ("dash_ipv6_outbound_routing", ('dash', 'ipv6', 'outbound', 'routing'), (0, 1000000)), + ("dash_ipv4_pa_validation", ('dash', 'ipv4', 'pa-validation'), (0, 1000000)), + ("dash_ipv6_pa_validation", ('dash', 'ipv6', 'pa-validation'), (0, 1000000)), + ("dash_ipv4_outbound_ca_to_pa", ('dash', 'ipv4', 'outbound', 'ca-to-pa'), (0, 1000000)), + ("dash_ipv6_outbound_ca_to_pa", ('dash', 'ipv6', 'outbound', 'ca-to-pa'), (0, 1000000)), + ("dash_ipv4_acl_group", ('dash', 
'ipv4', 'acl', 'group'), (27, 200000000)), + ("dash_ipv6_acl_group", ('dash', 'ipv6', 'acl', 'group'), (0, 200000000)), + ] + + dash_acl_group_resources = [ + ("dash_ipv4_acl_rule", ('dash', 'ipv4', 'acl', 'rule'), "0x6a00000000002d", (100, 200000000)), + ("dash_ipv6_acl_rule", ('dash', 'ipv6', 'acl', 'rule'), "0x6a00000000009d", (1000, 200000000)), + ] + + dash_thresholds_header = ("Resource Name", "Threshold Type", "Low Threshold", "High Threshold") + dash_resources_header = ("Resource Name", "Used Count", "Available Count") + dash_acl_group_resources_header = ("DASH ACL Group ID", "Resource Name", "Used Count", "Available Count") + + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db') + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, "counters_db") + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + dbconnector.dedicated_dbs['COUNTERS_DB'] = None + dbconnector.load_namespace_config() + + @pytest.mark.parametrize("obj, cmd", dash_thresholds) + def test_crm_show_thresholds(self, obj, cmd): + reload(crm) + + db = Db() + runner = CliRunner() + result = runner.invoke(crm.cli, ('show', 'thresholds') + cmd, obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + expected_output = tabulate([(obj, "percentage", "70", "85")], headers=self.dash_thresholds_header, tablefmt="simple", missingval="") + assert result.output == "\n" + expected_output + "\n\n" + + result = runner.invoke(crm.cli, ('config', 'thresholds') + cmd + ('high', '90'), obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + result = runner.invoke(crm.cli, ('config', 'thresholds') + cmd + ('low', '60'), obj=db) + print(sys.stderr, result.output) + assert 
result.exit_code == 0 + + result = runner.invoke(crm.cli, ('show', 'thresholds') + cmd, obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + expected_output = tabulate([(obj, "percentage", "60", "90")], headers=self.dash_thresholds_header, tablefmt="simple", missingval="") + assert result.output == "\n" + expected_output + "\n\n" + + def test_crm_show_all_thresholds(self): + reload(crm) + + db = Db() + runner = CliRunner() + result = runner.invoke(crm.cli, ('show', 'thresholds', 'all'), obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + table = [] + for obj in self.dash_thresholds: + table.append((obj[0], "percentage", "70", "85")) + + expected_output = tabulate(table, headers=self.dash_thresholds_header, tablefmt="simple", missingval="") + assert result.output == "\n" + expected_output + "\n\n" + + @pytest.mark.parametrize("obj, cmd, cnt", dash_resources) + def test_crm_show_resources(self, obj, cmd, cnt): + reload(crm) + + db = Db() + runner = CliRunner() + result = runner.invoke(crm.cli, ('show', 'resources') + cmd, obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + expected_output = tabulate([(obj,) + cnt], headers=self.dash_resources_header, tablefmt="simple", missingval="") + assert result.output == "\n" + expected_output + "\n\n" + + @pytest.mark.parametrize("obj, cmd, obj_id, cnt", dash_acl_group_resources) + def test_crm_show_acl_group_resources(self, obj, cmd, obj_id, cnt): + reload(crm) + + db = Db() + runner = CliRunner() + result = runner.invoke(crm.cli, ('show', 'resources') + cmd, obj=db) + print(sys.stderr, result.output) + assert result.exit_code == 0 + + expected_output = tabulate([(obj_id, obj) + cnt], headers=self.dash_acl_group_resources_header, tablefmt="simple", missingval="") + assert result.output == "\n" + expected_output + "\n\n" + From 575005727066022ae9153737abe554e94c7d4fec Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 31 May 2023 09:00:24 -0400 Subject: 
[PATCH 20/35] [utilities_common] replace shell=True (#2718) #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. #### How I did it remove shell=True in utilities_common.cli.run_command() function. `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Pass UT Manual test Signed-off-by: Mai Bui --- clear/main.py | 16 +- config/main.py | 370 +++++++++--------- config/plugins/mlnx.py | 8 +- config/syslog.py | 8 +- config/vlan.py | 25 +- scripts/sonic-bootchart | 8 +- show/acl.py | 10 +- show/chassis_modules.py | 20 +- show/dropcounters.py | 12 +- show/fabric.py | 16 +- show/flow_counters.py | 20 +- show/gearbox.py | 4 +- show/interfaces/__init__.py | 128 +++---- show/kdump.py | 12 +- show/main.py | 12 +- show/nat.py | 29 +- show/platform.py | 22 +- show/processes.py | 6 +- show/system_health.py | 6 +- show/vxlan.py | 6 +- tests/chassis_modules_test.py | 14 +- tests/clear_test.py | 33 ++ tests/config_int_ip_test.py | 2 +- tests/config_test.py | 430 +++++++++++++++++++-- tests/conftest.py | 2 +- tests/ecn_test.py | 2 +- tests/fabricstat_test.py | 16 +- tests/fdbshow_test.py | 48 +-- tests/flow_counter_stats_test.py | 14 +- tests/multi_asic_intfutil_test.py | 20 +- tests/multi_asic_queue_counter_test.py | 4 +- tests/pfcstat_test.py | 10 +- tests/portstat_test.py | 50 +-- tests/sfp_test.py | 26 +- tests/show_ip_int_test.py | 20 +- tests/show_platform_test.py | 10 +- tests/show_test.py | 496 ++++++++++++++++++++++++- tests/sonic_bootchart_test.py | 4 +- tests/utils.py | 2 +- tests/vlan_test.py | 12 +- utilities_common/bgp_util.py | 8 +- utilities_common/cli.py | 47 ++- 42 files changed, 1451 insertions(+), 557 deletions(-) diff --git a/clear/main.py b/clear/main.py index 
19c1b6073a..21c87d555b 100755 --- a/clear/main.py +++ b/clear/main.py @@ -492,10 +492,10 @@ def flowcnt_route(ctx, namespace): """Clear all route flow counters""" exit_if_route_flow_counter_not_support() if ctx.invoked_subcommand is None: - command = "flow_counters_stat -c -t route" + command = ['flow_counters_stat', '-c', '-t', 'route'] # None namespace means default namespace if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command) @@ -506,12 +506,12 @@ def flowcnt_route(ctx, namespace): @click.argument('prefix-pattern', required=True) def pattern(prefix_pattern, vrf, namespace): """Clear route flow counters by pattern""" - command = "flow_counters_stat -c -t route --prefix_pattern {}".format(prefix_pattern) + command = ['flow_counters_stat', '-c', '-t', 'route', '--prefix_pattern', str(prefix_pattern)] if vrf: - command += ' --vrf {}'.format(vrf) + command += ['--vrf', str(vrf)] # None namespace means default namespace if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command) @@ -522,12 +522,12 @@ def pattern(prefix_pattern, vrf, namespace): @click.argument('prefix', required=True) def route(prefix, vrf, namespace): """Clear route flow counters by prefix""" - command = "flow_counters_stat -c -t route --prefix {}".format(prefix) + command = ['flow_counters_stat', '-c', '-t', 'route', '--prefix', str(prefix)] if vrf: - command += ' --vrf {}'.format(vrf) + command += ['--vrf', str(vrf)] # None namespace means default namespace if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command) diff --git a/config/main.py b/config/main.py index 03feffaf6d..aa207455af 100644 --- a/config/main.py +++ b/config/main.py @@ -23,6 +23,7 @@ from portconfig import get_child_ports from socket import AF_INET, AF_INET6 from sonic_py_common import device_info, 
multi_asic +from sonic_py_common.general import getstatusoutput_noshell from sonic_py_common.interface import get_interface_table_name, get_port_table_name, get_intf_longname from utilities_common import util_base from swsscommon import swsscommon @@ -635,10 +636,12 @@ def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname): def _change_hostname(hostname): current_hostname = os.uname()[1] if current_hostname != hostname: - clicommon.run_command('echo {} > /etc/hostname'.format(hostname), display_cmd=True) - clicommon.run_command('hostname -F /etc/hostname', display_cmd=True) - clicommon.run_command(r'sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True) - clicommon.run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True) + with open('/etc/hostname', 'w') as f: + f.write(str(hostname) + '\n') + clicommon.run_command(['hostname', '-F', '/etc/hostname'], display_cmd=True) + clicommon.run_command(['sed', '-i', r"/\s{}$/d".format(current_hostname), '/etc/hosts'], display_cmd=True) + with open('/etc/hosts', 'a') as f: + f.write("127.0.0.1 " + str(hostname) + '\n') def _clear_cbf(): CBF_TABLE_NAMES = [ @@ -858,45 +861,44 @@ def _stop_services(): try: subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) click.echo("Disabling container monitoring ...") - clicommon.run_command("sudo monit unmonitor container_checker") + clicommon.run_command(['sudo', 'monit', 'unmonitor', 'container_checker']) except subprocess.CalledProcessError as err: pass click.echo("Stopping SONiC target ...") - clicommon.run_command("sudo systemctl stop sonic.target --job-mode replace-irreversibly") + clicommon.run_command(['sudo', 'systemctl', 'stop', 'sonic.target', '--job-mode', 'replace-irreversibly']) def _get_sonic_services(): - out, _ = clicommon.run_command("systemctl list-dependencies --plain sonic.target | sed '1d'", return_cmd=True) - return (unit.strip() for unit in 
out.splitlines()) - + cmd = ['systemctl', 'list-dependencies', '--plain', 'sonic.target'] + out, _ = clicommon.run_command(cmd, return_cmd=True) + out = out.strip().split('\n')[1:] + return (unit.strip() for unit in out) def _reset_failed_services(): for service in _get_sonic_services(): - clicommon.run_command("systemctl reset-failed {}".format(service)) - + clicommon.run_command(['systemctl', 'reset-failed', str(service)]) def _restart_services(): click.echo("Restarting SONiC target ...") - clicommon.run_command("sudo systemctl restart sonic.target") + clicommon.run_command(['sudo', 'systemctl', 'restart', 'sonic.target']) try: subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) click.echo("Enabling container monitoring ...") - clicommon.run_command("sudo monit monitor container_checker") + clicommon.run_command(['sudo', 'monit', 'monitor', 'container_checker']) except subprocess.CalledProcessError as err: pass # Reload Monit configuration to pick up new hostname in case it changed click.echo("Reloading Monit configuration ...") - clicommon.run_command("sudo monit reload") - + clicommon.run_command(['sudo', 'monit', 'reload']) def _per_namespace_swss_ready(service_name): - out, _ = clicommon.run_command("systemctl show {} --property ActiveState --value".format(service_name), return_cmd=True) + out, _ = clicommon.run_command(['systemctl', 'show', str(service_name), '--property', 'ActiveState', '--value'], return_cmd=True) if out.strip() != "active": return False - out, _ = clicommon.run_command("systemctl show {} --property ActiveEnterTimestampMonotonic --value".format(service_name), return_cmd=True) + out, _ = clicommon.run_command(['systemctl', 'show', str(service_name), '--property', 'ActiveEnterTimestampMonotonic', '--value'], return_cmd=True) swss_up_time = float(out.strip())/1000000 now = time.monotonic() if (now - swss_up_time > 120): @@ -921,7 +923,7 @@ def _swss_ready(): return True def 
_is_system_starting(): - out, _ = clicommon.run_command("sudo systemctl is-system-running", return_cmd=True) + out, _ = clicommon.run_command(['sudo', 'systemctl', 'is-system-running'], return_cmd=True) return out.strip() == "starting" def interface_is_in_vlan(vlan_member_table, interface_name): @@ -1092,13 +1094,8 @@ def update_sonic_environment(): if os.path.isfile(SONIC_ENV_TEMPLATE_FILE) and os.path.isfile(SONIC_VERSION_YML_FILE): clicommon.run_command( - "{} -d -y {} -t {},{}".format( - SONIC_CFGGEN_PATH, - SONIC_VERSION_YML_FILE, - SONIC_ENV_TEMPLATE_FILE, - SONIC_ENV_FILE - ), - display_cmd=True + [SONIC_CFGGEN_PATH, '-d', '-y', SONIC_VERSION_YML_FILE, '-t', '{},{}'.format(SONIC_ENV_TEMPLATE_FILE, SONIC_ENV_FILE)], + display_cmd=True ) def remove_router_interface_ip_address(config_db, interface_name, ipaddress_to_remove): @@ -1247,7 +1244,7 @@ def save(filename): command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file) log.log_info("'save' executing...") - clicommon.run_command(command, display_cmd=True) + clicommon.run_command(command, display_cmd=True, shell=True) config_db = sort_dict(read_json_file(file)) with open(file, 'w') as config_db_file: @@ -1307,9 +1304,9 @@ def load(filename, yes): return if namespace is None: - command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file) + command = [str(SONIC_CFGGEN_PATH), '-j', file, '--write-to-db'] else: - command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file) + command = [str(SONIC_CFGGEN_PATH), '-n', str(namespace), '-j', file, '--write-to-db'] log.log_info("'load' executing...") clicommon.run_command(command, display_cmd=True) @@ -1581,9 +1578,9 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if load_sysinfo: if namespace is None: - command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku) + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] else: - command = 
"{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace) + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] clicommon.run_command(command, display_cmd=True) # For the database service running in linux host we use the file user gives as input @@ -1591,23 +1588,20 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form # the default config_db.json format is used. - config_gen_opts = "" + config_gen_opts = [] if os.path.isfile(INIT_CFG_FILE): - config_gen_opts += " -j {} ".format(INIT_CFG_FILE) + config_gen_opts += ['-j', str(INIT_CFG_FILE)] if file_format == 'config_db': - config_gen_opts += ' -j {} '.format(file) + config_gen_opts += ['-j', str(file)] else: - config_gen_opts += ' -Y {} '.format(file) + config_gen_opts += ['-Y', str(file)] if namespace is not None: - config_gen_opts += " -n {} ".format(namespace) - + config_gen_opts += ['-n', str(namespace)] - command = "{sonic_cfggen} {options} --write-to-db".format( - sonic_cfggen=SONIC_CFGGEN_PATH, - options=config_gen_opts) + command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] clicommon.run_command(command, display_cmd=True) client.set(config_db.INIT_INDICATOR, 1) @@ -1616,9 +1610,9 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form db_migrator='/usr/local/bin/db_migrator.py' if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): if namespace is None: - command = "{} -o migrate".format(db_migrator) + command = [db_migrator, '-o', 'migrate'] else: - command = "{} -o migrate -n {}".format(db_migrator, namespace) + command = [db_migrator, '-o', 'migrate', '-n', str(namespace)] clicommon.run_command(command, display_cmd=True) # Re-generate the environment variable in case config_db.json was edited @@ -1638,7 +1632,7 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form def load_mgmt_config(filename): """Reconfigure 
hostname and mgmt interface based on device description file.""" log.log_info("'load_mgmt_config' executing...") - command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) + command = [SONIC_CFGGEN_PATH, '-M', str(filename), '--write-to-db'] clicommon.run_command(command, display_cmd=True) #FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here config_data = parse_device_desc_xml(filename) @@ -1650,19 +1644,29 @@ def load_mgmt_config(filename): mgmt_conf = netaddr.IPNetwork(key[1]) gw_addr = config_data['MGMT_INTERFACE'][key]['gwaddr'] if mgmt_conf.version == 4: - command = "ifconfig eth0 {} netmask {}".format(str(mgmt_conf.ip), str(mgmt_conf.netmask)) + command = ['ifconfig', 'eth0', str(mgmt_conf.ip), 'netmask', str(mgmt_conf.netmask)] clicommon.run_command(command, display_cmd=True) else: - command = "ifconfig eth0 add {}".format(str(mgmt_conf)) + command = ['ifconfig', 'eth0', 'add', str(mgmt_conf)] # Ignore error for IPv6 configuration command due to it not allows config the same IP twice clicommon.run_command(command, display_cmd=True, ignore_error=True) - command = "ip{} route add default via {} dev eth0 table default".format(" -6" if mgmt_conf.version == 6 else "", gw_addr) + command = ['ip'] + (["-6"] if mgmt_conf.version == 6 else []) + ['route', 'add', 'default', 'via', str(gw_addr), 'dev', 'eth0', 'table', 'default'] clicommon.run_command(command, display_cmd=True, ignore_error=True) - command = "ip{} rule add from {} table default".format(" -6" if mgmt_conf.version == 6 else "", str(mgmt_conf.ip)) + command = ['ip'] + (["-6"] if mgmt_conf.version == 6 else []) + ['rule', 'add', 'from', str(mgmt_conf.ip), 'table', 'default'] clicommon.run_command(command, display_cmd=True, ignore_error=True) if len(config_data['MGMT_INTERFACE'].keys()) > 0: - command = "[ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid" - 
clicommon.run_command(command, display_cmd=True, ignore_error=True) + filepath = '/var/run/dhclient.eth0.pid' + if not os.path.isfile(filepath): + sys.exit('File {} does not exist'.format(filepath)) + + out0, rc0 = clicommon.run_command(['cat', filepath], display_cmd=True, return_cmd=True) + if rc0 != 0: + sys.exit('Exit: {}. Command: cat {} failed.'.format(rc0, filepath)) + + out1, rc1 = clicommon.run_command(['kill', str(out0).strip('\n')], return_cmd=True) + if rc1 != 0: + sys.exit('Exit: {}. Command: kill {} failed.'.format(rc1, out0)) + clicommon.run_command(['rm', '-f', filepath], display_cmd=True, return_cmd=True) click.echo("Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`.") @config.command("load_minigraph") @@ -1694,19 +1698,19 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, for namespace in namespace_list: if namespace is DEFAULT_NAMESPACE: config_db = ConfigDBConnector() - cfggen_namespace_option = " " + cfggen_namespace_option = [] ns_cmd_prefix = "" else: config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) - cfggen_namespace_option = " -n {}".format(namespace) + cfggen_namespace_option = ['-n', str(namespace)] ns_cmd_prefix = "sudo ip netns exec {} ".format(namespace) config_db.connect() client = config_db.get_redis_client(config_db.CONFIG_DB) client.flushdb() if os.path.isfile('/etc/sonic/init_cfg.json'): - command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option) + command = [SONIC_CFGGEN_PATH, '-H', '-m', '-j', '/etc/sonic/init_cfg.json'] + cfggen_namespace_option + ['--write-to-db'] else: - command = "{} -H -m --write-to-db {}".format(SONIC_CFGGEN_PATH, cfggen_namespace_option) + command = [SONIC_CFGGEN_PATH, '-H', '-m', '--write-to-db'] + cfggen_namespace_option clicommon.run_command(command, display_cmd=True) client.set(config_db.INIT_INDICATOR, 1) @@ -1714,7 +1718,7 @@ def 
load_minigraph(db, no_service_restart, traffic_shift_away, override_config, update_sonic_environment() if os.path.isfile('/etc/sonic/acl.json'): - clicommon.run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) + clicommon.run_command(['acl-loader', 'update', 'full', '/etc/sonic/acl.json'], display_cmd=True) # Load port_config.json try: @@ -1723,26 +1727,26 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, click.secho("Failed to load port_config.json, Error: {}".format(str(e)), fg='magenta') # generate QoS and Buffer configs - clicommon.run_command("config qos reload --no-dynamic-buffer --no-delay", display_cmd=True) + clicommon.run_command(['config', 'qos', 'reload', '--no-dynamic-buffer', '--no-delay'], display_cmd=True) # get the device type device_type = _get_device_type() if device_type != 'MgmtToRRouter' and device_type != 'MgmtTsToR' and device_type != 'BmcMgmtToRRouter' and device_type != 'EPMS': - clicommon.run_command("pfcwd start_default", display_cmd=True) + clicommon.run_command(['pfcwd', 'start_default'], display_cmd=True) # Write latest db version string into db - db_migrator='/usr/local/bin/db_migrator.py' + db_migrator = '/usr/local/bin/db_migrator.py' if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): for namespace in namespace_list: if namespace is DEFAULT_NAMESPACE: - cfggen_namespace_option = " " + cfggen_namespace_option = [] else: - cfggen_namespace_option = " -n {}".format(namespace) - clicommon.run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) + cfggen_namespace_option = ['-n', str(namespace)] + clicommon.run_command([db_migrator, '-o', 'set_version'] + cfggen_namespace_option) # Keep device isolated with TSA if traffic_shift_away: - clicommon.run_command("TSA", display_cmd=True) + clicommon.run_command(["TSA"], display_cmd=True) if override_config: log.log_warning("Golden configuration may override System Maintenance state. 
Please execute TSC to check the current System mode") click.secho("[WARNING] Golden configuration may override Traffic-shift-away state. Please execute TSC to check the current System mode") @@ -1811,16 +1815,15 @@ def load_port_config(config_db, port_config_path): if 'admin_status' in port_table[port_name]: if port_table[port_name]['admin_status'] == port_config[port_name]['admin_status']: continue - clicommon.run_command('config interface {} {}'.format( + clicommon.run_command(['config', 'interface', 'startup' if port_config[port_name]['admin_status'] == 'up' else 'shutdown', - port_name), display_cmd=True) + port_name], display_cmd=True) return def override_config_by(golden_config_path): # Override configDB with golden config - clicommon.run_command('config override-config-table {}'.format( - golden_config_path), display_cmd=True) + clicommon.run_command(['config', 'override-config-table', str(golden_config_path)], display_cmd=True) return @@ -2500,20 +2503,20 @@ def start(action, restoration_time, ports, detection_time, verbose): Example: config pfcwd start --action drop all 400 --restoration-time 400 """ - cmd = "pfcwd start" + cmd = ['pfcwd', 'start'] if action: - cmd += " --action {}".format(action) + cmd += ['--action', str(action)] if ports: ports = set(ports) - set(['ports', 'detection-time']) - cmd += " {}".format(' '.join(ports)) + cmd += list(ports) if detection_time: - cmd += " {}".format(detection_time) + cmd += [str(detection_time)] if restoration_time: - cmd += " --restoration-time {}".format(restoration_time) + cmd += ['--restoration-time', str(restoration_time)] clicommon.run_command(cmd, display_cmd=verbose) @@ -2522,7 +2525,7 @@ def start(action, restoration_time, ports, detection_time, verbose): def stop(verbose): """ Stop PFC watchdog """ - cmd = "pfcwd stop" + cmd = ['pfcwd', 'stop'] clicommon.run_command(cmd, display_cmd=verbose) @@ -2532,7 +2535,7 @@ def stop(verbose): def interval(poll_interval, verbose): """ Set PFC watchdog counter 
polling interval (ms) """ - cmd = "pfcwd interval {}".format(poll_interval) + cmd = ['pfcwd', 'interval', str(poll_interval)] clicommon.run_command(cmd, display_cmd=verbose) @@ -2542,7 +2545,7 @@ def interval(poll_interval, verbose): def counter_poll(counter_poll, verbose): """ Enable/disable counter polling """ - cmd = "pfcwd counter_poll {}".format(counter_poll) + cmd = ['pfcwd', 'counter_poll', str(counter_poll)] clicommon.run_command(cmd, display_cmd=verbose) @@ -2552,7 +2555,7 @@ def counter_poll(counter_poll, verbose): def big_red_switch(big_red_switch, verbose): """ Enable/disable BIG_RED_SWITCH mode """ - cmd = "pfcwd big_red_switch {}".format(big_red_switch) + cmd = ['pfcwd', 'big_red_switch', str(big_red_switch)] clicommon.run_command(cmd, display_cmd=verbose) @@ -2561,7 +2564,7 @@ def big_red_switch(big_red_switch, verbose): def start_default(verbose): """ Start PFC WD by default configurations """ - cmd = "pfcwd start_default" + cmd = ['pfcwd', 'start_default'] clicommon.run_command(cmd, display_cmd=verbose) @@ -2597,9 +2600,9 @@ def reload(ctx, dry_run, json_data): _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() - from_db = "-d --write-to-db" + from_db = ['-d', '--write-to-db'] if dry_run: - from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" + from_db = ['--additional-data'] + [str(json_data)] if json_data else [] namespace_list = [DEFAULT_NAMESPACE] if multi_asic.get_num_asics() > 1: @@ -2628,12 +2631,9 @@ def reload(ctx, dry_run, json_data): cbf_template_file = os.path.join(hwsku_path, asic_id_suffix, "cbf.json.j2") if os.path.isfile(cbf_template_file): - cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) + cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" - command = "{} {} {} -t {},{} -y {}".format( - SONIC_CFGGEN_PATH, cmd_ns, from_db, - 
cbf_template_file, fname, sonic_version_file - ) + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},{}'.format(cbf_template_file, fname), '-y', str(sonic_version_file)] # Apply the configuration clicommon.run_command(command, display_cmd=True) @@ -2695,9 +2695,9 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() - from_db = "-d --write-to-db" + from_db = ['-d', '--write-to-db'] if dry_run: - from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" + from_db = ['--additional-data'] + [str(json_data)] if json_data else [] namespace_list = [DEFAULT_NAMESPACE] if multi_asic.get_num_asics() > 1: @@ -2740,12 +2740,9 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) hwsku_path, asic_id_suffix, "qos.json.j2" ) if os.path.isfile(qos_template_file): - cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) + cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" - command = "{} {} {} -t {},{} -t {},{} -y {}".format( - SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file, - fname, qos_template_file, fname, sonic_version_file - ) + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},{}'.format(buffer_template_file, fname), '-t', '{},{}'.format(qos_template_file, fname), '-y', sonic_version_file] # Apply the configurations only when both buffer and qos # configuration files are present clicommon.run_command(command, display_cmd=True) @@ -2786,9 +2783,9 @@ def _qos_update_ports(ctx, ports, dry_run, json_data): 'BUFFER_QUEUE'] if json_data: - from_db = "--additional-data \'{}\'".format(json_data) if json_data else "" + from_db = ['--additional-data'] + [json_data] if json_data else [] else: - from_db = "-d" + from_db = ["-d"] items_to_update = {} config_dbs = 
{} @@ -2832,10 +2829,8 @@ def _qos_update_ports(ctx, ports, dry_run, json_data): continue config_db.set_entry(table_name, '|'.join(key), None) - cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) - command = "{} {} {} -t {},config-db -t {},config-db -y {} --print-data".format( - SONIC_CFGGEN_PATH, cmd_ns, from_db, buffer_template_file, qos_template_file, sonic_version_file - ) + cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},config-db'.format(buffer_template_file), '-t', '{},config-db'.format(qos_template_file), '-y', sonic_version_file, '--print-data'] jsonstr, _ = clicommon.run_command(command, display_cmd=False, return_cmd=True) jsondict = json.loads(jsonstr) @@ -2913,8 +2908,8 @@ def _qos_update_ports(ctx, ports, dry_run, json_data): json.dump(items_to_apply, f, sort_keys=True, indent=4) else: jsonstr = json.dumps(items_to_apply) - cmd_ns = "" if ns is DEFAULT_NAMESPACE else "-n {}".format(ns) - command = "{} {} --additional-data '{}' --write-to-db".format(SONIC_CFGGEN_PATH, cmd_ns, jsonstr) + cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] + command = [SONIC_CFGGEN_PATH] + cmd_ns + ['--additional-data', jsonstr, '--write-to-db'] clicommon.run_command(command, display_cmd=False) if portset_to_handle != portset_handled: @@ -3320,8 +3315,8 @@ def add_community(db, community, string_type): try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3348,8 +3343,8 @@ def del_community(db, community): try: click.echo("Restarting SNMP 
service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3379,8 +3374,8 @@ def replace_community(db, current_community, new_community): click.echo('SNMP community {} replace community {}'.format(new_community, current_community)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3413,8 +3408,8 @@ def add_contact(db, contact, contact_email): "configuration".format(contact, contact_email)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3428,8 +3423,8 @@ def add_contact(db, contact, contact_email): "configuration".format(contact, contact_email)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - 
clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3450,8 +3445,8 @@ def del_contact(db, contact): click.echo("SNMP contact {} removed from configuration".format(contact)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3485,8 +3480,8 @@ def modify_contact(db, contact, contact_email): click.echo("SNMP contact {} email updated to {}".format(contact, contact_email)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3499,8 +3494,8 @@ def modify_contact(db, contact, contact_email): click.echo("SNMP contact {} and contact email {} updated".format(contact, contact_email)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + 
clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3541,8 +3536,8 @@ def add_location(db, location): ctx.fail("Failed to set SNMP location. Error: {}".format(e)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3569,8 +3564,8 @@ def delete_location(db, location): ctx.fail("Failed to remove SNMP location from configuration. Error: {}".format(e)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3608,8 +3603,8 @@ def modify_location(db, location): ctx.fail("Failed to modify SNMP location. 
Error: {}".format(e)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3722,8 +3717,8 @@ def add_user(db, user, user_type, user_permission_type, user_auth_type, user_aut click.echo("SNMP user {} added to configuration".format(user)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -3743,8 +3738,8 @@ def del_user(db, user): click.echo("SNMP user {} removed from configuration".format(user)) try: click.echo("Restarting SNMP service...") - clicommon.run_command("systemctl reset-failed snmp.service", display_cmd=False) - clicommon.run_command("systemctl restart snmp.service", display_cmd=False) + clicommon.run_command(['systemctl', 'reset-failed', 'snmp.service'], display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'snmp.service'], display_cmd=False) except SystemExit as e: click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() @@ -4024,12 +4019,12 @@ def speed(ctx, interface_name, interface_speed, verbose): log.log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - 
command = "portconfig -p {} -s {}".format(interface_name, interface_speed) + command = ['portconfig', '-p', str(interface_name), '-s', str(interface_speed)] else: - command = "portconfig -p {} -s {} -n {}".format(interface_name, interface_speed, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-s', str(interface_speed), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4054,12 +4049,12 @@ def link_training(ctx, interface_name, mode, verbose): log.log_info("'interface link-training {} {}' executing...".format(interface_name, mode)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -lt {}".format(interface_name, mode) + command = ['portconfig', '-p', str(interface_name), '-lt', str(mode)] else: - command = "portconfig -p {} -lt {} -n {}".format(interface_name, mode, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-lt', str(mode), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4084,12 +4079,12 @@ def autoneg(ctx, interface_name, mode, verbose): log.log_info("'interface autoneg {} {}' executing...".format(interface_name, mode)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -an {}".format(interface_name, mode) + command = ['portconfig', '-p', str(interface_name), '-an', str(mode)] else: - command = "portconfig -p {} -an {} -n {}".format(interface_name, mode, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-an', str(mode), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4114,12 +4109,12 @@ def advertised_speeds(ctx, interface_name, speed_list, verbose): log.log_info("'interface advertised_speeds {} {}' executing...".format(interface_name, 
speed_list)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -S {}".format(interface_name, speed_list) + command = ['portconfig', '-p', str(interface_name), '-S', str(speed_list)] else: - command = "portconfig -p {} -S {} -n {}".format(interface_name, speed_list, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-S', str(speed_list), '-n', ctx.obj['namespace']] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4144,12 +4139,12 @@ def interface_type(ctx, interface_name, interface_type_value, verbose): log.log_info("'interface interface_type {} {}' executing...".format(interface_name, interface_type_value)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -t {}".format(interface_name, interface_type_value) + command = ['portconfig', '-p', str(interface_name), '-t', str(interface_type_value)] else: - command = "portconfig -p {} -t {} -n {}".format(interface_name, interface_type_value, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-t', str(interface_type_value), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4174,12 +4169,12 @@ def advertised_types(ctx, interface_name, interface_type_list, verbose): log.log_info("'interface advertised_interface_types {} {}' executing...".format(interface_name, interface_type_list)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -T {}".format(interface_name, interface_type_list) + command = ['portconfig', '-p', str(interface_name), '-T', str(interface_type_list)] else: - command = "portconfig -p {} -T {} -n {}".format(interface_name, interface_type_list, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-T', str(interface_type_list), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += 
["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4326,12 +4321,12 @@ def mtu(ctx, interface_name, interface_mtu, verbose): ctx.fail("'interface_name' is in portchannel!") if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -m {}".format(interface_name, interface_mtu) + command = ['portconfig', '-p', str(interface_name), '-m', str(interface_mtu)] else: - command = "portconfig -p {} -m {} -n {}".format(interface_name, interface_mtu, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-m', str(interface_mtu), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) # @@ -4353,12 +4348,12 @@ def tpid(ctx, interface_name, interface_tpid, verbose): ctx.fail("'interface_name' is None!") if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -tp {}".format(interface_name, interface_tpid) + command = ['portconfig', '-p', str(interface_name), '-tp', str(interface_tpid)] else: - command = "portconfig -p {} -tp {} -n {}".format(interface_name, interface_tpid, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-tp', str(interface_tpid), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) @@ -4378,12 +4373,12 @@ def fec(ctx, interface_name, interface_fec, verbose): ctx.fail("'interface_name' is None!") if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -f {}".format(interface_name, interface_fec) + command = ['portconfig', '-p', str(interface_name), '-f', str(interface_fec)] else: - command = "portconfig -p {} -f {} -n {}".format(interface_name, interface_fec, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-f', str(interface_fec), '-n', str(ctx.obj['namespace'])] if verbose: - command += " -vv" + command += ["-vv"] clicommon.run_command(command, 
display_cmd=verbose) # @@ -4479,7 +4474,6 @@ def remove(ctx, interface_name, ip_addr): """Remove an IP address from the interface""" # Get the config_db connector config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) - if clicommon.get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: @@ -4523,9 +4517,9 @@ def remove(ctx, interface_name, ip_addr): config_db.set_entry(table_name, interface_name, None) if multi_asic.is_multi_asic(): - command = "sudo ip netns exec {} ip neigh flush dev {} {}".format(ctx.obj['namespace'], interface_name, str(ip_address)) + command = ['sudo', 'ip', 'netns', 'exec', str(ctx.obj['namespace']), 'ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)] else: - command = "ip neigh flush dev {} {}".format(interface_name, str(ip_address)) + command = ['ip', 'neigh', 'flush', 'dev', str(interface_name), str(ip_address)] clicommon.run_command(command) # @@ -4935,9 +4929,9 @@ def frequency(ctx, interface_name, frequency): log.log_info("{} Setting transceiver frequency {} GHz".format(interface_name, frequency)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -F {}".format(interface_name, frequency) + command = ['portconfig', '-p', str(interface_name), '-F', str(frequency)] else: - command = "portconfig -p {} -F {} -n {}".format(interface_name, frequency, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-F', str(frequency), '-n', str(ctx.obj['namespace'])] clicommon.run_command(command) @@ -4967,9 +4961,9 @@ def tx_power(ctx, interface_name, tx_power): log.log_info("{} Setting transceiver power {} dBm".format(interface_name, tx_power)) if ctx.obj['namespace'] is DEFAULT_NAMESPACE: - command = "portconfig -p {} -P {}".format(interface_name, tx_power) + command = ['portconfig', '-p', str(interface_name), '-P', str(tx_power)] else: - command = "portconfig -p {} -P {} -n {}".format(interface_name, 
tx_power, ctx.obj['namespace']) + command = ['portconfig', '-p', str(interface_name), '-P', str(tx_power), '-n', str(ctx.obj['namespace'])] clicommon.run_command(command) @@ -4994,7 +4988,7 @@ def lpmode(ctx, interface_name, state): if interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") - cmd = "sudo sfputil lpmode {} {}".format("on" if state == "enable" else "off", interface_name) + cmd = ['sudo', 'sfputil', 'lpmode', "{}".format("on" if state == "enable" else "off"), str(interface_name)] clicommon.run_command(cmd) # @@ -5017,7 +5011,7 @@ def reset(ctx, interface_name): if interface_name_is_valid(config_db, interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") - cmd = "sudo sfputil reset {}".format(interface_name) + cmd = ['sudo', 'sfputil', 'reset', str(interface_name)] clicommon.run_command(cmd) # @@ -5756,7 +5750,7 @@ def update(): def full(file_name): """Full update of ACL rules configuration.""" log.log_info("'acl update full {}' executing...".format(file_name)) - command = "acl-loader update full {}".format(file_name) + command = ['acl-loader', 'update', 'full', str(file_name)] clicommon.run_command(command) @@ -5769,7 +5763,7 @@ def full(file_name): def incremental(file_name): """Incremental update of ACL rule configuration.""" log.log_info("'acl update incremental {}' executing...".format(file_name)) - command = "acl-loader update incremental {}".format(file_name) + command = ['acl-loader', 'update', 'incremental', str(file_name)] clicommon.run_command(command) @@ -5796,13 +5790,13 @@ def dropcounters(): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def install(counter_name, alias, group, counter_type, desc, reasons, verbose): """Install a new drop counter""" - command = "dropconfig -c install -n '{}' -t '{}' -r '{}'".format(counter_name, counter_type, reasons) + command = ['dropconfig', '-c', 
'install', '-n', str(counter_name), '-t', str(counter_type), '-r', str(reasons)] if alias: - command += " -a '{}'".format(alias) + command += ['-a', str(alias)] if group: - command += " -g '{}'".format(group) + command += ['-g', str(group)] if desc: - command += " -d '{}'".format(desc) + command += ['-d', str(desc)] clicommon.run_command(command, display_cmd=verbose) @@ -5815,7 +5809,7 @@ def install(counter_name, alias, group, counter_type, desc, reasons, verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def delete(counter_name, verbose): """Delete an existing drop counter""" - command = "dropconfig -c uninstall -n {}".format(counter_name) + command = ['dropconfig', '-c', 'uninstall', '-n', str(counter_name)] clicommon.run_command(command, display_cmd=verbose) @@ -5828,7 +5822,7 @@ def delete(counter_name, verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def add_reasons(counter_name, reasons, verbose): """Add reasons to an existing drop counter""" - command = "dropconfig -c add -n {} -r {}".format(counter_name, reasons) + command = ['dropconfig', '-c', 'add', '-n', str(counter_name), '-r', str(reasons)] clicommon.run_command(command, display_cmd=verbose) @@ -5841,7 +5835,7 @@ def add_reasons(counter_name, reasons, verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def remove_reasons(counter_name, reasons, verbose): """Remove reasons from an existing drop counter""" - command = "dropconfig -c remove -n {} -r {}".format(counter_name, reasons) + command = ['dropconfig', '-c', 'remove', '-n', str(counter_name), '-r', str(reasons)] clicommon.run_command(command, display_cmd=verbose) @@ -5863,17 +5857,17 @@ def remove_reasons(counter_name, reasons, verbose): def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) - command = 
"ecnconfig -p %s" % profile - if rmax is not None: command += " -rmax %d" % rmax - if rmin is not None: command += " -rmin %d" % rmin - if ymax is not None: command += " -ymax %d" % ymax - if ymin is not None: command += " -ymin %d" % ymin - if gmax is not None: command += " -gmax %d" % gmax - if gmin is not None: command += " -gmin %d" % gmin - if rdrop is not None: command += " -rdrop %d" % rdrop - if ydrop is not None: command += " -ydrop %d" % ydrop - if gdrop is not None: command += " -gdrop %d" % gdrop - if verbose: command += " -vv" + command = ['ecnconfig', '-p', str(profile)] + if rmax is not None: command += ['-rmax', str(rmax)] + if rmin is not None: command += ['-rmin', str(rmin)] + if ymax is not None: command += ['-ymax', str(ymax)] + if ymin is not None: command += ['-ymin', str(ymin)] + if gmax is not None: command += ['-gmax', str(gmax)] + if gmin is not None: command += ['-gmin', str(gmin)] + if rdrop is not None: command += ['-rdrop', str(rdrop)] + if ydrop is not None: command += ['-ydrop', str(ydrop)] + if gdrop is not None: command += ['-gdrop', str(gdrop)] + if verbose: command += ["-vv"] clicommon.run_command(command, display_cmd=verbose) @@ -5906,7 +5900,7 @@ def asymmetric(ctx, interface_name, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command("pfc config asymmetric {0} {1}".format(status, interface_name)) + clicommon.run_command(['pfc', 'config', 'asymmetric', str(status), str(interface_name)]) # # 'pfc priority' command ('config interface pfc priority ...') @@ -5927,7 +5921,7 @@ def priority(ctx, interface_name, priority, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command("pfc config priority {0} {1} {2}".format(status, interface_name, priority)) + clicommon.run_command(['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)]) # # 'buffer' group ('config buffer ...') @@ -6248,7 +6242,7 @@ def telemetry(): 
@click.argument('interval', required=True) def interval(interval): """Configure watermark telemetry interval""" - command = 'watermarkcfg --config-interval ' + interval + command = ['watermarkcfg', '--config-interval', str(interval)] clicommon.run_command(command) @@ -6397,7 +6391,7 @@ def ztp(): @click.argument('run', required=False, type=click.Choice(["run"])) def run(run): """Restart ZTP of the device.""" - command = "ztp run -y" + command = ['ztp', 'run', '-y'] clicommon.run_command(command, display_cmd=True) @ztp.command() @@ -6406,14 +6400,14 @@ def run(run): @click.argument('disable', required=False, type=click.Choice(["disable"])) def disable(disable): """Administratively Disable ZTP.""" - command = "ztp disable -y" + command = ['ztp', 'disable', '-y'] clicommon.run_command(command, display_cmd=True) @ztp.command() @click.argument('enable', required=False, type=click.Choice(["enable"])) def enable(enable): """Administratively Enable ZTP.""" - command = "ztp enable" + command = ['ztp', 'enable'] clicommon.run_command(command, display_cmd=True) # @@ -6448,7 +6442,7 @@ def add_ntp_server(ctx, ntp_ip_address): click.echo("NTP server {} added to configuration".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") - clicommon.run_command("systemctl restart ntp-config", display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'ntp-config'], display_cmd=False) except SystemExit as e: ctx.fail("Restart service ntp-config failed with error {}".format(e)) @@ -6472,7 +6466,7 @@ def del_ntp_server(ctx, ntp_ip_address): ctx.fail("NTP server {} is not configured.".format(ntp_ip_address)) try: click.echo("Restarting ntp-config service...") - clicommon.run_command("systemctl restart ntp-config", display_cmd=False) + clicommon.run_command(['systemctl', 'restart', 'ntp-config'], display_cmd=False) except SystemExit as e: ctx.fail("Restart service ntp-config failed with error {}".format(e)) @@ -6515,8 +6509,8 @@ def enable(ctx): if out != 
"active": log.log_info("sflow service is not enabled. Starting sflow docker...") - clicommon.run_command("sudo systemctl enable sflow") - clicommon.run_command("sudo systemctl start sflow") + clicommon.run_command(['sudo', 'systemctl', 'enable', 'sflow']) + clicommon.run_command(['sudo', 'systemctl', 'start', 'sflow']) # # 'sflow' command ('config sflow disable') diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index f088926b3f..75846d54e3 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -52,7 +52,7 @@ SNIFFER_CONF_FILE = '/etc/supervisor/conf.d/mlnx_sniffer.conf' SNIFFER_CONF_FILE_IN_CONTAINER = CONTAINER_NAME + ':' + SNIFFER_CONF_FILE # Command to restart swss service -COMMAND_RESTART_SWSS = 'systemctl restart swss.service' +COMMAND_RESTART_SWSS = ['systemctl', 'restart', 'swss.service'] # Global logger instance @@ -99,12 +99,12 @@ def env_variable_delete(delete_line): def conf_file_copy(src, dest): - command = 'docker cp ' + src + ' ' + dest + command = ['docker', 'cp', str(src), str(dest)] clicommon.run_command(command) def conf_file_receive(): - command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE) + command = ['docker', 'exec', str(CONTAINER_NAME), 'bash', '-c', 'touch ' + str(SNIFFER_CONF_FILE)] clicommon.run_command(command) conf_file_copy(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE) @@ -134,7 +134,7 @@ def sniffer_env_variable_set(enable, env_variable_name, env_variable_string=""): if not ignore: config_file_send() - command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE) + command = ['rm', '-rf', str(TMP_SNIFFER_CONF_FILE)] clicommon.run_command(command) return ignore diff --git a/config/syslog.py b/config/syslog.py index 0df6fffd35..90fc52ec9d 100644 --- a/config/syslog.py +++ b/config/syslog.py @@ -410,8 +410,8 @@ def add(db, server_ip_address, source, port, vrf): try: add_entry(db.cfgdb, table, key, data) - clicommon.run_command("systemctl reset-failed rsyslog-config rsyslog", 
display_cmd=True) - clicommon.run_command("systemctl restart rsyslog-config", display_cmd=True) + clicommon.run_command(['systemctl', 'reset-failed', 'rsyslog-config', 'rsyslog'], display_cmd=True) + clicommon.run_command(['systemctl', 'restart', 'rsyslog-config'], display_cmd=True) log.log_notice("Added remote syslog logging: server={},source={},port={},vrf={}".format( server_ip_address, data.get(SYSLOG_SOURCE, "N/A"), @@ -442,8 +442,8 @@ def delete(db, server_ip_address): try: del_entry(db.cfgdb, table, key) - clicommon.run_command("systemctl reset-failed rsyslog-config rsyslog", display_cmd=True) - clicommon.run_command("systemctl restart rsyslog-config", display_cmd=True) + clicommon.run_command(['systemctl', 'reset-failed', 'rsyslog-config', 'rsyslog'], display_cmd=True) + clicommon.run_command(['systemctl', 'restart', 'rsyslog-config'], display_cmd=True) log.log_notice("Removed remote syslog logging: server={}".format(server_ip_address)) except Exception as e: log.log_error("Failed to remove remote syslog logging: {}".format(str(e))) diff --git a/config/vlan.py b/config/vlan.py index e1ae1f02eb..ec4b269cc2 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -139,13 +139,13 @@ def del_vlan(db, vid, no_restart_dhcp_relay): def restart_ndppd(): - verify_swss_running_cmd = "docker container inspect -f '{{.State.Status}}' swss" - docker_exec_cmd = "docker exec -i swss {}" - ndppd_status_cmd= "supervisorctl status ndppd" - ndppd_conf_copy_cmd = "cp /usr/share/sonic/templates/ndppd.conf /etc/supervisor/conf.d/" - supervisor_update_cmd = "supervisorctl update" - ndppd_config_gen_cmd = "sonic-cfggen -d -t /usr/share/sonic/templates/ndppd.conf.j2,/etc/ndppd.conf" - ndppd_restart_cmd = "supervisorctl restart ndppd" + verify_swss_running_cmd = ['docker', 'container', 'inspect', '-f', '{{.State.Status}}', 'swss'] + docker_exec_cmd = ['docker', 'exec', '-i', 'swss'] + ndppd_config_gen_cmd = ['sonic-cfggen', '-d', '-t', 
'/usr/share/sonic/templates/ndppd.conf.j2,/etc/ndppd.conf'] + ndppd_restart_cmd =['supervisorctl', 'restart', 'ndppd'] + ndppd_status_cmd= ["supervisorctl", "status", "ndppd"] + ndppd_conf_copy_cmd = ['cp', '/usr/share/sonic/templates/ndppd.conf', '/etc/supervisor/conf.d/'] + supervisor_update_cmd = ['supervisorctl', 'update'] output, _ = clicommon.run_command(verify_swss_running_cmd, return_cmd=True) @@ -153,17 +153,16 @@ def restart_ndppd(): click.echo(click.style('SWSS container is not running, changes will take effect the next time the SWSS container starts', fg='red'),) return - _, rc = clicommon.run_command(docker_exec_cmd.format(ndppd_status_cmd), ignore_error=True, return_cmd=True) + _, rc = clicommon.run_command(docker_exec_cmd + ndppd_status_cmd, ignore_error=True, return_cmd=True) if rc != 0: - clicommon.run_command(docker_exec_cmd.format(ndppd_conf_copy_cmd)) - clicommon.run_command(docker_exec_cmd.format(supervisor_update_cmd), return_cmd=True) + clicommon.run_command(docker_exec_cmd + ndppd_conf_copy_cmd) + clicommon.run_command(docker_exec_cmd + supervisor_update_cmd, return_cmd=True) click.echo("Starting ndppd service") - clicommon.run_command(docker_exec_cmd.format(ndppd_config_gen_cmd)) + clicommon.run_command(docker_exec_cmd + ndppd_config_gen_cmd) sleep(3) - clicommon.run_command(docker_exec_cmd.format(ndppd_restart_cmd), return_cmd=True) - + clicommon.run_command(docker_exec_cmd + ndppd_restart_cmd, return_cmd=True) @vlan.command('proxy_arp') @click.argument('vid', metavar='', required=True, type=int) diff --git a/scripts/sonic-bootchart b/scripts/sonic-bootchart index 0b7646c74c..31f54ba0d8 100755 --- a/scripts/sonic-bootchart +++ b/scripts/sonic-bootchart @@ -50,12 +50,12 @@ def check_bootchart_installed(): def get_enabled_status(): """ Get systemd-bootchart status """ - output, _ = clicommon.run_command("systemctl is-enabled systemd-bootchart", return_cmd=True) + output, _ = clicommon.run_command(['systemctl', 'is-enabled', 
'systemd-bootchart'], return_cmd=True) return output def get_active_status(): """ Get systemd-bootchart status """ - output, _ = clicommon.run_command("systemctl is-active systemd-bootchart", return_cmd=True) + output, _ = clicommon.run_command(['systemctl', 'is-active', 'systemd-bootchart'], return_cmd=True) return output def get_output_files(): @@ -75,14 +75,14 @@ def cli(): @root_privileges_required def enable(): """ Enable bootchart """ - clicommon.run_command("systemctl enable systemd-bootchart", display_cmd=True) + clicommon.run_command(['systemctl', 'enable', 'systemd-bootchart'], display_cmd=True) @cli.command() @root_privileges_required def disable(): """ Disable bootchart """ - clicommon.run_command("systemctl disable systemd-bootchart", display_cmd=True) + clicommon.run_command(['systemctl', 'disable', 'systemd-bootchart'], display_cmd=True) @cli.command() diff --git a/show/acl.py b/show/acl.py index a2c8ab496f..de3d2d0693 100644 --- a/show/acl.py +++ b/show/acl.py @@ -19,13 +19,13 @@ def acl(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def rule(table_name, rule_id, verbose): """Show existing ACL rules""" - cmd = "acl-loader show rule" + cmd = ['acl-loader', 'show', 'rule'] if table_name is not None: - cmd += " {}".format(table_name) + cmd += [str(table_name)] if rule_id is not None: - cmd += " {}".format(rule_id) + cmd += [str(rule_id)] clicommon.run_command(cmd, display_cmd=verbose) @@ -36,9 +36,9 @@ def rule(table_name, rule_id, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def table(table_name, verbose): """Show existing ACL tables""" - cmd = "acl-loader show table" + cmd = ['acl-loader', 'show', 'table'] if table_name is not None: - cmd += " {}".format(table_name) + cmd += [str(table_name)] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/chassis_modules.py b/show/chassis_modules.py index acc0263b6f..73ea92d1ed 100644 --- a/show/chassis_modules.py +++ 
b/show/chassis_modules.py @@ -117,13 +117,13 @@ def midplane_status(chassis_module_name): def system_ports(systemportname, namespace, verbose): """Show VOQ system ports information""" - cmd = "voqutil -c system_ports" + cmd = ['voqutil', '-c', 'system_ports'] if systemportname is not None: - cmd += " -i \"{}\"".format(systemportname) + cmd += ['-i', str(systemportname)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -134,13 +134,13 @@ def system_ports(systemportname, namespace, verbose): def system_neighbors(asicname, ipaddress, verbose): """Show VOQ system neighbors information""" - cmd = "voqutil -c system_neighbors" + cmd = ['voqutil', '-c', 'system_neighbors'] if ipaddress is not None: - cmd += " -a {}".format(ipaddress) + cmd += ['-a', str(ipaddress)] if asicname is not None: - cmd += " -n {}".format(asicname) + cmd += ['-n', str(asicname)] clicommon.run_command(cmd, display_cmd=verbose) @@ -152,15 +152,15 @@ def system_neighbors(asicname, ipaddress, verbose): def system_lags(systemlagname, asicname, linecardname, verbose): """Show VOQ system lags information""" - cmd = "voqutil -c system_lags" + cmd = ['voqutil', '-c', 'system_lags'] if systemlagname is not None: - cmd += " -s \"{}\"".format(systemlagname) + cmd += ['-s', str(systemlagname)] if asicname is not None: - cmd += " -n {}".format(asicname) + cmd += ['-n', str(asicname)] if linecardname is not None: - cmd += " -l \"{}\"".format(linecardname) + cmd += ['-l', str(linecardname)] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/dropcounters.py b/show/dropcounters.py index 63ae138f45..30779b9364 100644 --- a/show/dropcounters.py +++ b/show/dropcounters.py @@ -18,10 +18,10 @@ def dropcounters(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def configuration(group, verbose): """Show current drop counter configuration""" - cmd = "dropconfig -c show_config" + cmd = 
['dropconfig', '-c', 'show_config'] if group: - cmd += " -g '{}'".format(group) + cmd += ['-g', str(group)] clicommon.run_command(cmd, display_cmd=verbose) @@ -31,7 +31,7 @@ def configuration(group, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def capabilities(verbose): """Show device drop counter capabilities""" - cmd = "dropconfig -c show_capabilities" + cmd = ['dropconfig', '-c', 'show_capabilities'] clicommon.run_command(cmd, display_cmd=verbose) @@ -43,12 +43,12 @@ def capabilities(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def counts(group, counter_type, verbose): """Show drop counts""" - cmd = "dropstat -c show" + cmd = ['dropstat', '-c', 'show'] if group: - cmd += " -g '{}'".format(group) + cmd += ['-g', str(group)] if counter_type: - cmd += " -t '{}'".format(counter_type) + cmd += ['-t', str(counter_type)] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/fabric.py b/show/fabric.py index 2e55887a0f..c8dc956e44 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -18,11 +18,11 @@ def counters(): @click.option('-e', '--errors', is_flag=True) def reachability(namespace, errors): """Show fabric reachability""" - cmd = "fabricstat -r" + cmd = ['fabricstat', '-r'] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] if errors: - cmd += " -e" + cmd += ["-e"] clicommon.run_command(cmd) @counters.command() @@ -30,18 +30,18 @@ def reachability(namespace, errors): @click.option('-e', '--errors', is_flag=True) def port(namespace, errors): """Show fabric port stat""" - cmd = "fabricstat" + cmd = ["fabricstat"] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] if errors: - cmd += " -e" + cmd += ["-e"] clicommon.run_command(cmd) @counters.command() @multi_asic_util.multi_asic_click_option_namespace def queue(namespace): """Show fabric queue stat""" - cmd = "fabricstat -q" + cmd = ['fabricstat', '-q'] 
if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd) diff --git a/show/flow_counters.py b/show/flow_counters.py index 0767a2a9a5..4c7764c9da 100644 --- a/show/flow_counters.py +++ b/show/flow_counters.py @@ -21,9 +21,9 @@ def flowcnt_trap(): @click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') def stats(verbose, namespace): """Show trap flow counter statistic""" - cmd = "flow_counters_stat -t trap" + cmd = ['flow_counters_stat', '-t', 'trap'] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) # @@ -57,9 +57,9 @@ def config(db): def stats(ctx, verbose, namespace): """Show statistics of all route flow counters""" if ctx.invoked_subcommand is None: - command = "flow_counters_stat -t route" + command = ['flow_counters_stat', '-t', 'route'] if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) @@ -70,11 +70,11 @@ def stats(ctx, verbose, namespace): @click.argument('prefix-pattern', required=True) def pattern(prefix_pattern, vrf, verbose, namespace): """Show statistics of route flow counters by pattern""" - command = "flow_counters_stat -t route --prefix_pattern \"{}\"".format(prefix_pattern) + command = ['flow_counters_stat', '-t', 'route', '--prefix_pattern', str(prefix_pattern)] if vrf: - command += ' --vrf {}'.format(vrf) + command += ['--vrf', str(vrf)] if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) @@ -85,9 +85,9 @@ def pattern(prefix_pattern, vrf, verbose, namespace): @click.argument('prefix', required=True) def route(prefix, vrf, verbose, namespace): """Show statistics of route flow 
counters by prefix""" - command = "flow_counters_stat -t route --prefix {}".format(prefix) + command = ['flow_counters_stat', '-t', 'route', '--prefix', str(prefix)] if vrf: - command += ' --vrf {}'.format(vrf) + command += ['--vrf', str(vrf)] if namespace is not None: - command += " -n {}".format(namespace) + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) diff --git a/show/gearbox.py b/show/gearbox.py index 1c46c78150..cbb16302b1 100644 --- a/show/gearbox.py +++ b/show/gearbox.py @@ -18,7 +18,7 @@ def phys(): @click.pass_context def status(ctx): """Show gearbox phys status""" - clicommon.run_command("gearboxutil phys status") + clicommon.run_command(['gearboxutil', 'phys', 'status']) # 'interfaces' subcommand ("show gearbox interfaces") @gearbox.group(cls=clicommon.AliasedGroup) @@ -31,4 +31,4 @@ def interfaces(): @click.pass_context def status(ctx): """Show gearbox interfaces status""" - clicommon.run_command("gearboxutil interfaces status") + clicommon.run_command(['gearboxutil', 'interfaces', 'status']) diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 3b876cdad9..c376afe71e 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -103,18 +103,18 @@ def description(interfacename, namespace, display, verbose): ctx = click.get_current_context() - cmd = "intfutil -c description" + cmd = ['intfutil', '-c', 'description'] #ignore the display option when interface name is passed if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -i {}".format(interfacename) + cmd += ['-i', str(interfacename)] else: - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -135,17 +135,17 @@ def status(interfacename, namespace, display, verbose): ctx = click.get_current_context() - cmd = 
"intfutil -c status" + cmd = ['intfutil', '-c', 'status'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -i {}".format(interfacename) + cmd += ['-i', str(interfacename)] else: - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -158,17 +158,17 @@ def tpid(interfacename, namespace, display, verbose): ctx = click.get_current_context() - cmd = "intfutil -c tpid" + cmd = ['intfutil', '-c', 'tpid'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -i {}".format(interfacename) + cmd += ['-i', str(interfacename)] else: - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -431,18 +431,18 @@ def eeprom(interfacename, dump_dom, namespace, verbose): ctx = click.get_current_context() - cmd = "sfpshow eeprom" + cmd = ['sfpshow', 'eeprom'] if dump_dom: - cmd += " --dom" + cmd += ["--dom"] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -456,16 +456,16 @@ def pm(interfacename, namespace, verbose): ctx = click.get_current_context() - cmd = "sfpshow pm" + cmd = ['sfpshow', 'pm'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias( ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, 
display_cmd=verbose) @@ -479,15 +479,15 @@ def info(interfacename, namespace, verbose): ctx = click.get_current_context() - cmd = "sfpshow info" + cmd = ['sfpshow', 'info'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -499,12 +499,12 @@ def lpmode(interfacename, verbose): ctx = click.get_current_context() - cmd = "sudo sfputil show lpmode" + cmd = ['sudo', 'sfputil', 'show', 'lpmode'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] clicommon.run_command(cmd, display_cmd=verbose) @@ -519,15 +519,15 @@ def presence(db, interfacename, namespace, verbose): ctx = click.get_current_context() - cmd = "sfpshow presence" + cmd = ['sfpshow', 'presence'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -544,15 +544,17 @@ def error_status(db, interfacename, fetch_from_hardware, namespace, verbose): ctx = click.get_current_context() - cmd = "sudo sfputil show error-status" + cmd = ['sudo', 'sfputil', 'show', 'error-status'] if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if fetch_from_hardware: - cmd += " -hw" + cmd += ["-hw"] + if namespace is not None: + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -571,19 +573,19 @@ def counters(ctx, 
verbose, period, interface, printall, namespace, display): """Show interface counters""" if ctx.invoked_subcommand is None: - cmd = "portstat" + cmd = ["portstat"] if printall: - cmd += " -a" + cmd += ["-a"] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] if interface is not None: interface = try_convert_interfacename_from_alias(ctx, interface) - cmd += " -i {}".format(interface) + cmd += ['-i', str(interface)] else: - cmd += " -s {}".format(display) + cmd += ['-s', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -594,13 +596,13 @@ def counters(ctx, verbose, period, interface, printall, namespace, display): @click.option('--verbose', is_flag=True, help="Enable verbose output") def errors(verbose, period, namespace, display): """Show interface counters errors""" - cmd = "portstat -e" + cmd = ['portstat', '-e'] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] - cmd += " -s {}".format(display) + cmd += ['-s', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -611,13 +613,13 @@ def errors(verbose, period, namespace, display): @click.option('--verbose', is_flag=True, help="Enable verbose output") def fec_stats(verbose, period, namespace, display): """Show interface counters fec-stats""" - cmd = "portstat -f" + cmd = ['portstat', '-f'] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] - cmd += " -s {}".format(display) + cmd += ['-s', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) @@ -628,12 +630,12 @@ def fec_stats(verbose, period, namespace, display): @click.option('--verbose', is_flag=True, help="Enable verbose output") def rates(verbose, 
period, namespace, display): """Show interface counters rates""" - cmd = "portstat -R" + cmd = ['portstat', '-R'] if period is not None: - cmd += " -p {}".format(period) - cmd += " -s {}".format(display) + cmd += ['-p', str(period)] + cmd += ['-s', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) # 'counters' subcommand ("show interfaces counters rif") @@ -645,13 +647,12 @@ def rif(interface, period, verbose): """Show interface counters""" ctx = click.get_current_context() - - cmd = "intfstat" + cmd = ["intfstat"] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] if interface is not None: interface = try_convert_interfacename_from_alias(ctx, interface) - cmd += " -i {}".format(interface) + cmd += ['-i', str(interface)] clicommon.run_command(cmd, display_cmd=verbose) @@ -664,13 +665,12 @@ def detailed(interface, period, verbose): """Show interface counters detailed""" ctx = click.get_current_context() - - cmd = "portstat -l" + cmd = ['portstat', '-l'] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] if interface is not None: interface = try_convert_interfacename_from_alias(ctx, interface) - cmd += " -i {}".format(interface) + cmd += ['-i', str(interface)] clicommon.run_command(cmd, display_cmd=verbose) @@ -694,18 +694,18 @@ def autoneg_status(interfacename, namespace, display, verbose): ctx = click.get_current_context() - cmd = "intfutil -c autoneg" + cmd = ['intfutil', '-c', 'autoneg'] #ignore the display option when interface name is passed if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -i {}".format(interfacename) + cmd += ['-i', str(interfacename)] else: - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] 
clicommon.run_command(cmd, display_cmd=verbose) @@ -727,17 +727,17 @@ def link_training_status(interfacename, namespace, display, verbose): ctx = click.get_current_context() - cmd = "intfutil -c link_training" + cmd = ['intfutil', '-c', 'link_training'] #ignore the display option when interface name is passed if interfacename is not None: interfacename = try_convert_interfacename_from_alias(ctx, interfacename) - cmd += " -i {}".format(interfacename) + cmd += ['-i', str(interfacename)] else: - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/kdump.py b/show/kdump.py index e5ce7cde74..6eba55082e 100644 --- a/show/kdump.py +++ b/show/kdump.py @@ -49,7 +49,7 @@ def get_kdump_oper_mode(): returns "Not Ready"; """ oper_mode = "Not Ready" - command_stdout, _ = clicommon.run_command("/usr/sbin/kdump-config status", return_cmd=True) + command_stdout, _ = clicommon.run_command(['/usr/sbin/kdump-config', 'status'], return_cmd=True) for line in command_stdout.splitlines(): if ": ready to kdump" in line: @@ -95,7 +95,7 @@ def get_kdump_core_files(): of 'find' command. dump_file_list: A list contains kernel core dump files. """ - find_core_dump_files = "find /var/crash -name 'kdump.*'" + find_core_dump_files = ['find', '/var/crash', '-name', 'kdump.*'] dump_file_list = [] cmd_message = None @@ -119,7 +119,7 @@ def get_kdump_dmesg_files(): of 'find' command. dmesg_file_list: A list contains kernel dmesg files. 
""" - find_dmesg_files = "find /var/crash -name 'dmesg.*'" + find_dmesg_files = ['find', '/var/crash', '-name', 'dmesg.*'] dmesg_file_list = [] cmd_message = None @@ -167,13 +167,13 @@ def files(): @click.argument('filename', required=False) @click.option('-l', '--lines', default=10, show_default=True) def logging(filename, lines): - cmd = "sudo tail -{}".format(lines) + cmd = ['sudo', 'tail', '-'+str(lines)] if filename: timestamp = filename.strip().split(".")[-1] file_path = "/var/crash/{}/{}".format(timestamp, filename) if os.path.isfile(file_path): - cmd += " {}".format(file_path) + cmd += [str(file_path)] else: click.echo("Invalid filename: '{}'!".format(filename)) sys.exit(1) @@ -184,6 +184,6 @@ def logging(filename, lines): sys.exit(2) dmesg_file_result.sort(reverse=True) - cmd += " {}".format(dmesg_file_result[0]) + cmd += [str(dmesg_file_result[0])] clicommon.run_command(cmd) diff --git a/show/main.py b/show/main.py index 7f79cd4779..2d21e1b3aa 100755 --- a/show/main.py +++ b/show/main.py @@ -981,11 +981,11 @@ def ip(): @click.pass_context def interfaces(ctx, namespace, display): if ctx.invoked_subcommand is None: - cmd = "sudo ipintutil -a ipv4" + cmd = ['sudo', 'ipintutil', '-a', 'ipv4'] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] clicommon.run_command(cmd) # @@ -1108,12 +1108,12 @@ def prefix_list(prefix_list_name, verbose): @ipv6.command() @multi_asic_util.multi_asic_click_options def interfaces(namespace, display): - cmd = "sudo ipintutil -a ipv6" + cmd = ['sudo', 'ipintutil', '-a', 'ipv6'] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] - cmd += " -d {}".format(display) + cmd += ['-d', str(display)] clicommon.run_command(cmd) diff --git a/show/nat.py b/show/nat.py index 72b81dc61d..9e3c868ed7 100644 --- a/show/nat.py +++ b/show/nat.py @@ -18,7 +18,7 @@ def nat(): def statistics(verbose): """ 
Show NAT statistics """ - cmd = "sudo natshow -s" + cmd = ['sudo', 'natshow', '-s'] clicommon.run_command(cmd, display_cmd=verbose) @@ -30,16 +30,17 @@ def translations(ctx, verbose): """ Show NAT translations """ if ctx.invoked_subcommand is None: - cmd = "sudo natshow -t" + cmd = ['sudo', 'natshow', '-t'] clicommon.run_command(cmd, display_cmd=verbose) # 'count' subcommand ("show nat translations count") @translations.command() -def count(): +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def count(verbose): """ Show NAT translations count """ - cmd = "sudo natshow -c" + cmd = ['sudo', 'natshow', '-c'] clicommon.run_command(cmd, display_cmd=verbose) @@ -51,23 +52,23 @@ def config(ctx, verbose): """Show NAT config related information""" if ctx.invoked_subcommand is None: click.echo("\nGlobal Values") - cmd = "sudo natconfig -g" + cmd = ['sudo', 'natconfig', '-g'] clicommon.run_command(cmd, display_cmd=verbose) click.echo("Static Entries") - cmd = "sudo natconfig -s" + cmd = ['sudo', 'natconfig', '-s'] clicommon.run_command(cmd, display_cmd=verbose) click.echo("Pool Entries") - cmd = "sudo natconfig -p" + cmd = ['sudo', 'natconfig', '-p'] clicommon.run_command(cmd, display_cmd=verbose) click.echo("NAT Bindings") - cmd = "sudo natconfig -b" + cmd = ['sudo', 'natconfig', '-b'] clicommon.run_command(cmd, display_cmd=verbose) click.echo("NAT Zones") - cmd = "sudo natconfig -z" + cmd = ['sudo', 'natconfig', '-z'] clicommon.run_command(cmd, display_cmd=verbose) @@ -77,7 +78,7 @@ def config(ctx, verbose): def static(verbose): """Show static NAT configuration""" - cmd = "sudo natconfig -s" + cmd = ['sudo', 'natconfig', '-s'] clicommon.run_command(cmd, display_cmd=verbose) @@ -87,7 +88,7 @@ def static(verbose): def pool(verbose): """Show NAT Pool configuration""" - cmd = "sudo natconfig -p" + cmd = ['sudo', 'natconfig', '-p'] clicommon.run_command(cmd, display_cmd=verbose) @@ -97,7 +98,7 @@ def pool(verbose): def bindings(verbose): """Show NAT 
binding configuration""" - cmd = "sudo natconfig -b" + cmd = ['sudo', 'natconfig', '-b'] clicommon.run_command(cmd, display_cmd=verbose) @@ -107,7 +108,7 @@ def bindings(verbose): def globalvalues(verbose): """Show NAT Global configuration""" - cmd = "sudo natconfig -g" + cmd = ['sudo', 'natconfig', '-g'] clicommon.run_command(cmd, display_cmd=verbose) @@ -117,5 +118,5 @@ def globalvalues(verbose): def zones(verbose): """Show NAT Zone configuration""" - cmd = "sudo natconfig -z" + cmd = ['sudo', 'natconfig', '-z'] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/platform.py b/show/platform.py index 1916e10d84..85d729df84 100644 --- a/show/platform.py +++ b/show/platform.py @@ -74,7 +74,7 @@ def summary(json): @click.option('--verbose', is_flag=True, help="Enable verbose output") def syseeprom(verbose): """Show system EEPROM information""" - cmd = "sudo decode-syseeprom -d" + cmd = ['sudo', 'decode-syseeprom', '-d'] clicommon.run_command(cmd, display_cmd=verbose) @@ -85,13 +85,13 @@ def syseeprom(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def psustatus(index, json, verbose): """Show PSU status information""" - cmd = "psushow -s" + cmd = ['psushow', '-s'] if index >= 0: - cmd += " -i {}".format(index) + cmd += ['-i', str(index)] if json: - cmd += " -j" + cmd += ["-j"] clicommon.run_command(cmd, display_cmd=verbose) @@ -105,9 +105,9 @@ def ssdhealth(device, verbose, vendor): """Show SSD Health information""" if not device: device = os.popen("lsblk -o NAME,TYPE -p | grep disk").readline().strip().split()[0] - cmd = "sudo ssdutil -d " + device - options = " -v" if verbose else "" - options += " -e" if vendor else "" + cmd = ['sudo', 'ssdutil', '-d', str(device)] + options = ["-v"] if verbose else [] + options += ["-e"] if vendor else [] clicommon.run_command(cmd + options, display_cmd=verbose) @@ -116,9 +116,9 @@ def ssdhealth(device, verbose, vendor): @click.option('-c', '--check', is_flag=True, help="Check the 
platfome pcie device") def pcieinfo(check, verbose): """Show Device PCIe Info""" - cmd = "sudo pcieutil show" + cmd = ['sudo', 'pcieutil', 'show'] if check: - cmd = "sudo pcieutil check" + cmd = ['sudo', 'pcieutil', 'check'] clicommon.run_command(cmd, display_cmd=verbose) @@ -126,7 +126,7 @@ def pcieinfo(check, verbose): @platform.command() def fan(): """Show fan status information""" - cmd = 'fanshow' + cmd = ['fanshow'] clicommon.run_command(cmd) @@ -134,7 +134,7 @@ def fan(): @platform.command() def temperature(): """Show device temperature information""" - cmd = 'tempershow' + cmd = ['tempershow'] clicommon.run_command(cmd) # 'firmware' subcommand ("show platform firmware") diff --git a/show/processes.py b/show/processes.py index c603826c09..69a5a2fcc0 100644 --- a/show/processes.py +++ b/show/processes.py @@ -17,7 +17,7 @@ def processes(): def summary(verbose): """Show processes info""" # Run top batch mode to prevent unexpected newline after each newline - cmd = "ps -eo pid,ppid,cmd,%mem,%cpu " + cmd = ['ps', '-eo', 'pid,ppid,cmd,%mem,%cpu'] clicommon.run_command(cmd, display_cmd=verbose) @@ -27,7 +27,7 @@ def summary(verbose): def cpu(verbose): """Show processes CPU info""" # Run top in batch mode to prevent unexpected newline after each newline - cmd = "top -bn 1 -o %CPU" + cmd = ['top', '-bn', '1', '-o', '%CPU'] clicommon.run_command(cmd, display_cmd=verbose) @@ -37,5 +37,5 @@ def cpu(verbose): def memory(verbose): """Show processes memory info""" # Run top batch mode to prevent unexpected newline after each newline - cmd = "top -bn 1 -o %MEM" + cmd = ['top', '-bn', '1', '-o', '%MEM'] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/system_health.py b/show/system_health.py index 08e9e70594..1fa92f6592 100644 --- a/show/system_health.py +++ b/show/system_health.py @@ -137,7 +137,7 @@ def sysready_status(ctx): if ctx.invoked_subcommand is None: try: - cmd = "sysreadyshow" + cmd = ["sysreadyshow"] clicommon.run_command(cmd, display_cmd=False) 
except Exception as e: click.echo("Exception: {}".format(str(e))) @@ -146,7 +146,7 @@ def sysready_status(ctx): @sysready_status.command('brief') def sysready_status_brief(): try: - cmd = "sysreadyshow --brief" + cmd = ["sysreadyshow", "--brief"] clicommon.run_command(cmd, display_cmd=False) except Exception as e: click.echo("Exception: {}".format(str(e))) @@ -155,7 +155,7 @@ def sysready_status_brief(): @sysready_status.command('detail') def sysready_status_detail(): try: - cmd = "sysreadyshow --detail" + cmd = ["sysreadyshow", "--detail"] clicommon.run_command(cmd, display_cmd=False) except Exception as e: click.echo("Exception: {}".format(str(e))) diff --git a/show/vxlan.py b/show/vxlan.py index 3d04552904..16743fc07a 100644 --- a/show/vxlan.py +++ b/show/vxlan.py @@ -326,11 +326,11 @@ def remotemac(remote_vtep_ip, count): def counters(tunnel, period, verbose): """Show VxLAN counters""" - cmd = "tunnelstat -T vxlan" + cmd = ['tunnelstat', '-T', 'vxlan'] if period is not None: - cmd += " -p {}".format(period) + cmd += ['-p', str(period)] if tunnel is not None: - cmd += " -i {}".format(tunnel) + cmd += ['-i', str(tunnel)] clicommon.run_command(cmd, display_cmd=verbose) diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index e6dbe569d2..6b9e0f3e6e 100644 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -259,14 +259,14 @@ def test_midplane_show_incorrect_module(self): def test_show_and_verify_system_ports_output_asic0(self): os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" - return_code, result = get_result_and_return_code('voqutil -c system_ports -n asic0') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_ports', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == show_chassis_system_ports_output_asic0 def test_show_and_verify_system_ports_output_1_asic0(self): - return_code, result = 
get_result_and_return_code('voqutil -c system_ports -i "Linecard1|Asic0|Ethernet0" -n asic0') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_ports', '-i', "Linecard1|Asic0|Ethernet0", '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -280,21 +280,21 @@ def test_show_and_verify_system_neighbors_output_all(self): assert(result.output == show_chassis_system_neighbors_output_all) def test_show_and_verify_system_neighbors_output_ipv4(self): - return_code, result = get_result_and_return_code('voqutil -c system_neighbors -a 10.0.0.5') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_neighbors', '-a', '10.0.0.5']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == show_chassis_system_neighbors_output_ipv4 def test_show_and_verify_system_neighbors_output_ipv6(self): - return_code, result = get_result_and_return_code('voqutil -c system_neighbors -a fc00::16') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_neighbors', '-a', 'fc00::16']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == show_chassis_system_neighbors_output_ipv6 def test_show_and_verify_system_neighbors_output_asic0(self): - return_code, result = get_result_and_return_code('voqutil -c system_neighbors -n Asic0') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_neighbors', '-n', 'Asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -313,14 +313,14 @@ def test_show_and_verify_system_lags_output_1(self): assert(result.output == show_chassis_system_lags_output_1) def test_show_and_verify_system_lags_output_asic1(self): - return_code, result = get_result_and_return_code('voqutil -c system_lags -n Asic1') + return_code, result 
= get_result_and_return_code(['voqutil', '-c', 'system_lags', '-n', 'Asic1']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == show_chassis_system_lags_output_asic1 def test_show_and_verify_system_lags_output_lc4(self): - return_code, result = get_result_and_return_code('voqutil -c system_lags -l Linecard4') + return_code, result = get_result_and_return_code(['voqutil', '-c', 'system_lags', '-l', 'Linecard4']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 diff --git a/tests/clear_test.py b/tests/clear_test.py index a9c2a367f2..97f0f55643 100644 --- a/tests/clear_test.py +++ b/tests/clear_test.py @@ -1,3 +1,4 @@ +import click import pytest import clear.main as clear from click.testing import CliRunner @@ -286,3 +287,35 @@ def test_clear_ipv6_frr(self, run_command): def teardown(self): print('TEAR DOWN') + +class TestClearFlowcnt(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(clear.cli.commands['flowcnt-route'], ['-n', 'asic0']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['flow_counters_stat', '-c', '-t', 'route', '-n', 'asic0']) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route_pattern(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(clear.cli.commands['flowcnt-route'].commands['pattern'], ['--vrf', 'Vrf_1', '-n', 'asic0', '3.3.0.0/16']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['flow_counters_stat', '-c', '-t', 'route', '--prefix_pattern', '3.3.0.0/16', '--vrf', str('Vrf_1'), '-n', 'asic0']) + + @patch('utilities_common.cli.run_command') + 
@patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route_route(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(clear.cli.commands['flowcnt-route'].commands['route'], ['--vrf', 'Vrf_1', '-n', 'asic0', '3.3.0.0/16']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['flow_counters_stat', '-c', '-t', 'route', '--prefix', '3.3.0.0/16', '--vrf', str('Vrf_1'), '-n', 'asic0']) + + def teardown(self): + print('TEAR DOWN') + diff --git a/tests/config_int_ip_test.py b/tests/config_int_ip_test.py index d1addd0f6d..170ea4ddf5 100644 --- a/tests/config_int_ip_test.py +++ b/tests/config_int_ip_test.py @@ -185,4 +185,4 @@ def test_config_int_ip_rem_static_multiasic( print(result.exit_code, result.output) assert result.exit_code != 0 assert "Error: Cannot remove the last IP entry of interface Ethernet8. A static ipv6 route is still bound to the RIF." in result.output - assert mock_run_command.call_count == 0 \ No newline at end of file + assert mock_run_command.call_count == 0 diff --git a/tests/config_test.py b/tests/config_test.py index 8d5e6c820b..b5be1717cb 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -17,7 +17,7 @@ from sonic_py_common import device_info from utilities_common.db import Db from utilities_common.general import load_module_from_source -from mock import patch, MagicMock +from mock import call, patch, mock_open, MagicMock from generic_config_updater.generic_updater import ConfigFormat @@ -69,7 +69,8 @@ Running command: ifconfig eth0 10.0.0.100 netmask 255.255.255.0 Running command: ip route add default via 10.0.0.1 dev eth0 table default Running command: ip rule add from 10.0.0.100 table default -Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Running command: kill `cat /var/run/dhclient.eth0.pid` +Running command: rm -f /var/run/dhclient.eth0.pid Please note loaded setting will be 
lost after system reboot. To preserve setting, run `config save`. """ @@ -80,7 +81,8 @@ Running command: ifconfig eth0 add fc00:1::32/64 Running command: ip -6 route add default via fc00:1::1 dev eth0 table default Running command: ip -6 rule add from fc00:1::32 table default -Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Running command: kill `cat /var/run/dhclient.eth0.pid` +Running command: rm -f /var/run/dhclient.eth0.pid Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. """ @@ -94,29 +96,30 @@ Running command: ifconfig eth0 add fc00:1::32/64 Running command: ip -6 route add default via fc00:1::1 dev eth0 table default Running command: ip -6 rule add from fc00:1::32 table default -Running command: [ -f /var/run/dhclient.eth0.pid ] && kill `cat /var/run/dhclient.eth0.pid` && rm -f /var/run/dhclient.eth0.pid +Running command: kill `cat /var/run/dhclient.eth0.pid` +Running command: rm -f /var/run/dhclient.eth0.pid Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. """ RELOAD_CONFIG_DB_OUTPUT = """\ Stopping SONiC target ... -Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... """ RELOAD_YANG_CFG_OUTPUT = """\ Stopping SONiC target ... -Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db +Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... """ RELOAD_MASIC_CONFIG_DB_OUTPUT = """\ Stopping SONiC target ... 
-Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db -Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db -Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... """ @@ -126,22 +129,28 @@ reload_config_with_disabled_service_output="""\ Stopping SONiC target ... -Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... """ def mock_run_command_side_effect(*args, **kwargs): command = args[0] + if isinstance(command, str): + command = command + elif isinstance(command, list): + command = ' '.join(command) if kwargs.get('display_cmd'): + if 'cat /var/run/dhclient.eth0.pid' in command: + command = 'kill `cat /var/run/dhclient.eth0.pid`' click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) if kwargs.get('return_cmd'): if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": - return 'snmp.timer' , 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 + return 'snmp.timer', 0 + elif command == "systemctl list-dependencies --plain sonic.target": + return 'sonic.target\nswss', 0 elif command == "systemctl is-enabled snmp.timer": return 'enabled', 0 else: @@ -149,6 +158,10 @@ def mock_run_command_side_effect(*args, **kwargs): def mock_run_command_side_effect_disabled_timer(*args, **kwargs): command = args[0] + if isinstance(command, str): + command = command + elif isinstance(command, list): + 
command = ' '.join(command) if kwargs.get('display_cmd'): click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) @@ -156,8 +169,8 @@ def mock_run_command_side_effect_disabled_timer(*args, **kwargs): if kwargs.get('return_cmd'): if command == "systemctl list-dependencies --plain sonic-delayed.target | sed '1d'": return 'snmp.timer', 0 - elif command == "systemctl list-dependencies --plain sonic.target | sed '1d'": - return 'swss', 0 + elif command == "systemctl list-dependencies --plain sonic.target": + return 'sonic.target\nswss', 0 elif command == "systemctl is-enabled snmp.timer": return 'masked', 0 elif command == "systemctl show swss.service --property ActiveState --value": @@ -280,7 +293,7 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call('systemctl reset-failed swss') + mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 8 @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) @@ -295,7 +308,7 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call('systemctl reset-failed swss') + mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 8 @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, 
None))) @@ -1551,7 +1564,7 @@ def test_config_load_mgmt_config_ipv4_only(self, get_cmd_module, setup_single_br } } } - self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_only_output, 5) + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_only_output, 7) def test_config_load_mgmt_config_ipv6_only(self, get_cmd_module, setup_single_broadcom_asic): device_desc_result = { @@ -1566,7 +1579,7 @@ def test_config_load_mgmt_config_ipv6_only(self, get_cmd_module, setup_single_br } } } - self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv6_only_output, 5) + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv6_only_output, 7) def test_config_load_mgmt_config_ipv4_ipv6(self, get_cmd_module, setup_single_broadcom_asic): device_desc_result = { @@ -1584,7 +1597,7 @@ def test_config_load_mgmt_config_ipv4_ipv6(self, get_cmd_module, setup_single_br } } } - self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_ipv6_output, 8) + self.check_output(get_cmd_module, device_desc_result, load_mgmt_config_command_ipv4_ipv6_output, 10) def check_output(self, get_cmd_module, parse_device_desc_xml_result, expected_output, expected_command_call_count): def parse_device_desc_xml_side_effect(filename): @@ -1593,20 +1606,21 @@ def parse_device_desc_xml_side_effect(filename): def change_hostname_side_effect(hostname): print("change hostname to {}".format(hostname)) with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - with mock.patch('config.main.parse_device_desc_xml', mock.MagicMock(side_effect=parse_device_desc_xml_side_effect)): - with mock.patch('config.main._change_hostname', mock.MagicMock(side_effect=change_hostname_side_effect)): - (config, show) = get_cmd_module - runner = CliRunner() - with runner.isolated_filesystem(): - with open('device_desc.xml', 'w') 
as f: - f.write('dummy') - result = runner.invoke(config.config.commands["load_mgmt_config"], ["-y", "device_desc.xml"]) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == expected_output - assert mock_run_command.call_count == expected_command_call_count + with mock.patch('os.path.isfile', mock.MagicMock(return_value=True)): + with mock.patch('config.main.parse_device_desc_xml', mock.MagicMock(side_effect=parse_device_desc_xml_side_effect)): + with mock.patch('config.main._change_hostname', mock.MagicMock(side_effect=change_hostname_side_effect)): + (config, show) = get_cmd_module + runner = CliRunner() + with runner.isolated_filesystem(): + with open('device_desc.xml', 'w') as f: + f.write('dummy') + result = runner.invoke(config.config.commands["load_mgmt_config"], ["-y", "device_desc.xml"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == expected_output + assert mock_run_command.call_count == expected_command_call_count @classmethod def teardown_class(cls): @@ -2001,3 +2015,351 @@ def test_del_ntp_server_invalid_ip_yang_validation(self): @classmethod def teardown_class(cls): print("TEARDOWN") + + +class TestConfigPfcwd(object): + def setup(self): + print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_start(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['start'], ['-a', 'forward', '-r', 150, 'Ethernet0', '200', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'start', '--action', 'forward', 'Ethernet0', '200', '--restoration-time', '150'], display_cmd=True) + + 
@patch('utilities_common.cli.run_command') + def test_stop(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['stop'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'stop'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_interval(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['interval'], ['300', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'interval', '300'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_counter_poll(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['counter_poll'], ['enable', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'counter_poll', 'enable'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_big_red_switch(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['big_red_switch'], ['enable', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'big_red_switch', 'enable'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_start_default(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['pfcwd'].commands['start_default'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['pfcwd', 'start_default'], display_cmd=True) + + def teardown(self): + print("TEARDOWN") + + 
+class TestConfigAclUpdate(object): + def setup(self): + print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_full(self, mock_run_command): + file_name = '/etc/sonic/full_snmp.json' + runner = CliRunner() + result = runner.invoke(config.config.commands['acl'].commands['update'].commands['full'], [file_name]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['acl-loader', 'update', 'full', file_name]) + + @patch('utilities_common.cli.run_command') + def test_incremental(self, mock_run_command): + file_name = '/etc/sonic/full_snmp.json' + runner = CliRunner() + result = runner.invoke(config.config.commands['acl'].commands['update'].commands['incremental'], [file_name]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['acl-loader', 'update', 'incremental', file_name]) + + def teardown(self): + print("TEARDOWN") + + +class TestConfigDropcounters(object): + def setup(self): + print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_install(self, mock_run_command): + counter_name = 'DEBUG_2' + counter_type = 'PORT_INGRESS_DROPS' + reasons = '[EXCEEDS_L2_MTU,DECAP_ERROR]' + alias = 'BAD_DROPS' + group = 'BAD' + desc = 'more port ingress drops' + + runner = CliRunner() + result = runner.invoke(config.config.commands['dropcounters'].commands['install'], [counter_name, counter_type, reasons, '-d', desc, '-g', group, '-a', alias]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['dropconfig', '-c', 'install', '-n', str(counter_name), '-t', str(counter_type), '-r', str(reasons), '-a', str(alias), '-g', str(group), '-d', str(desc)], display_cmd=False) + + @patch('utilities_common.cli.run_command') + def test_delete(self, mock_run_command): + counter_name = 'DEBUG_2' + runner = CliRunner() + result = 
runner.invoke(config.config.commands['dropcounters'].commands['delete'], [counter_name, '-v']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['dropconfig', '-c', 'uninstall', '-n', str(counter_name)], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_add_reasons(self, mock_run_command): + counter_name = 'DEBUG_2' + reasons = '[EXCEEDS_L2_MTU,DECAP_ERROR]' + runner = CliRunner() + result = runner.invoke(config.config.commands['dropcounters'].commands['add-reasons'], [counter_name, reasons, '-v']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['dropconfig', '-c', 'add', '-n', str(counter_name), '-r', str(reasons)], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_remove_reasons(self, mock_run_command): + counter_name = 'DEBUG_2' + reasons = '[EXCEEDS_L2_MTU,DECAP_ERROR]' + runner = CliRunner() + result = runner.invoke(config.config.commands['dropcounters'].commands['remove-reasons'], [counter_name, reasons, '-v']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['dropconfig', '-c', 'remove', '-n', str(counter_name), '-r', str(reasons)], display_cmd=True) + + def teardown(self): + print("TEARDOWN") + + +class TestConfigWatermarkTelemetry(object): + def setup(self): + print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_interval(self, mock_run_command): + interval = '18' + runner = CliRunner() + result = runner.invoke(config.config.commands['watermark'].commands['telemetry'].commands['interval'], [interval]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['watermarkcfg', '--config-interval', str(interval)]) + + def teardown(self): + print("TEARDOWN") + + +class TestConfigZtp(object): + def 
setup(self): + print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_run(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['ztp'].commands['run'], ['-y']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['ztp', 'run', '-y'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_disable(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['ztp'].commands['disable'], ['-y']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['ztp', 'disable', '-y'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_enable(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(config.config.commands['ztp'].commands['enable']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['ztp', 'enable'], display_cmd=True) + + def teardown(self): + print("TEARDOWN") + + +@patch('utilities_common.cli.run_command') +@patch('os.uname', MagicMock(return_value=['Linux', 'current-hostname', '5.11.0-34-generic', '#36~20.04.1-Ubuntu SMP Thu Aug 5 14:22:16 UTC 2021', 'x86_64'])) +def test_change_hostname(mock_run_command): + new_hostname = 'new_hostname' + with patch('builtins.open', mock_open()) as mock_file: + config._change_hostname(new_hostname) + + assert mock_file.call_args_list == [ + call('/etc/hostname', 'w'), + call('/etc/hosts', 'a') + ] + assert mock_file().write.call_args_list == [ + call('new_hostname\n'), + call('127.0.0.1 new_hostname\n') + ] + assert mock_run_command.call_args_list == [ + call(['hostname', '-F', '/etc/hostname'], display_cmd=True), + call(['sed', '-i', r"/\scurrent-hostname$/d", '/etc/hosts'], display_cmd=True) + ] + + +class TestConfigInterface(object): + def setup(self): + 
print("SETUP") + + @patch('utilities_common.cli.run_command') + def test_speed(self, mock_run_command): + interface_name = 'Ethernet0' + interface_speed = '100' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['speed'], [interface_name, interface_speed, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-s', str(interface_speed), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['speed'], [interface_name, interface_speed, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-s', str(interface_speed), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_link_training(self, mock_run_command): + interface_name = 'Ethernet0' + mode = 'on' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['link-training'], [interface_name, mode, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-lt', str(mode), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['link-training'], [interface_name, mode, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-lt', str(mode), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_advertised_speeds(self, mock_run_command): + interface_name = 'Ethernet0' + speed_list = '50,100' + db = Db() + runner = CliRunner() + + obj = 
{'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['advertised-speeds'], [interface_name, speed_list, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-S', str(speed_list), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['advertised-speeds'], [interface_name, speed_list, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-S', str(speed_list), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_advertised_types(self, mock_run_command): + interface_name = 'Ethernet0' + interface_type = 'CR,CR4' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['advertised-types'], [interface_name, interface_type, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-T', str(interface_type), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['advertised-types'], [interface_name, interface_type, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-T', str(interface_type), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_mtu(self, mock_run_command): + interface_name = 'Ethernet0' + interface_mtu = '1000' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['mtu'], [interface_name, interface_mtu, '--verbose'], 
obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-m', str(interface_mtu), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['mtu'], [interface_name, interface_mtu, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-m', str(interface_mtu), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_tpid(self, mock_run_command): + interface_name = 'Ethernet0' + interface_tpid = '0x9200' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['tpid'], [interface_name, interface_tpid, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-tp', str(interface_tpid), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['tpid'], [interface_name, interface_tpid, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-tp', str(interface_tpid), '-n', 'ns', '-vv'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_fec(self, mock_run_command): + interface_name = 'Ethernet0' + interface_fec = 'rs' + db = Db() + runner = CliRunner() + + obj = {'config_db': db.cfgdb, 'namespace': ''} + result = runner.invoke(config.config.commands['interface'].commands['fec'], [interface_name, interface_fec, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-f', str(interface_fec), '-vv'], display_cmd=True) + + obj = {'config_db': db.cfgdb, 
'namespace': 'ns'} + result = runner.invoke(config.config.commands['interface'].commands['fec'], [interface_name, interface_fec, '--verbose'], obj=obj) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['portconfig', '-p', str(interface_name), '-f', str(interface_fec), '-n', 'ns', '-vv'], display_cmd=True) + + def teardown(self): + print("TEARDOWN") + diff --git a/tests/conftest.py b/tests/conftest.py index 6e70f8c9aa..fe99ef47bd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -204,7 +204,7 @@ def mock_show_run_bgp(request): return mock_frr_data return "" - def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace="", vtysh_shell_cmd=constants.RVTYSH_COMMAND): + def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace=[], vtysh_shell_cmd=constants.RVTYSH_COMMAND): if vtysh_cmd == "show ip route vrf all static": return config_int_ip_common.show_ip_route_with_static_expected_output elif vtysh_cmd == "show ipv6 route vrf all static": diff --git a/tests/ecn_test.py b/tests/ecn_test.py index ef1539af17..0eac54ddbb 100644 --- a/tests/ecn_test.py +++ b/tests/ecn_test.py @@ -132,7 +132,7 @@ def executor(self, input): exit_code = result.exit_code output = result.output elif 'q_cmd' in input['cmd'] : - exit_code, output = get_result_and_return_code("ecnconfig {}".format(" ".join(input['args']))) + exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) else: exec_cmd = config.config.commands["ecn"] result = runner.invoke(exec_cmd, input['args']) diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index 7c2174b761..7e37e993fe 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -135,7 +135,7 @@ def test_single_show_fabric_counters(self): dbconnector.load_database_config dbconnector.load_namespace_config() - return_code, result = get_result_and_return_code('fabricstat') + return_code, result = get_result_and_return_code(['fabricstat']) print("return_code: {}".format(return_code)) 
print("result = {}".format(result)) assert return_code == 0 @@ -159,49 +159,49 @@ def setup_class(cls): os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" def test_multi_show_fabric_counters(self): - return_code, result = get_result_and_return_code('fabricstat') + return_code, result = get_result_and_return_code(['fabricstat']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_fabric_counters def test_multi_show_fabric_counters_asic(self): - return_code, result = get_result_and_return_code('fabricstat -n asic0') + return_code, result = get_result_and_return_code(['fabricstat', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_fabric_counters_asic0 def test_multi_asic_invalid_asic(self): - return_code, result = get_result_and_return_code('fabricstat -n asic99') + return_code, result = get_result_and_return_code(['fabricstat', '-n', 'asic99']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 assert result == fabric_invalid_asic_error def test_multi_show_fabric_counters_queue(self): - return_code, result = get_result_and_return_code('fabricstat -q') + return_code, result = get_result_and_return_code(['fabricstat', '-q']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_fabric_counters_queue def test_multi_show_fabric_counters_queue_asic(self): - return_code, result = get_result_and_return_code('fabricstat -q -n asic0') + return_code, result = get_result_and_return_code(['fabricstat', '-q', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_fabric_counters_queue_asic0 def test_multi_show_fabric_reachability(self): - return_code, result = 
get_result_and_return_code('fabricstat -r') + return_code, result = get_result_and_return_code(['fabricstat', '-r']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_fabric_reachability def test_multi_show_fabric_reachability_asic(self): - return_code, result = get_result_and_return_code('fabricstat -r -n asic0') + return_code, result = get_result_and_return_code(['fabricstat', '-r', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 diff --git a/tests/fdbshow_test.py b/tests/fdbshow_test.py index 8814a6b323..578b278a95 100755 --- a/tests/fdbshow_test.py +++ b/tests/fdbshow_test.py @@ -171,7 +171,7 @@ def test_show_mac_def_vlan(self): assert result.exit_code == 0 assert result.output == show_mac_output_with_def_vlan - return_code, result = get_result_and_return_code('fdbshow') + return_code, result = get_result_and_return_code(['fdbshow']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -216,7 +216,7 @@ def test_show_mac(self): assert result.exit_code == 0 assert result.output == show_mac_output - return_code, result = get_result_and_return_code('fdbshow') + return_code, result = get_result_and_return_code(['fdbshow']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -231,7 +231,7 @@ def test_show_mac_count(self): assert result.exit_code == 0 assert result.output == show_mac_count_output - return_code, result = get_result_and_return_code('fdbshow -c') + return_code, result = get_result_and_return_code(['fdbshow', '-c']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -246,7 +246,7 @@ def test_show_mac_port_vlan(self): assert result.exit_code == 0 assert result.output == show_mac__port_vlan_output - return_code, result = 
get_result_and_return_code('fdbshow -p Ethernet0 -v 2') + return_code, result = get_result_and_return_code(['fdbshow', '-p', 'Ethernet0', '-v', '2']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -261,7 +261,7 @@ def test_show_mac_address(self): assert result.exit_code == 0 assert result.output == show_mac__address_output - return_code, result = get_result_and_return_code('fdbshow -a 11:22:33:66:55:44') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '11:22:33:66:55:44']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -276,7 +276,7 @@ def test_show_mac_address_case(self): assert result.exit_code == 0 assert result.output == show_mac__address_case_output - return_code, result = get_result_and_return_code('fdbshow -a 34:5f:78:9a:bc:de') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '34:5f:78:9a:bc:de']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -291,7 +291,7 @@ def test_show_mac_type(self): assert result.exit_code == 0 assert result.output == show_mac__type_output - return_code, result = get_result_and_return_code('fdbshow -t Static') + return_code, result = get_result_and_return_code(['fdbshow', '-t', 'Static']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -306,7 +306,7 @@ def test_show_mac_type_case(self): assert result.exit_code == 0 assert result.output == show_mac__type_case_output - return_code, result = get_result_and_return_code('fdbshow -t DYNAMIC') + return_code, result = get_result_and_return_code(['fdbshow', '-t', 'DYNAMIC']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -321,7 +321,7 @@ def test_show_mac_port_address(self): assert result.exit_code == 0 assert result.output == 
show_mac__port_address_output - return_code, result = get_result_and_return_code('fdbshow -a 66:55:44:33:22:11 -p Ethernet0') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '66:55:44:33:22:11', '-p', 'Ethernet0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -336,7 +336,7 @@ def test_show_mac_vlan_address(self): assert result.exit_code == 0 assert result.output == show_mac__vlan_address_output - return_code, result = get_result_and_return_code('fdbshow -a 66:55:44:33:22:11 -v 4') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '66:55:44:33:22:11', '-v', '4']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -351,7 +351,7 @@ def test_show_mac_port_type(self): assert result.exit_code == 0 assert result.output == show_mac__port_type_output - return_code, result = get_result_and_return_code('fdbshow -p Ethernet4 -t Static') + return_code, result = get_result_and_return_code(['fdbshow', '-p', 'Ethernet4', '-t', 'Static']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -366,7 +366,7 @@ def test_show_mac_vlan_type(self): assert result.exit_code == 0 assert result.output == show_mac__vlan_type_output - return_code, result = get_result_and_return_code('fdbshow -v 3 -t Static') + return_code, result = get_result_and_return_code(['fdbshow', '-v', '3', '-t', 'Static']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -381,7 +381,7 @@ def test_show_mac_address_type(self): assert result.exit_code == 0 assert result.output == show_mac__address_type_output - return_code, result = get_result_and_return_code('fdbshow -a 11:22:33:66:55:44 -t Static') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '11:22:33:66:55:44', '-t', 'Static']) print("return_code: {}".format(return_code)) 
print("result = {}".format(result)) assert return_code == 0 @@ -396,7 +396,7 @@ def test_show_mac_port_vlan_address_type(self): assert result.exit_code == 0 assert result.output == show_mac__port_vlan_address_type_output - return_code, result = get_result_and_return_code('fdbshow -v 3 -p Ethernet4 -a 11:22:33:66:55:44 -t Static') + return_code, result = get_result_and_return_code(['fdbshow', '-v', '3', '-p', 'Ethernet4', '-a', '11:22:33:66:55:44', '-t', 'Static']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -411,7 +411,7 @@ def test_show_mac_no_port(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow -p Ethernet8') + return_code, result = get_result_and_return_code(['fdbshow', '-p', 'Ethernet8']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -426,7 +426,7 @@ def test_show_mac_no_vlan(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow -v 123') + return_code, result = get_result_and_return_code(['fdbshow', '-v', '123']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -441,7 +441,7 @@ def test_show_mac_no_address(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow -a 12:34:56:78:9A:BC') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '12:34:56:78:9A:BC']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -456,7 +456,7 @@ def test_show_mac_no_type(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow -t Static') + return_code, result = 
get_result_and_return_code(['fdbshow', '-t', 'Static']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -471,7 +471,7 @@ def test_show_mac_no_fdb(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow') + return_code, result = get_result_and_return_code(['fdbshow']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -486,7 +486,7 @@ def test_show_mac_no_bridge(self): assert result.exit_code == 0 assert result.output == show_mac_no_results_output - return_code, result = get_result_and_return_code('fdbshow') + return_code, result = get_result_and_return_code(['fdbshow']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -522,7 +522,7 @@ def test_show_mac_invalid_port(self): assert result.exit_code == 1 assert result.output == show_mac_invalid_port_output - return_code, result = get_result_and_return_code('fdbshow -p eth123') + return_code, result = get_result_and_return_code(['fdbshow', '-p', 'eth123']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 @@ -537,7 +537,7 @@ def test_show_mac_invalid_vlan(self): assert result.exit_code == 1 assert result.output == show_mac_invalid_vlan_output - return_code, result = get_result_and_return_code('fdbshow -v 10000') + return_code, result = get_result_and_return_code(['fdbshow', '-v', '10000']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 @@ -552,7 +552,7 @@ def test_show_mac_invalid_type(self): assert result.exit_code == 1 assert result.output == show_mac_invalid_type_output - return_code, result = get_result_and_return_code('fdbshow -t both') + return_code, result = get_result_and_return_code(['fdbshow', '-t', 'both']) print("return_code: 
{}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 @@ -567,7 +567,7 @@ def test_show_mac_invalid_address(self): assert result.exit_code == 1 assert result.output == show_mac_invalid_address_output - return_code, result = get_result_and_return_code('fdbshow -a 12:345:67:a9:bc:d') + return_code, result = get_result_and_return_code(['fdbshow', '-a', '12:345:67:a9:bc:d']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 diff --git a/tests/flow_counter_stats_test.py b/tests/flow_counter_stats_test.py index dc5bb22dee..1ddd5031bc 100644 --- a/tests/flow_counter_stats_test.py +++ b/tests/flow_counter_stats_test.py @@ -270,7 +270,7 @@ """ def delete_cache(stat_type='trap'): - cmd = 'flow_counters_stat -t {} -d'.format(stat_type) + cmd = ['flow_counters_stat', '-t', stat_type, '-d'] get_result_and_return_code(cmd) @@ -294,7 +294,7 @@ def test_show(self): assert result.output == expect_show_output def test_show_json(self): - cmd = 'flow_counters_stat -t trap -j' + cmd = ['flow_counters_stat', '-t', 'trap', '-j'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_output_json @@ -382,7 +382,7 @@ def test_show(self): assert result.output == expect_show_output_multi_asic def test_show_json(self): - cmd = 'flow_counters_stat -t trap -j' + cmd = ['flow_counters_stat', '-t', 'trap', '-j'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_output_json_multi_asic @@ -764,17 +764,17 @@ def test_show_by_route(self): print(result.output) def test_show_json(self): - cmd = 'flow_counters_stat -t route -j' + cmd = ['flow_counters_stat', '-t', 'route', '-j'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_route_stats_all_json - cmd = 'flow_counters_stat -t route -j --prefix_pattern 1.1.1.0/24' + cmd = ['flow_counters_stat', '-t', 
'route', '-j', '--prefix_pattern', '1.1.1.0/24'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_route_stats_by_pattern_v4_json - cmd = 'flow_counters_stat -t route -j --prefix 2001::1/64 --vrf Vrf_1' + cmd = ['flow_counters_stat', '-t', 'route', '-j', '--prefix', '2001::1/64', '--vrf', 'Vrf_1'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_route_stats_by_pattern_and_vrf_v6_json @@ -916,7 +916,7 @@ def test_show_all_stats(self): assert expect_show_route_stats_all_multi_asic == result.output def test_show_json(self): - cmd = 'flow_counters_stat -t route -j' + cmd = ['flow_counters_stat', '-t', 'route', '-j'] return_code, result = get_result_and_return_code(cmd) assert return_code == 0 assert result == expect_show_route_stats_all_json_multi_asic diff --git a/tests/multi_asic_intfutil_test.py b/tests/multi_asic_intfutil_test.py index 37e5b5b0f0..1b3655e44e 100644 --- a/tests/multi_asic_intfutil_test.py +++ b/tests/multi_asic_intfutil_test.py @@ -98,70 +98,70 @@ def setUp(self): self.runner = CliRunner() def test_multi_asic_interface_status_all(self): - return_code, result = get_result_and_return_code( 'intfutil -c status -d all') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'status', '-d', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_status_all def test_multi_asic_interface_status(self): - return_code, result = get_result_and_return_code('intfutil -c status') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'status']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_status def test_multi_asic_interface_status_asic0_all(self): - return_code, result = get_result_and_return_code('intfutil -c status -n asic0 -d all') + return_code, result = 
get_result_and_return_code(['intfutil', '-c', 'status', '-n', 'asic0', '-d', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_status_asic0_all def test_multi_asic_interface_status_asic0(self): - return_code, result = get_result_and_return_code('intfutil -c status -n asic0') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'status', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_status_asic0 def test_multi_asic_interface_desc(self): - return_code, result = get_result_and_return_code('intfutil -c description') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'description']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_description def test_multi_asic_interface_desc_all(self): - return_code, result = get_result_and_return_code( 'intfutil -c description -d all') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'description', '-d', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_description_all def test_multi_asic_interface_asic0(self): - return_code, result = get_result_and_return_code( 'intfutil -c description -n asic0') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'description', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == intf_description_asic0 def test_multi_asic_interface_desc_asic0_all(self): - return_code, result = get_result_and_return_code('intfutil -c description -n asic0 -d all') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'description', '-n', 'asic0', '-d', 'all']) print("return_code: {}".format(return_code)) 
print("result = {}".format(result)) assert return_code == 0 assert result == intf_description_asic0_all def test_invalid_asic_name(self): - return_code, result = get_result_and_return_code('intfutil -c description -n asic99 -d all') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'description', '-n', 'asic99', '-d', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 assert result == intf_invalid_asic_error def test_invalid_asic_name(self): - return_code, result = get_result_and_return_code('intfutil -c status -n asic99') + return_code, result = get_result_and_return_code(['intfutil', '-c', 'status', '-n', 'asic99']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index c501c68609..fe8b057b5d 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -132,13 +132,13 @@ def setup_class(cls): print("SETUP") def test_queue_counters(self): - return_code, result = get_result_and_return_code('queuestat -n asic0') + return_code, result = get_result_and_return_code(['queuestat', '-n', 'asic0']) assert return_code == 0 print(result) assert result == show_queue_counters def test_queue_counters_port(self): - return_code, result = get_result_and_return_code('queuestat -p Ethernet-BP4 -n asic0') + return_code, result = get_result_and_return_code(['queuestat', '-p', 'Ethernet-BP4', '-n', 'asic0']) assert return_code == 0 print(result) assert result == show_queue_counters_port diff --git a/tests/pfcstat_test.py b/tests/pfcstat_test.py index fc24dd090d..6ac0401b24 100644 --- a/tests/pfcstat_test.py +++ b/tests/pfcstat_test.py @@ -174,11 +174,11 @@ def pfc_clear(expected_output): del_cached_stats() return_code, result = get_result_and_return_code( - 'pfcstat -c' + ['pfcstat', '-c'] ) return_code, result = 
get_result_and_return_code( - 'pfcstat -s all' + ['pfcstat', '-s', 'all'] ) result_stat = [s for s in result.split("\n") if "Last cached" not in s] expected = expected_output.split("\n") @@ -270,21 +270,21 @@ def test_pfc_counters_all_with_clear(self): def test_pfc_counters_frontend(self): return_code, result = get_result_and_return_code( - 'pfcstat -s frontend' + ['pfcstat', '-s', 'frontend'] ) assert return_code == 0 assert result == show_pfc_counters_asic0_frontend def test_pfc_counters_asic(self): return_code, result = get_result_and_return_code( - 'pfcstat -n asic0' + ['pfcstat', '-n', 'asic0'] ) assert return_code == 0 assert result == show_pfc_counters_asic0_frontend def test_pfc_counters_asic_all(self): return_code, result = get_result_and_return_code( - 'pfcstat -n asic0 -s all' + ['pfcstat', '-n', 'asic0', '-s', 'all'] ) assert return_code == 0 assert result == show_pfc_counters_all_asic diff --git a/tests/portstat_test.py b/tests/portstat_test.py index d418a16feb..bf7a2db190 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -269,7 +269,7 @@ def test_show_intf_counters(self): assert result.exit_code == 0 assert result.output == intf_counters_before_clear - return_code, result = get_result_and_return_code('portstat') + return_code, result = get_result_and_return_code(['portstat']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -278,14 +278,14 @@ def test_show_intf_counters(self): def test_show_intf_counters_ethernet4(self): runner = CliRunner() result = runner.invoke( - show.cli.commands["interfaces"].commands["counters"], ["-i Ethernet4"]) + show.cli.commands["interfaces"].commands["counters"], ["-i", "Ethernet4"]) print(result.exit_code) print(result.output) assert result.exit_code == 0 assert result.output == intf_counters_ethernet4 return_code, result = get_result_and_return_code( - 'portstat -i Ethernet4') + ['portstat', '-i', 'Ethernet4']) print("return_code: 
{}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -300,7 +300,7 @@ def test_show_intf_counters_all(self): assert result.exit_code == 0 assert result.output == intf_counters_all - return_code, result = get_result_and_return_code('portstat -a') + return_code, result = get_result_and_return_code(['portstat', '-a']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -315,7 +315,7 @@ def test_show_intf_fec_counters(self): assert result.exit_code == 0 assert result.output == intf_fec_counters - return_code, result = get_result_and_return_code('portstat -f') + return_code, result = get_result_and_return_code(['portstat', '-f']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -331,7 +331,7 @@ def test_show_intf_fec_counters_period(self): assert result.output == intf_fec_counters_period return_code, result = get_result_and_return_code( - 'portstat -f -p {}'.format(TEST_PERIOD)) + ['portstat', '-f', '-p', str(TEST_PERIOD)]) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -349,7 +349,7 @@ def test_show_intf_counters_period(self): assert result.output == intf_counters_period return_code, result = get_result_and_return_code( - 'portstat -p {}'.format(TEST_PERIOD)) + ['portstat', '-p', str(TEST_PERIOD)]) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -364,7 +364,7 @@ def test_show_intf_counters_detailed(self): assert result.exit_code == 0 assert result.output == intf_counters_detailed - return_code, result = get_result_and_return_code('portstat -l -i Ethernet4') + return_code, result = get_result_and_return_code(['portstat', '-l', '-i', 'Ethernet4']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -378,7 +378,7 @@ def test_clear_intf_counters(self): 
assert result.exit_code == 0 assert result.output.rstrip() == clear_counter - return_code, result = get_result_and_return_code('portstat -c') + return_code, result = get_result_and_return_code(['portstat', '-c']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -392,7 +392,7 @@ def test_clear_intf_counters(self): assert result.exit_code == 0 verify_after_clear(result.output, intf_counter_after_clear) - return_code, result = get_result_and_return_code('portstat') + return_code, result = get_result_and_return_code(['portstat']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -417,21 +417,21 @@ def setup_class(cls): remove_tmp_cnstat_file() def test_multi_show_intf_counters(self): - return_code, result = get_result_and_return_code('portstat') + return_code, result = get_result_and_return_code(['portstat']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_external_intf_counters def test_multi_show_intf_counters_all(self): - return_code, result = get_result_and_return_code('portstat -s all') + return_code, result = get_result_and_return_code(['portstat', '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_all_intf_counters def test_multi_show_intf_counters_asic(self): - return_code, result = get_result_and_return_code('portstat -n asic0') + return_code, result = get_result_and_return_code(['portstat', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -439,21 +439,21 @@ def test_multi_show_intf_counters_asic(self): def test_multi_show_intf_counters_asic_all(self): return_code, result = get_result_and_return_code( - 'portstat -n asic0 -s all') + ['portstat', '-n', 'asic0', '-s', 'all']) print("return_code: 
{}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_intf_counters_asic0 def test_multi_show_external_intf_counters_printall(self): - return_code, result = get_result_and_return_code('portstat -a') + return_code, result = get_result_and_return_code(['portstat', '-a']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_external_intf_counters_printall def test_multi_show_intf_counters_printall(self): - return_code, result = get_result_and_return_code('portstat -a -s all') + return_code, result = get_result_and_return_code(['portstat', '-a', '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -461,7 +461,7 @@ def test_multi_show_intf_counters_printall(self): def test_multi_show_intf_counters_printall_asic(self): return_code, result = get_result_and_return_code( - 'portstat --a -n asic0') + ['portstat', '--a', '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -469,7 +469,7 @@ def test_multi_show_intf_counters_printall_asic(self): def test_multi_show_intf_counters_printall_asic_all(self): return_code, result = get_result_and_return_code( - 'portstat -a -n asic0 -s all') + ['portstat', '-a', '-n', 'asic0', '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -477,7 +477,7 @@ def test_multi_show_intf_counters_printall_asic_all(self): def test_multi_show_intf_counters_period(self): return_code, result = get_result_and_return_code( - 'portstat -p {}'.format(TEST_PERIOD)) + ['portstat', '-p', str(TEST_PERIOD)]) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -485,7 +485,7 @@ def test_multi_show_intf_counters_period(self): def 
test_multi_show_intf_counters_period_all(self): return_code, result = get_result_and_return_code( - 'portstat -p {} -s all'.format(TEST_PERIOD)) + ['portstat', '-p', str(TEST_PERIOD), '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -493,7 +493,7 @@ def test_multi_show_intf_counters_period_all(self): def test_multi_show_intf_counters_period_asic(self): return_code, result = get_result_and_return_code( - 'portstat -p {} -n asic0'.format(TEST_PERIOD)) + ['portstat', '-p', str(TEST_PERIOD), '-n', 'asic0']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 @@ -501,28 +501,28 @@ def test_multi_show_intf_counters_period_asic(self): def test_multi_show_intf_counters_period_asic_all(self): return_code, result = get_result_and_return_code( - 'portstat -p {} -n asic0 -s all'.format(TEST_PERIOD)) + ['portstat', '-p', str(TEST_PERIOD), '-n', 'asic0', '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result == multi_asic_intf_counter_period_asic_all def test_multi_asic_clear_intf_counters(self): - return_code, result = get_result_and_return_code('portstat -c') + return_code, result = get_result_and_return_code(['portstat', '-c']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 assert result.rstrip() == clear_counter # check stats for all the interfaces are cleared - return_code, result = get_result_and_return_code('portstat -s all') + return_code, result = get_result_and_return_code(['portstat', '-s', 'all']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 0 verify_after_clear(result, mutli_asic_intf_counters_after_clear) def test_multi_asic_invalid_asic(self): - return_code, result = get_result_and_return_code('portstat -n asic99') + return_code, result = 
get_result_and_return_code(['portstat', '-n', 'asic99']) print("return_code: {}".format(return_code)) print("result = {}".format(result)) assert return_code == 1 diff --git a/tests/sfp_test.py b/tests/sfp_test.py index b6b94ebff6..4900071898 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -1,8 +1,8 @@ import sys import os from click.testing import CliRunner - from .mock_tables import dbconnector +from unittest.mock import patch, MagicMock test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -10,6 +10,7 @@ sys.path.insert(0, modules_path) import show.main as show +import show as show_module test_sfp_eeprom_with_dom_output = """\ Ethernet0: SFP EEPROM detected @@ -510,13 +511,13 @@ def test_sfp_presence(self): def test_sfp_eeprom_with_dom(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0 -d"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0", "-d"]) assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_with_dom_output def test_qsfp_dd_eeprom_with_dom(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet8 -d"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet8", "-d"]) assert result.exit_code == 0 assert result.output == test_qsfp_dd_eeprom_with_dom_output @@ -583,9 +584,10 @@ def setup_class(cls): os.environ["UTILITIES_UNIT_TESTING"] = "2" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic0')) def test_sfp_presence_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], 
["Ethernet0 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ['Ethernet0', '-n', 'asic0']) expected = """Port Presence --------- ---------- Ethernet0 Present @@ -593,7 +595,7 @@ def test_sfp_presence_with_ns(self): assert result.exit_code == 0 assert result.output == expected - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ["Ethernet200 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["presence"], ['Ethernet200', '-n', 'asic0']) expected = """Port Presence ----------- ----------- Ethernet200 Not present @@ -607,33 +609,37 @@ def test_sfp_presence_all(self): assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_presence_all_output + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic0')) def test_sfp_eeprom_with_dom_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0 -d -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ['Ethernet0', '-d', '-n', 'asic0']) assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_with_dom_output + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic0')) def test_sfp_eeprom_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet0 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ['Ethernet0', '-n', 'asic0']) assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_sfp_eeprom_output - result = 
runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ["Ethernet200 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["eeprom"], ['Ethernet200', '-n', 'asic0']) result_lines = result.output.strip('\n') expected = "Ethernet200: SFP EEPROM Not detected" assert result_lines == expected + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic0')) def test_qsfp_dd_pm_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ["Ethernet0 -n asic0"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["pm"], ['Ethernet0', '-n', 'asic0']) result_lines = result.output.strip('\n') expected = "Ethernet0: Transceiver performance monitoring not applicable" assert result_lines == expected + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic1')) def test_cmis_sfp_info_with_ns(self): runner = CliRunner() - result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ["Ethernet64 -n asic1"]) + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["info"], ['Ethernet64', '-n', 'asic1']) assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_cmis_eeprom_output diff --git a/tests/show_ip_int_test.py b/tests/show_ip_int_test.py index d2abdbbf5d..16160df75d 100644 --- a/tests/show_ip_int_test.py +++ b/tests/show_ip_int_test.py @@ -101,18 +101,18 @@ def verify_output(output, expected_output): class TestShowIpInt(object): def test_show_ip_intf_v4(self): - return_code, result = get_result_and_return_code(" ipintutil") + return_code, result = get_result_and_return_code(["ipintutil"]) assert return_code == 0 verify_output(result, show_ipv4_intf_with_multple_ips) def test_show_ip_intf_v6(self): - 
return_code, result = get_result_and_return_code(" ipintutil -a ipv6") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv6']) assert return_code == 0 verify_output(result, show_ipv6_intf_with_multiple_ips) def test_show_intf_invalid_af_option(self): - return_code, result = get_result_and_return_code(" ipintutil -a ipv5") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv5']) assert return_code == 1 assert result == show_error_invalid_af @@ -121,36 +121,36 @@ def test_show_intf_invalid_af_option(self): class TestMultiAsicShowIpInt(object): def test_show_ip_intf_v4(self): - return_code, result = get_result_and_return_code("ipintutil") + return_code, result = get_result_and_return_code(["ipintutil"]) assert return_code == 0 verify_output(result, show_multi_asic_ip_intf) def test_show_ip_intf_v4_asic0(self): - return_code, result = get_result_and_return_code("ipintutil -n asic0") + return_code, result = get_result_and_return_code(['ipintutil', '-n', 'asic0']) assert return_code == 0 verify_output(result, show_multi_asic_ip_intf) def test_show_ip_intf_v4_all(self): - return_code, result = get_result_and_return_code("ipintutil -d all") + return_code, result = get_result_and_return_code(['ipintutil', '-d', 'all']) assert return_code == 0 verify_output(result, show_multi_asic_ip_intf_all) def test_show_ip_intf_v6(self): - return_code, result = get_result_and_return_code("ipintutil -a ipv6") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv6']) assert return_code == 0 verify_output(result, show_multi_asic_ipv6_intf) def test_show_ip_intf_v6_asic0(self): - return_code, result = get_result_and_return_code("ipintutil -a ipv6 -n asic0") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv6', '-n', 'asic0']) assert return_code == 0 verify_output(result, show_multi_asic_ipv6_intf) def test_show_ip_intf_v6_all(self): - return_code, result = get_result_and_return_code("ipintutil -a 
ipv6 -d all") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv6', '-d', 'all']) assert return_code == 0 verify_output(result, show_multi_asic_ipv6_intf_all) def test_show_intf_invalid_af_option(self): - return_code, result = get_result_and_return_code(" ipintutil -a ipv5") + return_code, result = get_result_and_return_code(['ipintutil', '-a', 'ipv5']) assert return_code == 1 assert result == show_error_invalid_af diff --git a/tests/show_platform_test.py b/tests/show_platform_test.py index 4dcf73a978..a867f067ee 100644 --- a/tests/show_platform_test.py +++ b/tests/show_platform_test.py @@ -62,28 +62,28 @@ def test_all_psus(self): with mock.patch('utilities_common.cli.run_command') as mock_run_command: CliRunner().invoke(show.cli.commands['platform'].commands['psustatus'], []) assert mock_run_command.call_count == 1 - mock_run_command.assert_called_with('psushow -s', display_cmd=False) + mock_run_command.assert_called_with(['psushow', '-s'], display_cmd=False) def test_all_psus_json(self): with mock.patch('utilities_common.cli.run_command') as mock_run_command: CliRunner().invoke(show.cli.commands['platform'].commands['psustatus'], ['--json']) assert mock_run_command.call_count == 1 - mock_run_command.assert_called_with('psushow -s -j', display_cmd=False) + mock_run_command.assert_called_with(['psushow', '-s', '-j'], display_cmd=False) def test_single_psu(self): with mock.patch('utilities_common.cli.run_command') as mock_run_command: CliRunner().invoke(show.cli.commands['platform'].commands['psustatus'], ['--index=1']) assert mock_run_command.call_count == 1 - mock_run_command.assert_called_with('psushow -s -i 1', display_cmd=False) + mock_run_command.assert_called_with(['psushow', '-s', '-i', '1'], display_cmd=False) def test_single_psu_json(self): with mock.patch('utilities_common.cli.run_command') as mock_run_command: CliRunner().invoke(show.cli.commands['platform'].commands['psustatus'], ['--index=1', '--json']) assert 
mock_run_command.call_count == 1 - mock_run_command.assert_called_with('psushow -s -i 1 -j', display_cmd=False) + mock_run_command.assert_called_with(['psushow', '-s', '-i', '1', '-j'], display_cmd=False) def test_verbose(self): with mock.patch('utilities_common.cli.run_command') as mock_run_command: CliRunner().invoke(show.cli.commands['platform'].commands['psustatus'], ['--verbose']) assert mock_run_command.call_count == 1 - mock_run_command.assert_called_with('psushow -s', display_cmd=True) + mock_run_command.assert_called_with(['psushow', '-s'], display_cmd=True) diff --git a/tests/show_test.py b/tests/show_test.py index ddb59078b2..7b3e492fc9 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1,9 +1,10 @@ import os import sys +import click import pytest import show.main as show -from click.testing import CliRunner from unittest import mock +from click.testing import CliRunner from unittest.mock import call, MagicMock, patch EXPECTED_BASE_COMMAND = 'sudo ' @@ -13,6 +14,15 @@ sys.path.insert(0, test_path) sys.path.insert(0, modules_path) +expected_nat_config_output = \ +""" +Global Values +Static Entries +Pool Entries +NAT Bindings +NAT Zones +""" + class TestShowRunAllCommands(object): @classmethod @@ -147,3 +157,487 @@ def test_show_version(): runner = CliRunner() result = runner.invoke(show.cli.commands["version"]) assert "SONiC OS Version: 11" in result.output + + +class TestShowAcl(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_rule(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['acl'].commands['rule'], ['SNMP_ACL', 'RULE_1', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['acl-loader', 'show', 'rule', 'SNMP_ACL', 'RULE_1'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_table(self, mock_run_command): + runner = CliRunner() + result 
= runner.invoke(show.cli.commands['acl'].commands['table'], ['EVERFLOW', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['acl-loader', 'show', 'table', 'EVERFLOW'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowChassis(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_system_ports(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['chassis'].commands['system-ports'], ['Linecard1|asic0|Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['voqutil', '-c', 'system_ports', '-i', 'Linecard1|asic0|Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_system_neighbors(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['chassis'].commands['system-neighbors'], ['10.0.0.0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['voqutil', '-c', 'system_neighbors', '-a', '10.0.0.0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_system_lags(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['chassis'].commands['system-lags'], ['-l', 'Linecard6' , '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['voqutil', '-c', 'system_lags', '-n', 'asic0', '-l', 'Linecard6'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowFabric(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', 
MagicMock(return_value='asic0')) + def test_port(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['fabric'].commands['counters'].commands['port'], ['-n', 'asic0', '-e']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(["fabricstat", '-n', 'asic0', '-e']) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_queue(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['fabric'].commands['counters'].commands['queue'], ['-n', 'asic0']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(["fabricstat", '-q', '-n', 'asic0']) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowFlowCounters(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_trap_stats(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['flowcnt-trap'].commands['stats'], ['-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['flow_counters_stat', '-t', 'trap', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route_stats(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['flowcnt-route'].commands['stats'], ['-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['flow_counters_stat', '-t', 'route', '-n', 'asic0'], display_cmd=True) + + 
@patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route_stats_pattern(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['flowcnt-route'].commands['stats'].commands['pattern'], ['2001::/64', '--vrf', 'Vrf_1', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['flow_counters_stat', '-t', 'route', '--prefix_pattern', '2001::/64', '--vrf', 'Vrf_1', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_flowcnt_route_stats_route(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['flowcnt-route'].commands['stats'].commands['route'], ['2001::/64', '--vrf', 'Vrf_1', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['flow_counters_stat', '-t', 'route', '--prefix', '2001::/64', '--vrf', 'Vrf_1', '-n', 'asic0'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowInterfaces(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_description(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['description'], ['Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['intfutil', '-c', 'description', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def 
test_status(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['status'], ['Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['intfutil', '-c', 'status', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_tpid(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['tpid'], ['Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['intfutil', '-c', 'tpid', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_transceiver_lpmode(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['transceiver'].commands['lpmode'], ['Ethernet0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'sfputil', 'show', 'lpmode', '-p', 'Ethernet0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_transceiver_error_status(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['transceiver'].commands['error-status'], ['Ethernet0', '-hw', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'sfputil', 'show', 'error-status', '-p', 'Ethernet0', '-hw', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + 
@patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_counters(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['counters'], ['-i', 'Ethernet0', '-p', '3', '-a', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['portstat', '-a', '-p', '3', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_counters_error(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['counters'].commands['errors'], ['-p', '3', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['portstat', '-e', '-p', '3', '-s', 'all'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_counters_rates(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['counters'].commands['rates'], ['-p', '3', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['portstat', '-R', '-p', '3', '-s', 'all'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_counters_detailed(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['counters'].commands['detailed'], ['Ethernet0', '-p', '3', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['portstat', '-l', '-p', '3', '-i', 'Ethernet0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_autoneg_status(self, mock_run_command): + runner = CliRunner() 
+ result = runner.invoke(show.cli.commands['interfaces'].commands['autoneg'].commands['status'], ['Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['intfutil', '-c', 'autoneg', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch.object(click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_link_training_status(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['interfaces'].commands['link-training'].commands['status'], ['Ethernet0', '-n', 'asic0', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['intfutil', '-c', 'link_training', '-i', 'Ethernet0', '-n', 'asic0'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowIp(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_ip_interfaces(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ip'].commands['interfaces']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'ipintutil', '-a', 'ipv4', '-d', 'all']) + + @patch('utilities_common.cli.run_command') + def test_ipv6_interfaces(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ipv6'].commands['interfaces']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'ipintutil', '-a', 'ipv6', '-d', 'all']) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowVxlan(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_counters(self, mock_run_command): + runner = CliRunner() + result = 
runner.invoke(show.cli.commands['vxlan'].commands['counters'], ['-p', '3', 'tunnel1', '--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['tunnelstat', '-T', 'vxlan', '-p', '3', '-i', 'tunnel1'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowNat(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_statistics(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['statistics'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natshow', '-s'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_translations(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['translations'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natshow', '-t'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_translations_count(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['translations'].commands['count'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natshow', '-c'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_config(self, mock_run_command): + expected_calls = [ + call(['sudo', 'natconfig', '-g'], display_cmd=True), + call(['sudo', 'natconfig', '-s'], display_cmd=True), + call(['sudo', 'natconfig', '-p'], display_cmd=True), + call(['sudo', 'natconfig', '-b'], display_cmd=True), + call(['sudo', 'natconfig', '-z'], display_cmd=True), + ] + + runner = CliRunner() + result = 
runner.invoke(show.cli.commands['nat'].commands['config'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == expected_nat_config_output + assert mock_run_command.call_args_list == expected_calls + + @patch('utilities_common.cli.run_command') + def test_config_static(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['config'].commands['static'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natconfig', '-s'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_config_pool(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['config'].commands['pool'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natconfig', '-p'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_config_bindings(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['config'].commands['bindings'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natconfig', '-b'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_config_globalvalues(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['nat'].commands['config'].commands['globalvalues'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natconfig', '-g'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_config_zones(self, mock_run_command): + runner = CliRunner() + result = 
runner.invoke(show.cli.commands['nat'].commands['config'].commands['zones'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'natconfig', '-z'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowProcesses(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_summary(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['processes'].commands['summary'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['ps', '-eo', 'pid,ppid,cmd,%mem,%cpu'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_cpu(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['processes'].commands['cpu'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['top', '-bn', '1', '-o', '%CPU'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_memory(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['processes'].commands['memory'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['top', '-bn', '1', '-o', '%MEM'], display_cmd=True) + + def teardown(self): + print('TEAR DOWN') + + +class TestShowPlatform(object): + def setup(self): + print('SETUP') + + @patch('utilities_common.cli.run_command') + def test_syseeprom(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['syseeprom'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 
'decode-syseeprom', '-d'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + @patch('os.popen') + def test_ssdhealth(self, mock_popen, mock_run_command): + mock_popen.return_value.readline.return_value = '/dev/sda\n' + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['ssdhealth'], ['--verbose', '--vendor']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_popen.assert_called_once_with('lsblk -o NAME,TYPE -p | grep disk') + mock_run_command.assert_called_once_with(['sudo', 'ssdutil', '-d', '/dev/sda', '-v', '-e'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_pcieinfo(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['pcieinfo'], ['--verbose']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'pcieutil', 'show'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_pcieinfo_check(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['pcieinfo'], ['--verbose', '-c']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['sudo', 'pcieutil', 'check'], display_cmd=True) + + @patch('utilities_common.cli.run_command') + def test_temporature(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['temperature']) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with(['tempershow']) + + def teardown(self): + print('TEAR DOWN') + diff --git a/tests/sonic_bootchart_test.py b/tests/sonic_bootchart_test.py index c9d796a5d6..9a9d3dd929 100755 --- a/tests/sonic_bootchart_test.py +++ b/tests/sonic_bootchart_test.py @@ -40,13 +40,13 @@ def 
test_enable(self, mock_run_command): runner = CliRunner() result = runner.invoke(sonic_bootchart.cli.commands['enable'], []) assert not result.exit_code - mock_run_command.assert_called_with("systemctl enable systemd-bootchart", display_cmd=True) + mock_run_command.assert_called_with(['systemctl', 'enable', 'systemd-bootchart'], display_cmd=True) def test_disable(self, mock_run_command): runner = CliRunner() result = runner.invoke(sonic_bootchart.cli.commands['disable'], []) assert not result.exit_code - mock_run_command.assert_called_with("systemctl disable systemd-bootchart", display_cmd=True) + mock_run_command.assert_called_with(['systemctl', 'disable', 'systemd-bootchart'], display_cmd=True) def test_config_show(self, mock_run_command): def run_command_side_effect(command, **kwargs): diff --git a/tests/utils.py b/tests/utils.py index ff40865f06..f24210e36f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -8,7 +8,7 @@ def get_result_and_return_code(cmd): return_code = 0 try: output = subprocess.check_output( - cmd, stderr=subprocess.STDOUT, shell=True, text=True) + cmd, stderr=subprocess.STDOUT, text=True) except subprocess.CalledProcessError as e: return_code = e.returncode # store only the error, no need for the traceback diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 56ac18383c..13364f76e6 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -567,12 +567,12 @@ def test_config_vlan_proxy_arp_enable(self): print(result.exit_code) print(result.output) - expected_calls = [mock.call("docker container inspect -f '{{.State.Status}}' swss", return_cmd=True), - mock.call('docker exec -i swss supervisorctl status ndppd', ignore_error=True, return_cmd=True), - mock.call('docker exec -i swss cp /usr/share/sonic/templates/ndppd.conf /etc/supervisor/conf.d/'), - mock.call('docker exec -i swss supervisorctl update', return_cmd=True), - mock.call('docker exec -i swss sonic-cfggen -d -t /usr/share/sonic/templates/ndppd.conf.j2,/etc/ndppd.conf'), - 
mock.call('docker exec -i swss supervisorctl restart ndppd', return_cmd=True)] + expected_calls = [mock.call(['docker', 'container', 'inspect', '-f', '{{.State.Status}}', 'swss'], return_cmd=True), + mock.call(['docker', 'exec', '-i', 'swss', 'supervisorctl', 'status', 'ndppd'], ignore_error=True, return_cmd=True), + mock.call(['docker', 'exec', '-i', 'swss', 'cp', '/usr/share/sonic/templates/ndppd.conf', '/etc/supervisor/conf.d/']), + mock.call(['docker', 'exec', '-i', 'swss', 'supervisorctl', 'update'], return_cmd=True), + mock.call(['docker', 'exec', '-i', 'swss', 'sonic-cfggen', '-d', '-t', '/usr/share/sonic/templates/ndppd.conf.j2,/etc/ndppd.conf']), + mock.call(['docker', 'exec', '-i', 'swss', 'supervisorctl', 'restart', 'ndppd'], return_cmd=True)] mock_run_command.assert_has_calls(expected_calls) assert result.exit_code == 0 diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 3897d4a103..fd306fdcd0 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -177,14 +177,12 @@ def get_neighbor_dict_from_table(db, table_name): def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, vtysh_shell_cmd=constants.VTYSH_COMMAND): - bgp_instance_id = ' ' + bgp_instance_id = [] output = None if bgp_namespace is not multi_asic.DEFAULT_NAMESPACE: - bgp_instance_id = " -n {} ".format( - multi_asic.get_asic_id_from_name(bgp_namespace)) + bgp_instance_id = ['-n', str(multi_asic.get_asic_id_from_name(bgp_namespace))] - cmd = 'sudo {} {} -c "{}"'.format( - vtysh_shell_cmd, bgp_instance_id, vtysh_cmd) + cmd = ['sudo', vtysh_shell_cmd] + bgp_instance_id + ['-c', vtysh_cmd] try: output, ret = clicommon.run_command(cmd, return_cmd=True) if ret != 0: diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 45b2cc5f3f..9d3cdae710 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -15,7 +15,7 @@ from sonic_py_common import multi_asic from utilities_common.db import Db from 
utilities_common.general import load_db_config - +from sonic_py_common.general import getstatusoutput_noshell_pipe VLAN_SUB_INTERFACE_SEPARATOR = '.' pass_db = click.make_pass_decorator(Db, ensure=True) @@ -391,12 +391,15 @@ def print_output_in_alias_mode(output, index): click.echo(output.rstrip('\n')) -def run_command_in_alias_mode(command): +def run_command_in_alias_mode(command, shell=False): """Run command and replace all instances of SONiC interface names in output with vendor-sepecific interface aliases. """ - - process = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + if not shell: + command_str = ' '.join(command) + else: + command_str = command + process = subprocess.Popen(command, text=True, shell=shell, stdout=subprocess.PIPE) while True: output = process.stdout.readline() @@ -408,7 +411,7 @@ def run_command_in_alias_mode(command): raw_output = output output = output.lstrip() - if command.startswith("portstat"): + if command_str.startswith("portstat"): """Show interface counters""" index = 0 if output.startswith("IFACE"): @@ -416,7 +419,7 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif command.startswith("intfstat"): + elif command_str.startswith("intfstat"): """Show RIF counters""" index = 0 if output.startswith("IFACE"): @@ -424,7 +427,7 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif command == "pfcstat": + elif command_str == "pfcstat": """Show pfc counters""" index = 0 if output.startswith("Port Tx"): @@ -436,12 +439,12 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif (command.startswith("sudo sfputil show eeprom")): + elif (command_str.startswith("sudo sfputil show eeprom")): """Show interface transceiver eeprom""" index = 0 print_output_in_alias_mode(raw_output, index) - 
elif (command.startswith("sudo sfputil show")): + elif (command_str.startswith("sudo sfputil show")): """Show interface transceiver lpmode, presence """ @@ -451,7 +454,7 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif command == "sudo lldpshow": + elif command_str == "sudo lldpshow": """Show lldp table""" index = 0 if output.startswith("LocalPort"): @@ -459,7 +462,7 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif command.startswith("queuestat"): + elif command_str.startswith("queuestat"): """Show queue counters""" index = 0 if output.startswith("Port"): @@ -467,7 +470,7 @@ def run_command_in_alias_mode(command): iface_alias_converter.alias_max_length)) print_output_in_alias_mode(output, index) - elif command == "fdbshow": + elif command_str == "fdbshow": """Show mac""" index = 3 if output.startswith("No."): @@ -478,13 +481,13 @@ def run_command_in_alias_mode(command): output = " " + output print_output_in_alias_mode(output, index) - elif command.startswith("nbrshow"): + elif command_str.startswith("nbrshow"): """Show arp""" index = 2 if "Vlan" in output: output = output.replace('Vlan', ' Vlan') print_output_in_alias_mode(output, index) - elif command.startswith("sudo ipintutil"): + elif command_str.startswith("sudo ipintutil"): """Show ip(v6) int""" index = 0 if output.startswith("Interface"): @@ -511,7 +514,7 @@ def run_command_in_alias_mode(command): sys.exit(rc) -def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False, interactive_mode=False): +def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False, interactive_mode=False, shell=False): """ Run bash command. Default behavior is to print output to stdout. If the command returns a non-zero return code, the function will exit with that return code. 
@@ -522,20 +525,24 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False return_cmd: Boolean; If true, the function will return the output, ignoring any non-zero return code interactive_mode: Boolean; If true, it will treat the process as a long-running process which may generate multiple lines of output over time + shell: Boolean; If true, the command will be run in a shell """ - + if not shell: + command_str = ' '.join(command) + else: + command_str = command if display_cmd == True: - click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green')) + click.echo(click.style("Running command: ", fg='cyan') + click.style(command_str, fg='green')) # No conversion needed for intfutil commands as it already displays # both SONiC interface name and alias name for all interfaces. # IP route table cannot be handled in function run_command_in_alias_mode since it is in JSON format # with a list for next hops - if get_interface_naming_mode() == "alias" and not command.startswith("intfutil") and not re.search("show ip|ipv6 route", command): - run_command_in_alias_mode(command) + if get_interface_naming_mode() == "alias" and not command_str.startswith("intfutil") and not re.search("show ip|ipv6 route", command_str): + run_command_in_alias_mode(command, shell=shell) sys.exit(0) - proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(command, shell=shell, text=True, stdout=subprocess.PIPE) if return_cmd: output = proc.communicate()[0] From a66f41c45357388947500057c5a2d632ceb98918 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 31 May 2023 14:19:57 -0400 Subject: [PATCH 21/35] [show] replace shell=True, replace xml by lxml, replace exit by sys.exit (#2666) #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. 
`sys.exit` is better than `exit`, considered good to use in production code. Ref: https://stackoverflow.com/questions/6501121/difference-between-exit-and-sys-exit-in-python https://stackoverflow.com/questions/19747371/python-exit-commands-why-so-many-and-when-should-each-be-used #### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) Replace `exit()` by `sys.exit()` #### How to verify it Pass UT Manual test Signed-off-by: Mai Bui --- show/bgp_quagga_v4.py | 17 +- show/bgp_quagga_v6.py | 11 +- show/main.py | 271 ++++++++++++------------- show/platform.py | 4 +- tests/fdbshow_test.py | 2 +- tests/intfstat_test.py | 6 +- tests/pfcstat_test.py | 4 +- tests/queue_counter_test.py | 6 +- tests/show_test.py | 384 +++++++++++++++++++++++++++++++++--- tests/techsupport_test.py | 14 +- tests/tunnelstat_test.py | 4 +- 11 files changed, 534 insertions(+), 189 deletions(-) diff --git a/show/bgp_quagga_v4.py b/show/bgp_quagga_v4.py index e384cd9d17..cd42547499 100644 --- a/show/bgp_quagga_v4.py +++ b/show/bgp_quagga_v4.py @@ -1,7 +1,8 @@ import click -from show.main import AliasedGroup, ip, run_command +from show.main import ip, run_command from utilities_common.bgp_util import get_bgp_summary_extended import utilities_common.constants as constants +import utilities_common.cli as clicommon ############################################################################### @@ -11,7 +12,7 @@ ############################################################################### -@ip.group(cls=AliasedGroup) +@ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" pass @@ -22,10 +23,10 @@ def bgp(): def summary(): """Show summarized information of IPv4 BGP state""" try: - device_output = run_command('sudo {} -c "show ip bgp 
summary"'.format(constants.RVTYSH_COMMAND), return_cmd=True) + device_output = run_command(['sudo', constants.RVTYSH_COMMAND, '-c', "show ip bgp summary"], return_cmd=True) get_bgp_summary_extended(device_output) except Exception: - run_command('sudo {} -c "show ip bgp summary"'.format(constants.RVTYSH_COMMAND)) + run_command(['sudo', constants.RVTYSH_COMMAND, '-c', "show ip bgp summary"]) # 'neighbors' subcommand ("show ip bgp neighbors") @@ -35,15 +36,13 @@ def summary(): def neighbors(ipaddress, info_type): """Show IP (IPv4) BGP neighbors""" - command = 'sudo {} -c "show ip bgp neighbor'.format(constants.RVTYSH_COMMAND) + command = ['sudo', constants.RVTYSH_COMMAND, '-c', "show ip bgp neighbor"] if ipaddress is not None: - command += ' {}'.format(ipaddress) + command[-1] += ' {}'.format(ipaddress) # info_type is only valid if ipaddress is specified if info_type is not None: - command += ' {}'.format(info_type) - - command += '"' + command[-1] += ' {}'.format(info_type) run_command(command) diff --git a/show/bgp_quagga_v6.py b/show/bgp_quagga_v6.py index 003f4c94cf..3581a84c92 100644 --- a/show/bgp_quagga_v6.py +++ b/show/bgp_quagga_v6.py @@ -1,7 +1,8 @@ import click -from show.main import AliasedGroup, ipv6, run_command +from show.main import ipv6, run_command from utilities_common.bgp_util import get_bgp_summary_extended import utilities_common.constants as constants +import utilities_common.cli as clicommon ############################################################################### @@ -11,7 +12,7 @@ ############################################################################### -@ipv6.group(cls=AliasedGroup) +@ipv6.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv6 BGP (Border Gateway Protocol) information""" pass @@ -22,10 +23,10 @@ def bgp(): def summary(): """Show summarized information of IPv6 BGP state""" try: - device_output = run_command('sudo {} -c "show ipv6 bgp summary"'.format(constants.RVTYSH_COMMAND), return_cmd=True) + device_output = 
run_command(['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 bgp summary"], return_cmd=True) get_bgp_summary_extended(device_output) except Exception: - run_command('sudo {} -c "show ipv6 bgp summary"'.format(constants.RVTYSH_COMMAND)) + run_command(['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 bgp summary"]) # 'neighbors' subcommand ("show ipv6 bgp neighbors") @@ -34,5 +35,5 @@ def summary(): @click.argument('info_type', type=click.Choice(['routes', 'advertised-routes', 'received-routes']), required=True) def neighbors(ipaddress, info_type): """Show IPv6 BGP neighbors""" - command = 'sudo {} -c "show ipv6 bgp neighbor {} {}"'.format(constants.RVTYSH_COMMAND, ipaddress, info_type) + command = ['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 bgp neighbor {} {}".format(ipaddress, info_type)] run_command(command) diff --git a/show/main.py b/show/main.py index 2d21e1b3aa..d79777ebeb 100755 --- a/show/main.py +++ b/show/main.py @@ -20,6 +20,7 @@ import utilities_common.constants as constants from utilities_common.general import load_db_config from json.decoder import JSONDecodeError +from sonic_py_common.general import getstatusoutput_noshell_pipe # mock the redis for unit test purposes # try: @@ -104,17 +105,22 @@ def readJsonFile(fileName): raise click.Abort() return result -def run_command(command, display_cmd=False, return_cmd=False): +def run_command(command, display_cmd=False, return_cmd=False, shell=False): + if not shell: + command_str = ' '.join(command) + else: + command_str = command + if display_cmd: - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) # No conversion needed for intfutil commands as it already displays # both SONiC interface name and alias name for all interfaces. 
- if clicommon.get_interface_naming_mode() == "alias" and not command.startswith("intfutil"): - clicommon.run_command_in_alias_mode(command) + if clicommon.get_interface_naming_mode() == "alias" and not command_str.startswith("intfutil"): + clicommon.run_command_in_alias_mode(command, shell=shell) raise sys.exit(0) - proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(command, shell=shell, text=True, stdout=subprocess.PIPE) while True: if return_cmd: @@ -377,10 +383,10 @@ def event_counters(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def arp(ipaddress, iface, verbose): """Show IP ARP table""" - cmd = "nbrshow -4" + cmd = ['nbrshow', '-4'] if ipaddress is not None: - cmd += " -ip {}".format(ipaddress) + cmd += ['-ip', str(ipaddress)] if iface is not None: if clicommon.get_interface_naming_mode() == "alias": @@ -388,7 +394,7 @@ def arp(ipaddress, iface, verbose): (iface.startswith("eth"))): iface = iface_alias_converter.alias_to_name(iface) - cmd += " -if {}".format(iface) + cmd += ['-if', str(iface)] run_command(cmd, display_cmd=verbose) @@ -402,22 +408,22 @@ def arp(ipaddress, iface, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def ndp(ip6address, iface, verbose): """Show IPv6 Neighbour table""" - cmd = "nbrshow -6" + cmd = ['nbrshow', '-6'] if ip6address is not None: - cmd += " -ip {}".format(ip6address) + cmd += ['-ip', str(ip6address)] if iface is not None: - cmd += " -if {}".format(iface) + cmd += ['-if', str(iface)] run_command(cmd, display_cmd=verbose) def is_mgmt_vrf_enabled(ctx): """Check if management VRF is enabled""" if ctx.invoked_subcommand is None: - cmd = 'sonic-cfggen -d --var-json "MGMT_VRF_CONFIG"' + cmd = ['sonic-cfggen', '-d', '--var-json', "MGMT_VRF_CONFIG"] - p = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) try : mvrf_dict = json.loads(p.stdout.read()) except ValueError: @@ -487,13 +493,13 @@ def mgmt_vrf(ctx,routes): if routes is None: click.echo("\nManagementVRF : Enabled") click.echo("\nManagement VRF interfaces in Linux:") - cmd = "ip -d link show mgmt" + cmd = ['ip', '-d', 'link', 'show', 'mgmt'] run_command(cmd) - cmd = "ip link show vrf mgmt" + cmd = ['ip', 'link', 'show', 'vrf', 'mgmt'] run_command(cmd) else: click.echo("\nRoutes in Management VRF Routing Table:") - cmd = "ip route show table 5000" + cmd = ['ip', 'route', 'show', 'table', '5000'] run_command(cmd) # @@ -577,7 +583,7 @@ def subinterfaces(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def status(subinterfacename, verbose): """Show sub port interface status information""" - cmd = "intfutil -c status" + cmd = ['intfutil', '-c', 'status'] if subinterfacename is not None: sub_intf_sep_idx = subinterfacename.find(VLAN_SUB_INTERFACE_SEPARATOR) @@ -588,9 +594,9 @@ def status(subinterfacename, verbose): if clicommon.get_interface_naming_mode() == "alias": subinterfacename = iface_alias_converter.alias_to_name(subinterfacename) - cmd += " -i {}".format(subinterfacename) + cmd += ['-i', str(subinterfacename)] else: - cmd += " -i subport" + cmd += ['-i', 'subport'] run_command(cmd, display_cmd=verbose) # @@ -609,9 +615,9 @@ def pfc(): def counters(namespace, display, verbose): """Show pfc counters""" - cmd = "pfcstat -s {}".format(display) + cmd = ['pfcstat', '-s', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -619,12 +625,12 @@ def counters(namespace, display, verbose): @click.argument('interface', type=click.STRING, required=False) def priority(interface): """Show pfc priority""" - cmd = 'pfc show priority' + cmd = ['pfc', 'show', 'priority'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": interface = 
iface_alias_converter.alias_to_name(interface) if interface is not None: - cmd += ' {0}'.format(interface) + cmd += [str(interface)] run_command(cmd) @@ -632,12 +638,12 @@ def priority(interface): @click.argument('interface', type=click.STRING, required=False) def asymmetric(interface): """Show asymmetric pfc""" - cmd = 'pfc show asymmetric' + cmd = ['pfc', 'show', 'asymmetric'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": interface = iface_alias_converter.alias_to_name(interface) if interface is not None: - cmd += ' {0}'.format(interface) + cmd += [str(interface)] run_command(cmd) @@ -653,9 +659,9 @@ def pfcwd(): def config(namespace, display, verbose): """Show pfc watchdog config""" - cmd = "pfcwd show config -d {}".format(display) + cmd = ['pfcwd', 'show', 'config', '-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -665,9 +671,9 @@ def config(namespace, display, verbose): def stats(namespace, display, verbose): """Show pfc watchdog stats""" - cmd = "pfcwd show stats -d {}".format(display) + cmd = ['pfcwd', 'show', 'stats', '-d', str(display)] if namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -688,7 +694,7 @@ def telemetry(): @telemetry.command('interval') def show_tm_interval(): """Show telemetry interval""" - command = 'watermarkcfg --show-interval' + command = ['watermarkcfg', '--show-interval'] run_command(command) @@ -711,23 +717,23 @@ def queue(): def counters(interfacename, namespace, display, verbose, json, voq): """Show queue counters""" - cmd = "queuestat" + cmd = ["queuestat"] if interfacename is not None: if clicommon.get_interface_naming_mode() == "alias": interfacename = iface_alias_converter.alias_to_name(interfacename) if interfacename is not None: - cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] if 
namespace is not None: - cmd += " -n {}".format(namespace) + cmd += ['-n', str(namespace)] if json: - cmd += " -j" + cmd += ["-j"] if voq: - cmd += " -V" + cmd += ["-V"] run_command(cmd, display_cmd=verbose) @@ -744,21 +750,21 @@ def watermark(): @watermark.command('unicast') def wm_q_uni(): """Show user WM for unicast queues""" - command = 'watermarkstat -t q_shared_uni' + command = ['watermarkstat', '-t', 'q_shared_uni'] run_command(command) # 'multicast' subcommand ("show queue watermarks multicast") @watermark.command('multicast') def wm_q_multi(): """Show user WM for multicast queues""" - command = 'watermarkstat -t q_shared_multi' + command = ['watermarkstat', '-t', 'q_shared_multi'] run_command(command) # 'all' subcommand ("show queue watermarks all") @watermark.command('all') def wm_q_all(): """Show user WM for all queues""" - command = 'watermarkstat -t q_shared_all' + command = ['watermarkstat', '-t', 'q_shared_all'] run_command(command) # @@ -774,21 +780,21 @@ def persistent_watermark(): @persistent_watermark.command('unicast') def pwm_q_uni(): """Show persistent WM for unicast queues""" - command = 'watermarkstat -p -t q_shared_uni' + command = ['watermarkstat', '-p', '-t', 'q_shared_uni'] run_command(command) # 'multicast' subcommand ("show queue persistent-watermarks multicast") @persistent_watermark.command('multicast') def pwm_q_multi(): """Show persistent WM for multicast queues""" - command = 'watermarkstat -p -t q_shared_multi' + command = ['watermarkstat', '-p', '-t', 'q_shared_multi'] run_command(command) # 'all' subcommand ("show queue persistent-watermarks all") @persistent_watermark.command('all') def pwm_q_all(): """Show persistent WM for all queues""" - command = 'watermarkstat -p -t q_shared_all' + command = ['watermarkstat', '-p', '-t', 'q_shared_all'] run_command(command) # @@ -807,13 +813,13 @@ def watermark(): @watermark.command('headroom') def wm_pg_headroom(): """Show user headroom WM for pg""" - command = 'watermarkstat -t 
pg_headroom' + command = ['watermarkstat', '-t', 'pg_headroom'] run_command(command) @watermark.command('shared') def wm_pg_shared(): """Show user shared WM for pg""" - command = 'watermarkstat -t pg_shared' + command = ['watermarkstat', '-t', 'pg_shared'] run_command(command) @priority_group.group() @@ -824,7 +830,7 @@ def drop(): @drop.command('counters') def pg_drop_counters(): """Show dropped packets for priority-group""" - command = 'pg-drop -c show' + command = ['pg-drop', '-c', 'show'] run_command(command) @priority_group.group(name='persistent-watermark') @@ -835,13 +841,13 @@ def persistent_watermark(): @persistent_watermark.command('headroom') def pwm_pg_headroom(): """Show persistent headroom WM for pg""" - command = 'watermarkstat -p -t pg_headroom' + command = ['watermarkstat', '-p', '-t', 'pg_headroom'] run_command(command) @persistent_watermark.command('shared') def pwm_pg_shared(): """Show persistent shared WM for pg""" - command = 'watermarkstat -p -t pg_shared' + command = ['watermarkstat', '-p', '-t', 'pg_shared'] run_command(command) @@ -856,13 +862,13 @@ def buffer_pool(): @buffer_pool.command('watermark') def wm_buffer_pool(): """Show user WM for buffer pools""" - command = 'watermarkstat -t buffer_pool' + command = ['watermarkstat', '-t' ,'buffer_pool'] run_command(command) @buffer_pool.command('persistent-watermark') def pwm_buffer_pool(): """Show persistent WM for buffer pools""" - command = 'watermarkstat -p -t buffer_pool' + command = ['watermarkstat', '-p', '-t', 'buffer_pool'] run_command(command) @@ -877,13 +883,13 @@ def headroom_pool(): @headroom_pool.command('watermark') def wm_headroom_pool(): """Show user WM for headroom pool""" - command = 'watermarkstat -t headroom_pool' + command = ['watermarkstat', '-t', 'headroom_pool'] run_command(command) @headroom_pool.command('persistent-watermark') def pwm_headroom_pool(): """Show persistent WM for headroom pool""" - command = 'watermarkstat -p -t headroom_pool' + command = 
['watermarkstat', '-p', '-t', 'headroom_pool'] run_command(command) @@ -905,22 +911,22 @@ def mac(ctx, vlan, port, address, type, count, verbose): if ctx.invoked_subcommand is not None: return - cmd = "fdbshow" + cmd = ["fdbshow"] if vlan is not None: - cmd += " -v {}".format(vlan) + cmd += ['-v', str(vlan)] if port is not None: - cmd += " -p {}".format(port) + cmd += ['-p', str(port)] if address is not None: - cmd += " -a {}".format(address) + cmd += ['-a', str(address)] if type is not None: - cmd += " -t {}".format(type) + cmd += ['-t', str(type)] if count: - cmd += " -c" + cmd += ["-c"] run_command(cmd, display_cmd=verbose) @@ -951,10 +957,9 @@ def aging_time(ctx): @click.option('--verbose', is_flag=True, help="Enable verbose output") def route_map(route_map_name, verbose): """show route-map""" - cmd = 'sudo {} -c "show route-map'.format(constants.RVTYSH_COMMAND) + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show route-map'] if route_map_name is not None: - cmd += ' {}'.format(route_map_name) - cmd += '"' + cmd[-1] += ' {}'.format(route_map_name) run_command(cmd, display_cmd=verbose) # @@ -1042,10 +1047,9 @@ def route(args, namespace, display, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def prefix_list(prefix_list_name, verbose): """show ip prefix-list""" - cmd = 'sudo {} -c "show ip prefix-list'.format(constants.RVTYSH_COMMAND) + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show ip prefix-list'] if prefix_list_name is not None: - cmd += ' {}'.format(prefix_list_name) - cmd += '"' + cmd[-1] += ' {}'.format(prefix_list_name) run_command(cmd, display_cmd=verbose) @@ -1054,7 +1058,7 @@ def prefix_list(prefix_list_name, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def protocol(verbose): """Show IPv4 protocol information""" - cmd = 'sudo {} -c "show ip protocol"'.format(constants.RVTYSH_COMMAND) + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', "show ip protocol"] run_command(cmd, 
display_cmd=verbose) # @@ -1065,9 +1069,9 @@ def protocol(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def fib(ipaddress, verbose): """Show IP FIB table""" - cmd = "fibshow -4" + cmd = ['fibshow', '-4'] if ipaddress is not None: - cmd += " -ip {}".format(ipaddress) + cmd += ['-ip', str(ipaddress)] run_command(cmd, display_cmd=verbose) @@ -1090,10 +1094,9 @@ def ipv6(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def prefix_list(prefix_list_name, verbose): """show ip prefix-list""" - cmd = 'sudo {} -c "show ipv6 prefix-list'.format(constants.RVTYSH_COMMAND) + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show ipv6 prefix-list'] if prefix_list_name is not None: - cmd += ' {}'.format(prefix_list_name) - cmd += '"' + cmd[-1] += ' {}'.format(prefix_list_name) run_command(cmd, display_cmd=verbose) @@ -1138,7 +1141,7 @@ def route(args, namespace, display, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def protocol(verbose): """Show IPv6 protocol information""" - cmd = 'sudo {} -c "show ipv6 protocol"'.format(constants.RVTYSH_COMMAND) + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 protocol"] run_command(cmd, display_cmd=verbose) # @@ -1207,9 +1210,9 @@ def link_local_mode(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def fib(ipaddress, verbose): """Show IP FIB table""" - cmd = "fibshow -6" + cmd = ['fibshow', '-6'] if ipaddress is not None: - cmd += " -ip {}".format(ipaddress) + cmd += ['-ip', str(ipaddress)] run_command(cmd, display_cmd=verbose) # @@ -1227,13 +1230,13 @@ def lldp(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def neighbors(interfacename, verbose): """Show LLDP neighbors""" - cmd = "sudo lldpshow -d" + cmd = ['sudo', 'lldpshow', '-d'] if interfacename is not None: if clicommon.get_interface_naming_mode() == "alias": interfacename = iface_alias_converter.alias_to_name(interfacename) - 
cmd += " -p {}".format(interfacename) + cmd += ['-p', str(interfacename)] run_command(cmd, display_cmd=verbose) @@ -1242,7 +1245,7 @@ def neighbors(interfacename, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def table(verbose): """Show LLDP neighbors in tabular format""" - cmd = "sudo lldpshow" + cmd = ['sudo', 'lldpshow'] run_command(cmd, display_cmd=verbose) @@ -1252,7 +1255,7 @@ def table(verbose): @cli.command() @click.argument('process', required=False) -@click.option('-l', '--lines') +@click.option('-l', '--lines', type=int) @click.option('-f', '--follow', is_flag=True) @click.option('--verbose', is_flag=True, help="Enable verbose output") def logging(process, lines, follow, verbose): @@ -1262,7 +1265,7 @@ def logging(process, lines, follow, verbose): else: log_path = "/var/log" if follow: - cmd = "sudo tail -F {}/syslog".format(log_path) + cmd = ['sudo', 'tail', '-F', '{}/syslog'.format(log_path)] run_command(cmd, display_cmd=verbose) else: if os.path.isfile("{}/syslog.1".format(log_path)): @@ -1276,8 +1279,7 @@ def logging(process, lines, follow, verbose): if lines is not None: cmd += " | tail -{}".format(lines) - run_command(cmd, display_cmd=verbose) - + run_command(cmd, display_cmd=verbose, shell=True) # # 'version' command ("show version") @@ -1291,8 +1293,8 @@ def version(verbose): platform_info = device_info.get_platform_info() chassis_info = platform.get_chassis_info() - sys_uptime_cmd = "uptime" - sys_uptime = subprocess.Popen(sys_uptime_cmd, shell=True, text=True, stdout=subprocess.PIPE) + sys_uptime_cmd = ["uptime"] + sys_uptime = subprocess.Popen(sys_uptime_cmd, text=True, stdout=subprocess.PIPE) sys_date = datetime.now() @@ -1313,8 +1315,8 @@ def version(verbose): click.echo("Uptime: {}".format(sys_uptime.stdout.read().strip())) click.echo("Date: {}".format(sys_date.strftime("%a %d %b %Y %X"))) click.echo("\nDocker images:") - cmd = 'sudo docker images --format "table 
{{.Repository}}\\t{{.Tag}}\\t{{.ID}}\\t{{.Size}}"' - p = subprocess.Popen(cmd, shell=True, text=True, stdout=subprocess.PIPE) + cmd = ['sudo', 'docker', 'images', '--format', "table {{.Repository}}\\t{{.Tag}}\\t{{.ID}}\\t{{.Size}}"] + p = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) click.echo(p.stdout.read()) # @@ -1325,7 +1327,7 @@ def version(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def environment(verbose): """Show environmentals (voltages, fans, temps)""" - cmd = "sudo sensors" + cmd = ['sudo', 'sensors'] run_command(cmd, display_cmd=verbose) @@ -1337,7 +1339,7 @@ def environment(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def users(verbose): """Show users""" - cmd = "who" + cmd = ["who"] run_command(cmd, display_cmd=verbose) @@ -1356,29 +1358,29 @@ def users(verbose): @click.option('--redirect-stderr', '-r', is_flag=True, help="Redirect an intermediate errors to STDERR") def techsupport(since, global_timeout, cmd_timeout, verbose, allow_process_stop, silent, debug_dump, redirect_stderr): """Gather information for troubleshooting""" - cmd = "sudo" + cmd = ["sudo"] if global_timeout: - cmd += " timeout --kill-after={}s -s SIGTERM --foreground {}m".format(COMMAND_TIMEOUT, global_timeout) + cmd += ['timeout', '--kill-after={}s'.format(COMMAND_TIMEOUT), '-s', 'SIGTERM', '--foreground', '{}m'.format(global_timeout)] if silent: - cmd += " generate_dump" + cmd += ["generate_dump"] click.echo("Techsupport is running with silent option. 
This command might take a long time.") else: - cmd += " generate_dump -v" + cmd += ['generate_dump', '-v'] if allow_process_stop: - cmd += " -a" + cmd += ["-a"] if since: - cmd += " -s '{}'".format(since) + cmd += ['-s', str(since)] if debug_dump: - cmd += " -d" + cmd += ["-d"] - cmd += " -t {}".format(cmd_timeout) + cmd += ['-t', str(cmd_timeout)] if redirect_stderr: - cmd += " -r" + cmd += ["-r"] run_command(cmd, display_cmd=verbose) @@ -1423,7 +1425,7 @@ def all(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def acl(verbose): """Show acl running configuration""" - cmd = "sonic-cfggen -d --var-json ACL_RULE" + cmd = ['sonic-cfggen', '-d', '--var-json', 'ACL_RULE'] run_command(cmd, display_cmd=verbose) @@ -1433,10 +1435,10 @@ def acl(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def ports(portname, verbose): """Show ports running configuration""" - cmd = "sonic-cfggen -d --var-json PORT" + cmd = ['sonic-cfggen', '-d', '--var-json', 'PORT'] if portname is not None: - cmd += " {0} {1}".format("--key", portname) + cmd += ["--key", str(portname)] run_command(cmd, display_cmd=verbose) @@ -1486,10 +1488,10 @@ def bgp(namespace, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def interfaces(interfacename, verbose): """Show interfaces running configuration""" - cmd = "sonic-cfggen -d --var-json INTERFACE" + cmd = ['sonic-cfggen', '-d', '--var-json', 'INTERFACE'] if interfacename is not None: - cmd += " {0} {1}".format("--key", interfacename) + cmd += ["--key", str(interfacename)] run_command(cmd, display_cmd=verbose) @@ -1715,16 +1717,20 @@ def startupconfiguration(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def bgp(verbose): """Show BGP startup configuration""" - cmd = "sudo docker ps | grep bgp | awk '{print$2}' | cut -d'-' -f3 | cut -d':' -f1" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) - result = 
proc.stdout.read().rstrip() + cmd0 = ['sudo', 'docker', 'ps'] + cmd1 = ['grep', 'bgp'] + cmd2 = ['awk', '{print$2}'] + cmd3 = ['cut', '-d-', '-f3'] + cmd4 = ['cut', '-d:', "-f1"] + _, stdout = getstatusoutput_noshell_pipe(cmd0, cmd1, cmd2, cmd3, cmd4) + result = stdout.rstrip() click.echo("Routing-Stack is: {}".format(result)) if result == "quagga": - run_command('sudo docker exec bgp cat /etc/quagga/bgpd.conf', display_cmd=verbose) + run_command(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/quagga/bgpd.conf'], display_cmd=verbose) elif result == "frr": - run_command('sudo docker exec bgp cat /etc/frr/bgpd.conf', display_cmd=verbose) + run_command(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/frr/bgpd.conf'], display_cmd=verbose) elif result == "gobgp": - run_command('sudo docker exec bgp cat /etc/gpbgp/bgpd.conf', display_cmd=verbose) + run_command(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/gpbgp/bgpd.conf'], display_cmd=verbose) else: click.echo("Unidentified routing-stack") @@ -1738,18 +1744,18 @@ def bgp(verbose): def ntp(ctx, verbose): """Show NTP information""" from pkg_resources import parse_version - ntpstat_cmd = "ntpstat" - ntpcmd = "ntpq -p -n" + ntpstat_cmd = ["ntpstat"] + ntpcmd = ["ntpq", "-p", "-n"] if is_mgmt_vrf_enabled(ctx) is True: #ManagementVRF is enabled. 
Call ntpq using "ip vrf exec" or cgexec based on linux version os_info = os.uname() release = os_info[2].split('-') if parse_version(release[0]) > parse_version("4.9.0"): - ntpstat_cmd = "sudo ip vrf exec mgmt ntpstat" - ntpcmd = "sudo ip vrf exec mgmt ntpq -p -n" + ntpstat_cmd = ['sudo', 'ip', 'vrf', 'exec', 'mgmt', 'ntpstat'] + ntpcmd = ['sudo', 'ip', 'vrf', 'exec', 'mgmt', 'ntpq', '-p', '-n'] else: - ntpstat_cmd = "sudo cgexec -g l3mdev:mgmt ntpstat" - ntpcmd = "sudo cgexec -g l3mdev:mgmt ntpq -p -n" + ntpstat_cmd = ['sudo', 'cgexec', '-g', 'l3mdev:mgmt', 'ntpstat'] + ntpcmd = ['sudo', 'cgexec', '-g', 'l3mdev:mgmt', 'ntpq', '-p', '-n'] run_command(ntpstat_cmd, display_cmd=verbose) run_command(ntpcmd, display_cmd=verbose) @@ -1762,37 +1768,38 @@ def ntp(ctx, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def uptime(verbose): """Show system uptime""" - cmd = "uptime -p" + cmd = ['uptime', '-p'] run_command(cmd, display_cmd=verbose) @cli.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") def clock(verbose): """Show date and time""" - cmd ="date" + cmd = ["date"] run_command(cmd, display_cmd=verbose) @cli.command('system-memory') @click.option('--verbose', is_flag=True, help="Enable verbose output") def system_memory(verbose): """Show memory information""" - cmd = "free -m" + cmd = ['free', '-m'] run_command(cmd, display_cmd=verbose) @cli.command('services') def services(): """Show all daemon services""" - cmd = "sudo docker ps --format '{{.Names}}'" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["sudo", "docker", "ps", "--format", '{{.Names}}'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) while True: line = proc.stdout.readline() if line != '': print(line.rstrip()+'\t'+"docker") print("---------------------------") - cmd = "sudo docker exec {} ps aux | sed '$d'".format(line.rstrip()) - proc1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, 
shell=True, text=True) - print(proc1.stdout.read()) + cmd0 = ["sudo", "docker", "exec", line.rstrip(), "ps", "aux"] + cmd1 = ["sed", '$d'] + _, stdout = getstatusoutput_noshell_pipe(cmd0, cmd1) + print(stdout) else: break @@ -1918,10 +1925,10 @@ def radius(db): @click.option('--verbose', is_flag=True, help="Enable verbose output") def mirror_session(session_name, verbose): """Show existing everflow sessions""" - cmd = "acl-loader show session" + cmd = ['acl-loader', 'show', 'session'] if session_name is not None: - cmd += " {}".format(session_name) + cmd += [str(session_name)] run_command(cmd, display_cmd=verbose) @@ -1934,10 +1941,10 @@ def mirror_session(session_name, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def policer(policer_name, verbose): """Show existing policers""" - cmd = "acl-loader show policer" + cmd = ['acl-loader', 'show', 'policer'] if policer_name is not None: - cmd += " {}".format(policer_name) + cmd += [str(policer_name)] run_command(cmd, display_cmd=verbose) @@ -1949,7 +1956,7 @@ def policer(policer_name, verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def ecn(verbose): """Show ECN configuration""" - cmd = "ecnconfig -l" + cmd = ['ecnconfig', '-l'] run_command(cmd, display_cmd=verbose) @@ -1959,8 +1966,8 @@ def ecn(verbose): @cli.command('boot') def boot(): """Show boot configuration""" - cmd = "sudo sonic-installer list" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["sudo", "sonic-installer", "list"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) click.echo(proc.stdout.read()) @@ -1970,7 +1977,7 @@ def boot(): @cli.command('mmu') def mmu(): """Show mmu configuration""" - cmd = "mmuconfig -l" + cmd = ['mmuconfig', '-l'] run_command(cmd) # @@ -1987,7 +1994,7 @@ def buffer(): @buffer.command() def configuration(): """show buffer configuration""" - cmd = "mmuconfig -l" + cmd = ['mmuconfig', '-l'] run_command(cmd) # @@ 
-1996,7 +2003,7 @@ def configuration(): @buffer.command() def information(): """show buffer information""" - cmd = "buffershow -l" + cmd = ['buffershow', '-l'] run_command(cmd) @@ -2008,7 +2015,7 @@ def information(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def line(brief, verbose): """Show all console lines and their info include available ttyUSB devices unless specified brief mode""" - cmd = "consutil show" + (" -b" if brief else "") + cmd = ['consutil', 'show'] + (["-b"] if brief else []) run_command(cmd, display_cmd=verbose) return @@ -2022,11 +2029,11 @@ def line(brief, verbose): def ztp(status, verbose): """Show Zero Touch Provisioning status""" if os.path.isfile('/usr/bin/ztp') is False: - exit("ZTP feature unavailable in this image version") + sys.exit("ZTP feature unavailable in this image version") - cmd = "ztp status" + cmd = ['ztp', 'status'] if verbose: - cmd = cmd + " --verbose" + cmd += ["--verbose"] run_command(cmd, display_cmd=verbose) diff --git a/show/platform.py b/show/platform.py index 85d729df84..c4f2c3a29c 100644 --- a/show/platform.py +++ b/show/platform.py @@ -148,9 +148,9 @@ def temperature(): @click.argument('args', nargs=-1, type=click.UNPROCESSED) def firmware(args): """Show firmware information""" - cmd = "sudo fwutil show {}".format(" ".join(args)) + cmd = ["sudo", "fwutil", "show"] + list(args) try: - subprocess.check_call(cmd, shell=True) + subprocess.check_call(cmd) except subprocess.CalledProcessError as e: sys.exit(e.returncode) diff --git a/tests/fdbshow_test.py b/tests/fdbshow_test.py index 578b278a95..0f129df299 100755 --- a/tests/fdbshow_test.py +++ b/tests/fdbshow_test.py @@ -450,7 +450,7 @@ def test_show_mac_no_address(self): def test_show_mac_no_type(self): self.set_mock_variant("6") - result = self.runner.invoke(show.cli.commands["mac"], ["-t Static"]) + result = self.runner.invoke(show.cli.commands["mac"], ["-t", "Static"]) print(result.exit_code) print(result.output) assert 
result.exit_code == 0 diff --git a/tests/intfstat_test.py b/tests/intfstat_test.py index 4522e08311..f76e54c7b5 100644 --- a/tests/intfstat_test.py +++ b/tests/intfstat_test.py @@ -168,7 +168,7 @@ def test_clear_single_intfs(self): result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["rif"], ["Ethernet20"]) print(result.output) # remove the counters snapshot - show.run_command("intfstat -D") + show.run_command(["intfstat", "-D"]) assert 'Last cached time was' in result.output.split('\n')[0] assert show_interfaces_counters_rif_clear_single_intf in result.output @@ -180,7 +180,7 @@ def test_clear_single_interface_check_all(self): result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["rif"], []) print(result.stdout) # remove the counters snapshot - show.run_command("intfstat -D") + show.run_command(["intfstat", "-D"]) assert 'Last cached time was' in result.output.split('\n')[0] assert show_single_interface_check_all_clear in result.output @@ -192,7 +192,7 @@ def test_clear(self): result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["rif"], []) print(result.stdout) # remove the counters snapshot - show.run_command("intfstat -D") + show.run_command(["intfstat", "-D"]) assert 'Last cached time was' in result.output.split('\n')[0] assert show_interfaces_counters_rif_clear in result.output diff --git a/tests/pfcstat_test.py b/tests/pfcstat_test.py index 6ac0401b24..23d184cc36 100644 --- a/tests/pfcstat_test.py +++ b/tests/pfcstat_test.py @@ -215,7 +215,7 @@ def test_pfc_counters_with_clear(self): [] ) print(result.output) - show.run_command('pfcstat -d') + show.run_command(['pfcstat', '-d']) assert result.exit_code == 0 assert "Last cached time was" in result.output assert show_pfc_counters_output_with_clear[0] in result.output and \ @@ -262,7 +262,7 @@ def test_pfc_counters_all_with_clear(self): [] ) print(result.output) - show.run_command('pfcstat -d') + 
show.run_command(['pfcstat', '-d']) assert result.exit_code == 0 assert "Last cached time was" in result.output assert show_pfc_counters_all_with_clear[0] in result.output and \ diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 165f6e6fb3..1a78b3eeb8 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -1280,7 +1280,7 @@ def test_queue_counters_with_clear(self): [] ) print(result.output) - show.run_command('queuestat -d') + show.run_command(['queuestat', '-d']) assert result.exit_code == 0 assert "Ethernet0 Last cached time was" in result.output and \ "Ethernet4 Last cached time was" in result.output and \ @@ -1318,7 +1318,7 @@ def test_queue_counters_port_json(self): runner = CliRunner() result = runner.invoke( show.cli.commands["queue"].commands["counters"], - ["Ethernet8 --json"] + ["Ethernet8", "--json"] ) assert result.exit_code == 0 print(result.output) @@ -1343,7 +1343,7 @@ def test_queue_port_voq_counters(self): runner = CliRunner() result = runner.invoke( show.cli.commands["queue"].commands["counters"], - ["Ethernet0 --voq"] + ["Ethernet0", "--voq"] ) print(result.output) assert result.exit_code == 0 diff --git a/tests/show_test.py b/tests/show_test.py index 7b3e492fc9..b7f6a9baf8 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -2,12 +2,15 @@ import sys import click import pytest +import subprocess import show.main as show from unittest import mock from click.testing import CliRunner +from utilities_common import constants from unittest.mock import call, MagicMock, patch EXPECTED_BASE_COMMAND = 'sudo ' +EXPECTED_BASE_COMMAND_LIST = ['sudo'] test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -65,73 +68,101 @@ def teardown_class(cls): @patch('show.main.run_command') @pytest.mark.parametrize( - "cli_arguments,expected", + "cli_arguments0,expected0", [ ([], 'cat /var/log/syslog'), (['xcvrd'], "cat /var/log/syslog | grep 'xcvrd'"), (['-l', '10'], 
'cat /var/log/syslog | tail -10'), - (['-f'], 'tail -F /var/log/syslog'), ] ) -def test_show_logging_default(run_command, cli_arguments, expected): +@pytest.mark.parametrize( + "cli_arguments1,expected1", + [ + (['-f'], ['tail', '-F', '/var/log/syslog']), + ] +) +def test_show_logging_default(run_command, cli_arguments0, expected0, cli_arguments1, expected1): runner = CliRunner() - result = runner.invoke(show.cli.commands["logging"], cli_arguments) - run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + runner.invoke(show.cli.commands["logging"], cli_arguments0) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected0, display_cmd=False, shell=True) + runner.invoke(show.cli.commands["logging"], cli_arguments1) + run_command.assert_called_with(EXPECTED_BASE_COMMAND_LIST + expected1, display_cmd=False) @patch('show.main.run_command') @patch('os.path.isfile', MagicMock(return_value=True)) @pytest.mark.parametrize( - "cli_arguments,expected", + "cli_arguments0,expected0", [ ([], 'cat /var/log/syslog.1 /var/log/syslog'), (['xcvrd'], "cat /var/log/syslog.1 /var/log/syslog | grep 'xcvrd'"), (['-l', '10'], 'cat /var/log/syslog.1 /var/log/syslog | tail -10'), - (['-f'], 'tail -F /var/log/syslog'), ] ) -def test_show_logging_syslog_1(run_command, cli_arguments, expected): +@pytest.mark.parametrize( + "cli_arguments1,expected1", + [ + (['-f'], ['tail', '-F', '/var/log/syslog']), + ] +) +def test_show_logging_syslog_1(run_command, cli_arguments0, expected0, cli_arguments1, expected1): runner = CliRunner() - result = runner.invoke(show.cli.commands["logging"], cli_arguments) - run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + runner.invoke(show.cli.commands["logging"], cli_arguments0) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected0, display_cmd=False, shell=True) + runner.invoke(show.cli.commands["logging"], cli_arguments1) + run_command.assert_called_with(EXPECTED_BASE_COMMAND_LIST 
+ expected1, display_cmd=False) @patch('show.main.run_command') @patch('os.path.exists', MagicMock(return_value=True)) @pytest.mark.parametrize( - "cli_arguments,expected", + "cli_arguments0,expected0", [ ([], 'cat /var/log.tmpfs/syslog'), (['xcvrd'], "cat /var/log.tmpfs/syslog | grep 'xcvrd'"), (['-l', '10'], 'cat /var/log.tmpfs/syslog | tail -10'), - (['-f'], 'tail -F /var/log.tmpfs/syslog'), ] ) -def test_show_logging_tmpfs(run_command, cli_arguments, expected): +@pytest.mark.parametrize( + "cli_arguments1,expected1", + [ + (['-f'], ['tail', '-F', '/var/log.tmpfs/syslog']), + ] +) +def test_show_logging_tmpfs(run_command, cli_arguments0, expected0, cli_arguments1, expected1): runner = CliRunner() - result = runner.invoke(show.cli.commands["logging"], cli_arguments) - run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + runner.invoke(show.cli.commands["logging"], cli_arguments0) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected0, display_cmd=False, shell=True) + runner.invoke(show.cli.commands["logging"], cli_arguments1) + run_command.assert_called_with(EXPECTED_BASE_COMMAND_LIST + expected1, display_cmd=False) @patch('show.main.run_command') @patch('os.path.isfile', MagicMock(return_value=True)) @patch('os.path.exists', MagicMock(return_value=True)) @pytest.mark.parametrize( - "cli_arguments,expected", + "cli_arguments0,expected0", [ ([], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog'), (['xcvrd'], "cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | grep 'xcvrd'"), (['-l', '10'], 'cat /var/log.tmpfs/syslog.1 /var/log.tmpfs/syslog | tail -10'), - (['-f'], 'tail -F /var/log.tmpfs/syslog'), ] ) -def test_show_logging_tmpfs_syslog_1(run_command, cli_arguments, expected): +@pytest.mark.parametrize( + "cli_arguments1,expected1", + [ + (['-f'], ['tail', '-F', '/var/log.tmpfs/syslog']), + ] +) +def test_show_logging_tmpfs_syslog_1(run_command, cli_arguments0, expected0, cli_arguments1, expected1): runner = 
CliRunner() - result = runner.invoke(show.cli.commands["logging"], cli_arguments) - run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected, display_cmd=False) + runner.invoke(show.cli.commands["logging"], cli_arguments0) + run_command.assert_called_with(EXPECTED_BASE_COMMAND + expected0, display_cmd=False, shell=True) + runner.invoke(show.cli.commands["logging"], cli_arguments1) + run_command.assert_called_with(EXPECTED_BASE_COMMAND_LIST + expected1, display_cmd=False) def side_effect_subprocess_popen(*args, **kwargs): mock = MagicMock() - if args[0] == "uptime": + if ' '.join(args[0]) == "uptime": mock.stdout.read.return_value = "05:58:07 up 25 days" - elif args[0].startswith("sudo docker images"): + elif ' '.join(args[0]).startswith("sudo docker images"): mock.stdout.read.return_value = "REPOSITORY TAG" return mock @@ -158,7 +189,6 @@ def test_show_version(): result = runner.invoke(show.cli.commands["version"]) assert "SONiC OS Version: 11" in result.output - class TestShowAcl(object): def setup(self): print('SETUP') @@ -638,6 +668,314 @@ def test_temporature(self, mock_run_command): assert result.exit_code == 0 mock_run_command.assert_called_once_with(['tempershow']) + @patch('subprocess.check_call') + def test_firmware(self, mock_check_call): + runner = CliRunner() + result = runner.invoke(show.cli.commands['platform'].commands['firmware']) + assert result.exit_code == 0 + mock_check_call.assert_called_with(["sudo", "fwutil", "show"]) + + def teardown(self): + print('TEAR DOWN') + +class TestShowQuagga(object): + def setup(self): + print('SETUP') + + @patch('show.main.run_command') + @patch('show.main.get_routing_stack', MagicMock(return_value='quagga')) + def test_show_ip_bgp(self, mock_run_command): + from show.bgp_quagga_v4 import bgp + runner = CliRunner() + + result = runner.invoke(show.cli.commands["ip"].commands['bgp'].commands['summary']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, 
'-c', "show ip bgp summary"], return_cmd=True) + + result = runner.invoke(show.cli.commands["ip"].commands['bgp'].commands['neighbors']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', "show ip bgp neighbor"]) + + result = runner.invoke(show.cli.commands["ip"].commands['bgp'].commands['neighbors'], ['0.0.0.0', 'routes']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', "show ip bgp neighbor 0.0.0.0 routes"]) + + @patch('show.main.run_command') + @patch('show.main.get_routing_stack', MagicMock(return_value='quagga')) + def test_show_ipv6_bgp(self, mock_run_command): + from show.bgp_quagga_v6 import bgp + runner = CliRunner() + + result = runner.invoke(show.cli.commands["ipv6"].commands['bgp'].commands['summary']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 bgp summary"], return_cmd=True) + + result = runner.invoke(show.cli.commands["ipv6"].commands['bgp'].commands['neighbors'], ['0.0.0.0', 'routes']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', "show ipv6 bgp neighbor 0.0.0.0 routes"]) + + def teardown(self): + print('TEAR DOWN') + + +class TestShow(object): + def setup(self): + print('SETUP') + + @patch('show.main.run_command') + def test_show_arp(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["arp"], ['0.0.0.0', '-if', 'Ethernet0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['nbrshow', '-4', '-ip', '0.0.0.0', '-if', 'Ethernet0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ndp(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["ndp"], ['0.0.0.0', '-if', 'Ethernet0', '--verbose']) + assert result.exit_code == 0 + 
mock_run_command.assert_called_with(['nbrshow', '-6', '-ip', '0.0.0.0', '-if', 'Ethernet0'], display_cmd=True) + + @patch('show.main.run_command') + @patch('show.main.is_mgmt_vrf_enabled', MagicMock(return_value=True)) + def test_show_mgmt_vrf_routes(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["mgmt-vrf"], ['routes']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['ip', 'route', 'show', 'table', '5000']) + + @patch('show.main.run_command') + @patch('show.main.is_mgmt_vrf_enabled', MagicMock(return_value=True)) + def test_show_mgmt_vrf(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["mgmt-vrf"]) + assert result.exit_code == 0 + assert mock_run_command.call_args_list == [ + call(['ip', '-d', 'link', 'show', 'mgmt']), + call(['ip', 'link', 'show', 'vrf', 'mgmt']) + ] + + @patch('show.main.run_command') + def test_show_pfc_priority(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["pfc"].commands['priority'], ['Ethernet0']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['pfc', 'show', 'priority', 'Ethernet0']) + + @patch('show.main.run_command') + def test_show_pfc_asymmetric(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["pfc"].commands['asymmetric'], ['Ethernet0']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['pfc', 'show', 'asymmetric', 'Ethernet0']) + + @patch('show.main.run_command') + def test_show_pfcwd_config(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["pfcwd"].commands['config'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['pfcwd', 'show', 'config', '-d', 'all'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_pfcwd_stats(self, mock_run_command): + runner = CliRunner() + result = 
runner.invoke(show.cli.commands["pfcwd"].commands['stats'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['pfcwd', 'show', 'stats', '-d', 'all'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_watermark_telemetry_interval(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["watermark"].commands['telemetry'].commands['interval']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['watermarkcfg', '--show-interval']) + + @patch('show.main.run_command') + def test_show_route_map(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands["route-map"], ['BGP', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', 'show route-map BGP'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ip_prefix_list(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ip'].commands["prefix-list"], ['0.0.0.0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', 'show ip prefix-list 0.0.0.0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ip_protocol(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ip'].commands["protocol"], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', 'show ip protocol'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ip_fib(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ip'].commands["fib"], ['0.0.0.0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['fibshow', '-4', '-ip', '0.0.0.0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ipv6_prefix_list(self, 
mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ipv6'].commands["prefix-list"], ['0.0.0.0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', 'show ipv6 prefix-list 0.0.0.0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ipv6_protocol(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ipv6'].commands["protocol"], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', constants.RVTYSH_COMMAND, '-c', 'show ipv6 protocol'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_ipv6_fib(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ipv6'].commands["fib"], ['0.0.0.0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['fibshow', '-6', '-ip', '0.0.0.0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_lldp_neighbors(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['lldp'].commands["neighbors"], ['Ethernet0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'lldpshow', '-d', '-p' ,'Ethernet0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_lldp_table(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['lldp'].commands["table"], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'lldpshow'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_environment(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['environment'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'sensors'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_users(self, 
mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['users'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['who'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_runningconfiguration_acl(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['runningconfiguration'].commands['acl'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sonic-cfggen', '-d', '--var-json', 'ACL_RULE'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_runningconfiguration_ports(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['runningconfiguration'].commands['ports'], ['Ethernet0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sonic-cfggen', '-d', '--var-json', 'PORT', '--key', 'Ethernet0'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_runningconfiguration_interfaces(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['runningconfiguration'].commands['interfaces'], ['Ethernet0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sonic-cfggen', '-d', '--var-json', 'INTERFACE', '--key', 'Ethernet0'], display_cmd=True) + + @patch('show.main.run_command') + @patch('show.main.getstatusoutput_noshell_pipe', MagicMock(return_value=(0, 'quagga'))) + def test_show_startupconfiguration_bgp_quagga(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['startupconfiguration'].commands['bgp'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/quagga/bgpd.conf'], display_cmd=True) + + @patch('show.main.run_command') + @patch('show.main.getstatusoutput_noshell_pipe', MagicMock(return_value=(0, 'frr'))) + def 
test_show_startupconfiguration_bgp_frr(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['startupconfiguration'].commands['bgp'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/frr/bgpd.conf'], display_cmd=True) + + @patch('show.main.run_command') + @patch('show.main.getstatusoutput_noshell_pipe', MagicMock(return_value=(0, 'gobgp'))) + def test_show_startupconfiguration_bgp_gobgp(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['startupconfiguration'].commands['bgp'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['sudo', 'docker', 'exec', 'bgp', 'cat', '/etc/gpbgp/bgpd.conf'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_uptime(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['uptime'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['uptime', '-p'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_clock(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['clock'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['date'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_system_memory(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['system-memory'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['free', '-m'], display_cmd=True) + + @patch('show.main.run_command') + def test_show_mirror_session(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['mirror_session'], ['SPAN', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['acl-loader', 'show', 'session', 'SPAN'], display_cmd=True) + + 
@patch('show.main.run_command') + def test_show_policer(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['policer'], ['policer0', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['acl-loader', 'show', 'policer', 'policer0'], display_cmd=True) + + @patch('subprocess.Popen') + def test_show_boot(self, mock_subprocess_popen): + runner = CliRunner() + result = runner.invoke(show.cli.commands['boot']) + assert result.exit_code == 0 + mock_subprocess_popen.assert_called_with(["sudo", "sonic-installer", "list"], stdout=subprocess.PIPE, text=True) + + @patch('show.main.run_command') + def test_show_mmu(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['mmu']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['mmuconfig', '-l']) + + @patch('show.main.run_command') + def test_show_lines(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['line'], ['--brief', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['consutil', 'show', '-b'], display_cmd=True) + + @patch('show.main.run_command') + @patch('os.path.isfile', MagicMock(return_value=True)) + def test_show_ztp(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['ztp'], ['status', '--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_with(['ztp', 'status', '--verbose'], display_cmd=True) + def teardown(self): print('TEAR DOWN') diff --git a/tests/techsupport_test.py b/tests/techsupport_test.py index 41664e3589..c5f65895a2 100644 --- a/tests/techsupport_test.py +++ b/tests/techsupport_test.py @@ -3,18 +3,18 @@ from unittest.mock import patch, Mock from click.testing import CliRunner -EXPECTED_BASE_COMMAND = 'sudo ' +EXPECTED_BASE_COMMAND = ['sudo'] @patch("show.main.run_command") @pytest.mark.parametrize( "cli_arguments,expected", [ - ([], 'generate_dump 
-v -t 5'), - (['--since', '2 days ago'], "generate_dump -v -s '2 days ago' -t 5"), - (['-g', '50'], 'timeout --kill-after=300s -s SIGTERM --foreground 50m generate_dump -v -t 5'), - (['--allow-process-stop'], 'generate_dump -v -a -t 5'), - (['--silent'], 'generate_dump -t 5'), - (['--debug-dump', '--redirect-stderr'], 'generate_dump -v -d -t 5 -r'), + ([], ['generate_dump', '-v', '-t', '5']), + (['--since', '2 days ago'], ['generate_dump', '-v', '-s', '2 days ago', '-t', '5']), + (['-g', '50'], ['timeout', '--kill-after=300s', '-s', 'SIGTERM', '--foreground', '50m', 'generate_dump', '-v', '-t', '5']), + (['--allow-process-stop'], ['generate_dump', '-v', '-a', '-t', '5']), + (['--silent'], ['generate_dump', '-t', '5']), + (['--debug-dump', '--redirect-stderr'], ['generate_dump', '-v', '-d', '-t', '5', '-r']), ] ) def test_techsupport(run_command, cli_arguments, expected): diff --git a/tests/tunnelstat_test.py b/tests/tunnelstat_test.py index f1fe716ef3..19df51f2a6 100644 --- a/tests/tunnelstat_test.py +++ b/tests/tunnelstat_test.py @@ -83,7 +83,7 @@ def test_clear(self): expected = show_vxlan_counters_clear_output # remove the counters snapshot - show.run_command("tunnelstat -D") + show.run_command(['tunnelstat', '-D']) for line in expected: assert line in result.output @@ -97,7 +97,7 @@ def test_clear_interface(self): expected = show_vxlan_counters_clear_interface_output # remove the counters snapshot - show.run_command("tunnelstat -D") + show.run_command(['tunnelstat', '-D']) for line in expected: assert line in result.output From dc59dbd2f04be64ec053f1d680025609d8396a4f Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 31 May 2023 17:19:08 -0400 Subject: [PATCH 22/35] Replace pickle by json (#2849) This reverts commit 10f31ea6fb0876f913cfcfce8c95011e675a99f6. Revert "Revert "Replace pickle by json (#2636)" (#2746)" #### What I did Revert PR 2746 Add fix `default=json_serial` in intfstat #### How I did it #### How to verify it ``` 1. dropstat a. 
dropstat -c clear b. dropstat -c show 2. flow_counter_stat a. sonic-clear flowcnt-trap b. show flowcnt-trap stats 3. intfstat a. intfstat -c b. intfstat -c c. intfstat -c -t test d. intfstat -c -t test e. intfstat -p 0 4. pfcstat a. pfcstat -c b. pfcstat -s frontend 5. pg-drop a. pg-drop -c clear b. pg-drop -c show 6. portstat a. portstat -s frontend b. portstat -c 7. queuestat a. queuestat -c b. queuestat c. queuestat -p Ethernet0 8. tunnelstat a. tunnelstat -c b. tunnelstat ``` --- scripts/dropstat | 14 +-- scripts/flow_counters_stat | 10 +- scripts/intfstat | 64 +++++----- scripts/pfcstat | 62 +++++----- scripts/pg-drop | 8 +- scripts/portstat | 239 ++++++++++++++++++------------------- scripts/queuestat | 34 +++--- scripts/tunnelstat | 40 +++---- tests/intfstat_test.py | 24 +++- 9 files changed, 258 insertions(+), 237 deletions(-) diff --git a/scripts/dropstat b/scripts/dropstat index f98fc29197..4e9f5bb4d0 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,7 +11,7 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries -import _pickle as pickle +import json import argparse import os import socket @@ -117,10 +117,10 @@ class DropStat(object): """ try: - pickle.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'wb+')) - pickle.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), - open(self.switch_drop_stats_file, 'wb+')) + json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), + open(self.port_drop_stats_file, 'w+')) + json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()), + open(self.switch_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) @@ -135,7 
+135,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) + port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) counters = self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP, group, counter_type) headers = std_port_description_header + self.gather_headers(counters, DEBUG_COUNTER_PORT_STAT_MAP) @@ -162,7 +162,7 @@ class DropStat(object): # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.switch_drop_stats_file): - switch_drop_ckpt = pickle.load(open(self.switch_drop_stats_file, 'rb')) + switch_drop_ckpt = json.load(open(self.switch_drop_stats_file, 'r')) counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP, group, counter_type) headers = std_switch_description_header + self.gather_headers(counters, DEBUG_COUNTER_SWITCH_STAT_MAP) diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index ac5ef94beb..49b97e335b 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -2,7 +2,7 @@ import argparse import os -import _pickle as pickle +import json import sys from natsort import natsorted @@ -185,8 +185,8 @@ class FlowCounterStats(object): if os.path.exists(self.data_file): os.remove(self.data_file) - with open(self.data_file, 'wb') as f: - pickle.dump(data, f) + with open(self.data_file, 'w') as f: + json.dump(data, f) except IOError as e: print('Failed to save statistic - {}'.format(repr(e))) @@ -200,8 +200,8 @@ class FlowCounterStats(object): return None try: - with open(self.data_file, 'rb') as f: - data = pickle.load(f) + with open(self.data_file, 'r') as f: + data = json.load(f) except IOError as e: print('Failed to load statistic - {}'.format(repr(e))) return None diff --git a/scripts/intfstat b/scripts/intfstat index 30cfbf084d..91f88d9f1d 100755 --- a/scripts/intfstat +++ b/scripts/intfstat @@ -6,7 +6,7 
@@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import sys @@ -28,7 +28,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_brate, format_prate -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from swsscommon.swsscommon import SonicV2Connector nstat_fields = ( @@ -96,7 +96,7 @@ class Intfstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ -153,14 +153,14 @@ class Intfstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) table.append((key, - data.rx_p_ok, + data['rx_p_ok'], format_brate(rates.rx_bps), format_prate(rates.rx_pps), - data.rx_p_err, - data.tx_p_ok, + data['rx_p_err'], + data['tx_p_ok'], format_brate(rates.tx_bps), format_prate(rates.tx_pps), - data.tx_p_err)) + data['tx_p_err'])) if use_json: print(table_as_json(table, header)) @@ -186,24 +186,24 @@ class Intfstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), + ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), - ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), + ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), - ns_diff(cntr.tx_p_err, old_cntr.tx_p_err))) + ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']))) else: table.append((key, - cntr.rx_p_ok, + cntr['rx_p_ok'], format_brate(rates.rx_bps), format_prate(rates.rx_pps), - cntr.rx_p_err, - 
cntr.tx_p_ok, + cntr['rx_p_err'], + cntr['tx_p_ok'], format_brate(rates.tx_bps), format_prate(rates.tx_pps), - cntr.tx_p_err)) + cntr['tx_p_err'])) if use_json: print(table_as_json(table, header)) @@ -229,17 +229,17 @@ class Intfstat(object): if cnstat_old_dict and cnstat_old_dict.get(rif): old_cntr = cnstat_old_dict.get(rif) - body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), - ns_diff(cntr.rx_p_err, old_cntr.rx_p_err), - ns_diff(cntr.rx_b_err, old_cntr.rx_b_err), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), - ns_diff(cntr.tx_p_err, old_cntr.tx_p_err), - ns_diff(cntr.tx_b_err, old_cntr.tx_b_err)) + body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr['rx_p_err'], old_cntr['rx_p_err']), + ns_diff(cntr['rx_b_err'], old_cntr['rx_b_err']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), + ns_diff(cntr['tx_p_err'], old_cntr['tx_p_err']), + ns_diff(cntr['tx_b_err'], old_cntr['tx_b_err'])) else: - body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.rx_p_err,cntr.rx_b_err, - cntr.tx_p_ok, cntr.tx_b_ok, cntr.tx_p_err, cntr.tx_b_err) + body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['rx_p_err'],cntr['rx_b_err'], + cntr['tx_p_ok'], cntr['tx_b_ok'], cntr['tx_p_err'], cntr['tx_b_err']) print(header) print(body) @@ -305,20 +305,20 @@ def main(): if tag_name is not None: if os.path.isfile(cnstat_fqn_general_file): try: - general_data = pickle.load(open(cnstat_fqn_general_file, 'rb')) + general_data = json.load(open(cnstat_fqn_general_file, 'r')) for key, val in cnstat_dict.items(): general_data[key] = val - pickle.dump(general_data, open(cnstat_fqn_general_file, 'wb')) + json.dump(general_data, open(cnstat_fqn_general_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) # Add the information also to tag specific file if 
os.path.isfile(cnstat_fqn_file): - data = pickle.load(open(cnstat_fqn_file, 'rb')) + data = json.load(open(cnstat_fqn_file, 'r')) for key, val in cnstat_dict.items(): data[key] = val - pickle.dump(data, open(cnstat_fqn_file, 'wb')) + json.dump(data, open(cnstat_fqn_file, 'w'), default=json_serial) else: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) else: @@ -330,9 +330,9 @@ def main(): try: cnstat_cached_dict = {} if os.path.isfile(cnstat_fqn_file): - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) else: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_general_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_general_file, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if interface_name: diff --git a/scripts/pfcstat b/scripts/pfcstat index fb7e6018b6..094c6e9380 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -37,7 +37,7 @@ except KeyError: from utilities_common.netstat import ns_diff, STATUS_NA, format_number_with_comma from utilities_common import multi_asic as multi_asic_util from utilities_common import constants -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7") @@ -101,7 +101,7 @@ class Pfcstat(object): fields[pos] = STATUS_NA else: fields[pos] = str(int(counter_data)) - cntr = PStats._make(fields) + cntr = PStats._make(fields)._asdict() return cntr # Get the info from database @@ -144,14 +144,14 @@ class Pfcstat(object): if key == 'time': continue table.append((key, - format_number_with_comma(data.pfc0), - 
format_number_with_comma(data.pfc1), - format_number_with_comma(data.pfc2), - format_number_with_comma(data.pfc3), - format_number_with_comma(data.pfc4), - format_number_with_comma(data.pfc5), - format_number_with_comma(data.pfc6), - format_number_with_comma(data.pfc7))) + format_number_with_comma(data['pfc0']), + format_number_with_comma(data['pfc1']), + format_number_with_comma(data['pfc2']), + format_number_with_comma(data['pfc3']), + format_number_with_comma(data['pfc4']), + format_number_with_comma(data['pfc5']), + format_number_with_comma(data['pfc6']), + format_number_with_comma(data['pfc7']))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -173,24 +173,24 @@ class Pfcstat(object): if old_cntr is not None: table.append((key, - ns_diff(cntr.pfc0, old_cntr.pfc0), - ns_diff(cntr.pfc1, old_cntr.pfc1), - ns_diff(cntr.pfc2, old_cntr.pfc2), - ns_diff(cntr.pfc3, old_cntr.pfc3), - ns_diff(cntr.pfc4, old_cntr.pfc4), - ns_diff(cntr.pfc5, old_cntr.pfc5), - ns_diff(cntr.pfc6, old_cntr.pfc6), - ns_diff(cntr.pfc7, old_cntr.pfc7))) + ns_diff(cntr['pfc0'], old_cntr['pfc0']), + ns_diff(cntr['pfc1'], old_cntr['pfc1']), + ns_diff(cntr['pfc2'], old_cntr['pfc2']), + ns_diff(cntr['pfc3'], old_cntr['pfc3']), + ns_diff(cntr['pfc4'], old_cntr['pfc4']), + ns_diff(cntr['pfc5'], old_cntr['pfc5']), + ns_diff(cntr['pfc6'], old_cntr['pfc6']), + ns_diff(cntr['pfc7'], old_cntr['pfc7']))) else: table.append((key, - format_number_with_comma(cntr.pfc0), - format_number_with_comma(cntr.pfc1), - format_number_with_comma(cntr.pfc2), - format_number_with_comma(cntr.pfc3), - format_number_with_comma(cntr.pfc4), - format_number_with_comma(cntr.pfc5), - format_number_with_comma(cntr.pfc6), - format_number_with_comma(cntr.pfc7))) + format_number_with_comma(cntr['pfc0']), + format_number_with_comma(cntr['pfc1']), + format_number_with_comma(cntr['pfc2']), + format_number_with_comma(cntr['pfc3']), + format_number_with_comma(cntr['pfc4']), + 
format_number_with_comma(cntr['pfc5']), + format_number_with_comma(cntr['pfc6']), + format_number_with_comma(cntr['pfc7']))) if rx: print(tabulate(table, header_Rx, tablefmt='simple', stralign='right')) @@ -256,8 +256,8 @@ Examples: if save_fresh_stats: try: - pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'wb')) - pickle.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'wb')) + json.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'w'), default=json_serial) + json.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'w'), default=json_serial) except IOError as e: print(e.errno, e) sys.exit(e.errno) @@ -271,7 +271,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_rx): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_rx, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_rx, cnstat_cached_dict, True) except IOError as e: @@ -286,7 +286,7 @@ Examples: """ if os.path.isfile(cnstat_fqn_file_tx): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_tx, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) pfcstat.cnstat_diff_print(cnstat_dict_tx, cnstat_cached_dict, False) except IOError as e: diff --git a/scripts/pg-drop b/scripts/pg-drop index 40b4e863d3..7741593081 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,7 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. 
# ##################################################################### -import _pickle as pickle +import json import argparse import os import sys @@ -144,7 +144,7 @@ class PgDropStat(object): port_drop_ckpt = {} # Grab the latest clear checkpoint, if it exists if os.path.isfile(self.port_drop_stats_file): - port_drop_ckpt = pickle.load(open(self.port_drop_stats_file, 'rb')) + port_drop_ckpt = json.load(open(self.port_drop_stats_file, 'r')) # Header list contains the port name followed by the PGs. Fields is used to populate the pg values fields = ["0"]* (len(self.header_list) - 1) @@ -216,10 +216,10 @@ class PgDropStat(object): counter_pg_drop_array = [ "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS"] try: - pickle.dump(self.get_counts_table( + json.dump(self.get_counts_table( counter_pg_drop_array, COUNTERS_PG_NAME_MAP), - open(self.port_drop_stats_file, 'wb+')) + open(self.port_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) diff --git a/scripts/portstat b/scripts/portstat index 399733f69c..43746cc1c3 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -40,7 +40,7 @@ from utilities_common.intf_filter import parse_interface_in_filter import utilities_common.multi_asic as multi_asic_util from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache """ The order and count of statistics mentioned below needs to be in sync with the values in portstat script @@ -181,7 +181,7 @@ class Portstat(object): elif fields[pos] != STATUS_NA: fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ 
-278,62 +278,61 @@ class Portstat(object): if print_all: header = header_all table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) elif errors_only: header = header_errors_only table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) elif fec_stats_only: header = header_fec_only table.append((key, self.get_port_state(key), - format_number_with_comma(data.fec_corr), - format_number_with_comma(data.fec_uncorr), - format_number_with_comma(data.fec_symbol_err))) + format_number_with_comma(data['fec_corr']), + format_number_with_comma(data['fec_uncorr']), + format_number_with_comma(data['fec_symbol_err']))) elif rates_only: 
header = header_rates_only table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: header = header_std table.append((key, self.get_port_state(key), - format_number_with_comma(data.rx_ok), + format_number_with_comma(data['rx_ok']), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(data.rx_err), - format_number_with_comma(data.rx_drop), - format_number_with_comma(data.rx_ovr), - format_number_with_comma(data.tx_ok), + format_number_with_comma(data['rx_err']), + format_number_with_comma(data['rx_drop']), + format_number_with_comma(data['rx_ovr']), + format_number_with_comma(data['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - - format_number_with_comma(data.tx_err), - format_number_with_comma(data.tx_drop), - format_number_with_comma(data.tx_ovr))) + format_number_with_comma(data['tx_err']), + format_number_with_comma(data['tx_drop']), + format_number_with_comma(data['tx_ovr']))) if table: if use_json: print(table_as_json(table, header)) @@ -354,51 +353,51 @@ class Portstat(object): if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) else: - old_cntr = NStats._make([0] * BUCKET_NUM) + old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() if intf_list and key not in intf_list: continue - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr.rx_64, old_cntr.rx_64))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr.rx_65_127, old_cntr.rx_65_127))) - print("Packets Received 128-255 Octets................ 
{}".format(ns_diff(cntr.rx_128_255, old_cntr.rx_128_255))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr.rx_256_511, old_cntr.rx_256_511))) - print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr.rx_512_1023, old_cntr.rx_512_1023))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr.rx_1024_1518, old_cntr.rx_1024_1518))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr.rx_1519_2047, old_cntr.rx_1519_2047))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr.rx_2048_4095, old_cntr.rx_2048_4095))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr.rx_4096_9216, old_cntr.rx_4096_9216))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr.rx_9217_16383, old_cntr.rx_9217_16383))) + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) + print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) + print("Packets Received 4096-9216 Octets.............. 
{}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr.rx_all, old_cntr.rx_all))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr.rx_uca, old_cntr.rx_uca))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr.rx_mca, old_cntr.rx_mca))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr.rx_bca, old_cntr.rx_bca))) + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr.rx_jbr, old_cntr.rx_jbr))) - print("Fragments Received............................. {}".format(ns_diff(cntr.rx_frag, old_cntr.rx_frag))) - print("Undersize Received............................. {}".format(ns_diff(cntr.rx_usize, old_cntr.rx_usize))) - print("Overruns Received.............................. {}".format(ns_diff(cntr.rx_ovrrun, old_cntr.rx_ovrrun))) + print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) + print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) + print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) + print("Overruns Received.............................. 
{}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr.tx_64, old_cntr.tx_64))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr.tx_65_127, old_cntr.tx_65_127))) - print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr.tx_128_255, old_cntr.tx_128_255))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr.tx_256_511, old_cntr.tx_256_511))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr.tx_512_1023, old_cntr.tx_512_1023))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr.tx_1024_1518, old_cntr.tx_1024_1518))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr.tx_1519_2047, old_cntr.tx_1519_2047))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr.tx_2048_4095, old_cntr.tx_2048_4095))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr.tx_4096_9216, old_cntr.tx_4096_9216))) - print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr.tx_9217_16383, old_cntr.tx_9217_16383))) + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) + print("Packets Transmitted 1024-1518 Octets........... 
{}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr.tx_all, old_cntr.tx_all))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr.tx_uca, old_cntr.tx_uca))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_mca, old_cntr.tx_mca))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr.tx_bca, old_cntr.tx_bca))) + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) print("Time Since Counters Last Cleared............... 
" + str(cnstat_old_dict.get('time'))) @@ -435,88 +434,88 @@ class Portstat(object): header = header_all if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) elif 
errors_only: header = header_errors_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) elif fec_stats_only: header = header_fec_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.fec_corr, old_cntr.fec_corr), - ns_diff(cntr.fec_uncorr, old_cntr.fec_uncorr), - ns_diff(cntr.fec_symbol_err, old_cntr.fec_symbol_err))) + ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), + ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), + ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.fec_corr), - format_number_with_comma(cntr.fec_uncorr), - format_number_with_comma(cntr.fec_symbol_err))) + format_number_with_comma(cntr['fec_corr']), + format_number_with_comma(cntr['fec_uncorr']), + format_number_with_comma(cntr['fec_symbol_err']))) elif 
rates_only: header = header_rates_only if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), format_prate(rates.rx_pps), format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_prate(rates.tx_pps), format_util(rates.tx_bps, port_speed))) @@ -525,33 +524,33 @@ class Portstat(object): if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), format_brate(rates.rx_bps), format_util(rates.rx_bps, port_speed), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_diff(cntr['rx_err'], old_cntr['rx_err']), + ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), + ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), + ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr['tx_err'], old_cntr['tx_err']), + ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), + ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) else: table.append((key, self.get_port_state(key), - format_number_with_comma(cntr.rx_ok), + format_number_with_comma(cntr['rx_ok']), format_brate(rates.rx_bps), 
format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr.rx_err), - format_number_with_comma(cntr.rx_drop), - format_number_with_comma(cntr.rx_ovr), - format_number_with_comma(cntr.tx_ok), + format_number_with_comma(cntr['rx_err']), + format_number_with_comma(cntr['rx_drop']), + format_number_with_comma(cntr['rx_ovr']), + format_number_with_comma(cntr['tx_ok']), format_brate(rates.tx_bps), format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr.tx_err), - format_number_with_comma(cntr.tx_drop), - format_number_with_comma(cntr.tx_ovr))) + format_number_with_comma(cntr['tx_err']), + format_number_with_comma(cntr['tx_drop']), + format_number_with_comma(cntr['tx_ovr']))) if table: if use_json: print(table_as_json(table, header)) @@ -642,7 +641,7 @@ Examples: if save_fresh_stats: try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) else: @@ -653,7 +652,7 @@ Examples: cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) if not detail: print("Last cached time was " + str(cnstat_cached_dict.get('time'))) portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail) diff --git a/scripts/queuestat b/scripts/queuestat index 96a24b51a3..d82e7e4a6a 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import os.path @@ -33,7 +33,7 @@ except KeyError: pass from swsscommon.swsscommon import SonicV2Connector -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from utilities_common import 
constants import utilities_common.multi_asic as multi_asic_util @@ -186,7 +186,7 @@ class Queuestat(object): fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields) + cntr = QueueStats._make(fields)._asdict() return cntr # Build a dictionary of the stats @@ -211,9 +211,9 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - table.append((port, data.queuetype + str(data.queueindex), - data.totalpacket, data.totalbytes, - data.droppacket, data.dropbytes)) + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'])) if json_opt: json_output[port].update(build_json(port, table)) @@ -241,15 +241,15 @@ class Queuestat(object): old_cntr = cnstat_old_dict.get(key) if old_cntr is not None: - table.append((port, cntr.queuetype + str(cntr.queueindex), - ns_diff(cntr.totalpacket, old_cntr.totalpacket), - ns_diff(cntr.totalbytes, old_cntr.totalbytes), - ns_diff(cntr.droppacket, old_cntr.droppacket), - ns_diff(cntr.dropbytes, old_cntr.dropbytes))) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) else: - table.append((port, cntr.queuetype + str(cntr.queueindex), - cntr.totalpacket, cntr.totalbytes, - cntr.droppacket, cntr.dropbytes)) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'])) if json_opt: json_output[port].update(build_json(port, table)) @@ -273,7 +273,7 @@ class Queuestat(object): cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) + 
cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -307,7 +307,7 @@ class Queuestat(object): json_output[port] = {} if os.path.isfile(cnstat_fqn_file_name): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file_name, 'r')) if json_opt: json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt)) @@ -330,7 +330,7 @@ class Queuestat(object): for port in natsorted(self.counter_port_name_map): cnstat_dict = self.get_cnstat(self.port_queues_map[port]) try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file + port, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file + port, 'w'), default=json_serial) except IOError as e: print(e.errno, e) sys.exit(e.errno) diff --git a/scripts/tunnelstat b/scripts/tunnelstat index 8b045ec684..3d7423e86b 100755 --- a/scripts/tunnelstat +++ b/scripts/tunnelstat @@ -6,7 +6,7 @@ # ##################################################################### -import _pickle as pickle +import json import argparse import datetime import sys @@ -29,7 +29,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, table_as_json, STATUS_NA, format_prate -from utilities_common.cli import UserCache +from utilities_common.cli import json_serial, UserCache from swsscommon.swsscommon import SonicV2Connector @@ -80,7 +80,7 @@ class Tunnelstat(object): counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data: fields[pos] = str(counter_data) - cntr = NStats._make(fields) + cntr = NStats._make(fields)._asdict() return cntr def get_rates(table_id): @@ -149,8 +149,8 @@ 
class Tunnelstat(object): continue rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - table.append((key, data.rx_p_ok, data.rx_b_ok, format_prate(rates.rx_pps), - data.tx_p_ok, data.tx_b_ok, format_prate(rates.tx_pps))) + table.append((key, data['rx_p_ok'], data['rx_b_ok'], format_prate(rates.rx_pps), + data['tx_p_ok'], data['tx_b_ok'], format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -175,19 +175,19 @@ class Tunnelstat(object): rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) if old_cntr is not None: table.append((key, - ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), + ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), format_prate(rates.rx_pps), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok']), format_prate(rates.tx_pps))) else: table.append((key, - cntr.rx_p_ok, - cntr.rx_b_ok, + cntr['rx_p_ok'], + cntr['rx_b_ok'], format_prate(rates.rx_pps), - cntr.tx_p_ok, - cntr.tx_b_ok, + cntr['tx_p_ok'], + cntr['tx_b_ok'], format_prate(rates.tx_pps))) if use_json: print(table_as_json(table, header)) @@ -210,12 +210,12 @@ class Tunnelstat(object): if cnstat_old_dict: old_cntr = cnstat_old_dict.get(tunnel) if old_cntr: - body = body % (ns_diff(cntr.rx_p_ok, old_cntr.rx_p_ok), - ns_diff(cntr.rx_b_ok, old_cntr.rx_b_ok), - ns_diff(cntr.tx_p_ok, old_cntr.tx_p_ok), - ns_diff(cntr.tx_b_ok, old_cntr.tx_b_ok)) + body = body % (ns_diff(cntr['rx_p_ok'], old_cntr['rx_p_ok']), + ns_diff(cntr['rx_b_ok'], old_cntr['rx_b_ok']), + ns_diff(cntr['tx_p_ok'], old_cntr['tx_p_ok']), + ns_diff(cntr['tx_b_ok'], old_cntr['tx_b_ok'])) else: - body = body % (cntr.rx_p_ok, cntr.rx_b_ok, cntr.tx_p_ok, cntr.tx_b_ok) + body = body % (cntr['rx_p_ok'], cntr['rx_b_ok'], cntr['tx_p_ok'], 
cntr['tx_b_ok']) print(header) print(body) @@ -273,7 +273,7 @@ def main(): if save_fresh_stats: try: - pickle.dump(cnstat_dict, open(cnstat_fqn_file, 'wb')) + json.dump(cnstat_dict, open(cnstat_fqn_file, 'w'), default=json_serial) except IOError as e: sys.exit(e.errno) else: @@ -283,7 +283,7 @@ def main(): if wait_time_in_seconds == 0: if os.path.isfile(cnstat_fqn_file): try: - cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'rb')) + cnstat_cached_dict = json.load(open(cnstat_fqn_file, 'r')) print("Last cached time was " + str(cnstat_cached_dict.get('time'))) if tunnel_name: tunnelstat.cnstat_single_tunnel(tunnel_name, cnstat_dict, cnstat_cached_dict) diff --git a/tests/intfstat_test.py b/tests/intfstat_test.py index f76e54c7b5..fa3af5e2be 100644 --- a/tests/intfstat_test.py +++ b/tests/intfstat_test.py @@ -1,7 +1,7 @@ import sys import os import traceback - +import subprocess import show.main as show import clear.main as clear @@ -212,6 +212,28 @@ def test_alias_mode(self): assert interface in result_lines[i+2] os.environ["SONIC_CLI_IFACE_MODE"] = "default" + def test_clear_tag(self): + cmd0 = ["intfstat", "-c"] + subprocess.Popen(cmd0, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + + cmd1 = ["intfstat", "-c", '-t', 'test'] + subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + cmd2 = ["intfstat", '-t', 'test'] + p2 = subprocess.Popen(cmd2, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + output2 = p2.communicate()[0] + print(output2) + assert show_interfaces_counters_rif_clear in output2 + + cmd3 = ["intfstat", "-c", '-t', 'test'] + subprocess.Popen(cmd3, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + cmd4 = ["intfstat", '-t', 'test'] + p4 = subprocess.Popen(cmd4, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + output4 = p4.communicate()[0] + print(output4) + assert show_interfaces_counters_rif_clear in 
output4 + + show.run_command(["intfstat", "-D"]) + @classmethod def teardown_class(cls): print("TEARDOWN") From b316fc27f5b56d7c04dc093ad55db600c4e6e2a3 Mon Sep 17 00:00:00 2001 From: longhuan-cisco <84595962+longhuan-cisco@users.noreply.github.com> Date: Wed, 31 May 2023 22:01:34 -0700 Subject: [PATCH 23/35] Add transceiver status CLI to show output from TRANSCEIVER_STATUS table (#2772) * Add transceiver status CLI to display TRANSCEIVER_STATUS table for CMIS/C-CMIS * Fix typo in command-ref * Add tx_disable * Update tx_disabled_channel's discription * Remove redundant code * addressed comments * Fix typo for tx power low warning * Separate CMIS and CCMIS status map * code cleanup * Unify display format and update display order * Fix after merge from master --- doc/Command-Reference.md | 285 ++++++++++++++++++++++++++- scripts/sfpshow | 77 +++++++- show/interfaces/__init__.py | 23 +++ tests/mock_tables/state_db.json | 296 +++++++++++++++++++++++++++- tests/sfp_test.py | 337 ++++++++++++++++++++++++++++++++ tests/sfputil_test.py | 7 +- utilities_common/sfp_helper.py | 299 ++++++++++++++++++++++++++++ 7 files changed, 1319 insertions(+), 5 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 03c61f1bd4..89ac722b9b 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -931,7 +931,7 @@ This command displays information for all the interfaces for the transceiver req - Usage: ``` - show interfaces transceiver (eeprom [-d|--dom] | info | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm) [] + show interfaces transceiver (eeprom [-d|--dom] | info | lpmode | presence | error-status [-hw|--fetch-from-hardware] | pm | status) [] ``` - Example (Decode and display information stored on the EEPROM of SFP transceiver connected to Ethernet0): @@ -1060,6 +1060,289 @@ This command displays information for all the interfaces for the transceiver req EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A ``` +- Example (Display 
status info of SFP transceiver connected to Ethernet100): + ``` + admin@sonic:~$ show interfaces transceiver status Ethernet100 + Ethernet100: + Tx fault flag on media lane 1: False + Tx fault flag on media lane 2: False + Tx fault flag on media lane 3: False + Tx fault flag on media lane 4: False + Tx fault flag on media lane 5: False + Tx fault flag on media lane 6: False + Tx fault flag on media lane 7: False + Tx fault flag on media lane 8: False + Rx loss of signal flag on media lane 1: False + Rx loss of signal flag on media lane 2: False + Rx loss of signal flag on media lane 3: False + Rx loss of signal flag on media lane 4: False + Rx loss of signal flag on media lane 5: False + Rx loss of signal flag on media lane 6: False + Rx loss of signal flag on media lane 7: False + Rx loss of signal flag on media lane 8: False + TX disable status on lane 1: False + TX disable status on lane 2: False + TX disable status on lane 3: False + TX disable status on lane 4: False + TX disable status on lane 5: False + TX disable status on lane 6: False + TX disable status on lane 7: False + TX disable status on lane 8: False + Disabled TX channels: 0 + Current module state: ModuleReady + Reason of entering the module fault state: No Fault detected + Datapath firmware fault: False + Module firmware fault: False + Module state changed: False + Data path state indicator on host lane 1: DataPathActivated + Data path state indicator on host lane 2: DataPathActivated + Data path state indicator on host lane 3: DataPathActivated + Data path state indicator on host lane 4: DataPathActivated + Data path state indicator on host lane 5: DataPathActivated + Data path state indicator on host lane 6: DataPathActivated + Data path state indicator on host lane 7: DataPathActivated + Data path state indicator on host lane 8: DataPathActivated + Tx output status on media lane 1: False + Tx output status on media lane 2: False + Tx output status on media lane 3: False + Tx output status on 
media lane 4: False + Tx output status on media lane 5: False + Tx output status on media lane 6: False + Tx output status on media lane 7: False + Tx output status on media lane 8: False + Rx output status on host lane 1: True + Rx output status on host lane 2: True + Rx output status on host lane 3: True + Rx output status on host lane 4: True + Rx output status on host lane 5: True + Rx output status on host lane 6: True + Rx output status on host lane 7: True + Rx output status on host lane 8: True + Tx loss of signal flag on host lane 1: False + Tx loss of signal flag on host lane 2: False + Tx loss of signal flag on host lane 3: False + Tx loss of signal flag on host lane 4: False + Tx loss of signal flag on host lane 5: False + Tx loss of signal flag on host lane 6: False + Tx loss of signal flag on host lane 7: False + Tx loss of signal flag on host lane 8: False + Tx clock and data recovery loss of lock on host lane 1: False + Tx clock and data recovery loss of lock on host lane 2: False + Tx clock and data recovery loss of lock on host lane 3: False + Tx clock and data recovery loss of lock on host lane 4: False + Tx clock and data recovery loss of lock on host lane 5: False + Tx clock and data recovery loss of lock on host lane 6: False + Tx clock and data recovery loss of lock on host lane 7: False + Tx clock and data recovery loss of lock on host lane 8: False + Rx clock and data recovery loss of lock on media lane 1: False + Rx clock and data recovery loss of lock on media lane 2: False + Rx clock and data recovery loss of lock on media lane 3: False + Rx clock and data recovery loss of lock on media lane 4: False + Rx clock and data recovery loss of lock on media lane 5: False + Rx clock and data recovery loss of lock on media lane 6: False + Rx clock and data recovery loss of lock on media lane 7: False + Rx clock and data recovery loss of lock on media lane 8: False + Configuration status for the data path of host line 1: ConfigSuccess + 
Configuration status for the data path of host line 2: ConfigSuccess + Configuration status for the data path of host line 3: ConfigSuccess + Configuration status for the data path of host line 4: ConfigSuccess + Configuration status for the data path of host line 5: ConfigSuccess + Configuration status for the data path of host line 6: ConfigSuccess + Configuration status for the data path of host line 7: ConfigSuccess + Configuration status for the data path of host line 8: ConfigSuccess + Data path configuration updated on host lane 1: False + Data path configuration updated on host lane 2: False + Data path configuration updated on host lane 3: False + Data path configuration updated on host lane 4: False + Data path configuration updated on host lane 5: False + Data path configuration updated on host lane 6: False + Data path configuration updated on host lane 7: False + Data path configuration updated on host lane 8: False + Temperature high alarm flag: False + Temperature high warning flag: False + Temperature low warning flag: False + Temperature low alarm flag: False + Vcc high alarm flag: False + Vcc high warning flag: False + Vcc low warning flag: False + Vcc low alarm flag: False + Tx power high alarm flag on lane 1: False + Tx power high alarm flag on lane 2: False + Tx power high alarm flag on lane 3: False + Tx power high alarm flag on lane 4: False + Tx power high alarm flag on lane 5: False + Tx power high alarm flag on lane 6: False + Tx power high alarm flag on lane 7: False + Tx power high alarm flag on lane 8: False + Tx power high warning flag on lane 1: False + Tx power high warning flag on lane 2: False + Tx power high warning flag on lane 3: False + Tx power high warning flag on lane 4: False + Tx power high warning flag on lane 5: False + Tx power high warning flag on lane 6: False + Tx power high warning flag on lane 7: False + Tx power high warning flag on lane 8: False + Tx power low warning flag on lane 1: False + Tx power low warning 
flag on lane 2: False + Tx power low warning flag on lane 3: False + Tx power low warning flag on lane 4: False + Tx power low warning flag on lane 5: False + Tx power low warning flag on lane 6: False + Tx power low warning flag on lane 7: False + Tx power low warning flag on lane 8: False + Tx power low alarm flag on lane 1: False + Tx power low alarm flag on lane 2: False + Tx power low alarm flag on lane 3: False + Tx power low alarm flag on lane 4: False + Tx power low alarm flag on lane 5: False + Tx power low alarm flag on lane 6: False + Tx power low alarm flag on lane 7: False + Tx power low alarm flag on lane 8: False + Rx power high alarm flag on lane 1: False + Rx power high alarm flag on lane 2: False + Rx power high alarm flag on lane 3: False + Rx power high alarm flag on lane 4: False + Rx power high alarm flag on lane 5: False + Rx power high alarm flag on lane 6: False + Rx power high alarm flag on lane 7: False + Rx power high alarm flag on lane 8: False + Rx power high warning flag on lane 1: False + Rx power high warning flag on lane 2: False + Rx power high warning flag on lane 3: False + Rx power high warning flag on lane 4: False + Rx power high warning flag on lane 5: False + Rx power high warning flag on lane 6: False + Rx power high warning flag on lane 7: False + Rx power high warning flag on lane 8: False + Rx power low warning flag on lane 1: False + Rx power low warning flag on lane 2: False + Rx power low warning flag on lane 3: False + Rx power low warning flag on lane 4: False + Rx power low warning flag on lane 5: False + Rx power low warning flag on lane 6: False + Rx power low warning flag on lane 7: False + Rx power low warning flag on lane 8: False + Rx power low alarm flag on lane 1: False + Rx power low alarm flag on lane 2: False + Rx power low alarm flag on lane 3: False + Rx power low alarm flag on lane 4: False + Rx power low alarm flag on lane 5: False + Rx power low alarm flag on lane 6: False + Rx power low alarm flag 
on lane 7: False + Rx power low alarm flag on lane 8: False + Tx bias high alarm flag on lane 1: False + Tx bias high alarm flag on lane 2: False + Tx bias high alarm flag on lane 3: False + Tx bias high alarm flag on lane 4: False + Tx bias high alarm flag on lane 5: False + Tx bias high alarm flag on lane 6: False + Tx bias high alarm flag on lane 7: False + Tx bias high alarm flag on lane 8: False + Tx bias high warning flag on lane 1: False + Tx bias high warning flag on lane 2: False + Tx bias high warning flag on lane 3: False + Tx bias high warning flag on lane 4: False + Tx bias high warning flag on lane 5: False + Tx bias high warning flag on lane 6: False + Tx bias high warning flag on lane 7: False + Tx bias high warning flag on lane 8: False + Tx bias low warning flag on lane 1: False + Tx bias low warning flag on lane 2: False + Tx bias low warning flag on lane 3: False + Tx bias low warning flag on lane 4: False + Tx bias low warning flag on lane 5: False + Tx bias low warning flag on lane 6: False + Tx bias low warning flag on lane 7: False + Tx bias low warning flag on lane 8: False + Tx bias low alarm flag on lane 1: False + Tx bias low alarm flag on lane 2: False + Tx bias low alarm flag on lane 3: False + Tx bias low alarm flag on lane 4: False + Tx bias low alarm flag on lane 5: False + Tx bias low alarm flag on lane 6: False + Tx bias low alarm flag on lane 7: False + Tx bias low alarm flag on lane 8: False + Laser temperature high alarm flag: False + Laser temperature high warning flag: False + Laser temperature low warning flag: False + Laser temperature low alarm flag: False + Prefec ber high alarm flag: False + Prefec ber high warning flag: False + Prefec ber low warning flag: False + Prefec ber low alarm flag: False + Postfec ber high alarm flag: False + Postfec ber high warning flag: False + Postfec ber low warning flag: False + Postfec ber low alarm flag: False + Tuning in progress status: False + Laser unlocked status: False + Target 
output power out of range flag: False + Fine tuning out of range flag: False + Tuning not accepted flag: False + Invalid channel number flag: False + Tuning complete flag: False + Bias xi high alarm flag: False + Bias xi high warning flag: False + Bias xi low warning flag: False + Bias xi low alarm flag: False + Bias xq high alarm flag: False + Bias xq high warning flag: False + Bias xq low warning flag: False + Bias xq low alarm flag: False + Bias xp high alarm flag: False + Bias xp high warning flag: False + Bias xp low warning flag: False + Bias xp low alarm flag: False + Bias yi high alarm flag: False + Bias yi high warning flag: False + Bias yi low warning flag: False + Bias yi low alarm flag: False + Bias yq high alarm flag: False + Bias yq high warning flag: False + Bias yq low warning flag: False + Bias yq low alarm flag: False + Bias yp high alarm flag: False + Bias yp high warning flag: False + Bias yp low warning flag: False + Bias yp low alarm flag: False + CD short high alarm flag: False + CD short high warning flag: False + CD short low warning flag: False + CD short low alarm flag: False + CD long high alarm flag: False + CD long high warning flag: False + CD long low warning flag: False + CD long low alarm flag: False + DGD high alarm flag: False + DGD high warning flag: False + DGD low warning flag: False + DGD low alarm flag: False + PDL high alarm flag: False + PDL high warning flag: False + PDL low warning flag: False + PDL low alarm flag: False + OSNR high alarm flag: False + OSNR high warning flag: False + OSNR low warning flag: False + OSNR low alarm flag: False + ESNR high alarm flag: False + ESNR high warning flag: False + ESNR low warning flag: False + ESNR low alarm flag: False + CFO high alarm flag: False + CFO high warning flag: False + CFO low warning flag: False + CFO low alarm flag: False + Txcurrpower high alarm flag: False + Txcurrpower high warning flag: False + Txcurrpower low warning flag: False + Txcurrpower low alarm flag: 
False + Rxtotpower high alarm flag: False + Rxtotpower high warning flag: False + Rxtotpower low warning flag: False + Rxtotpower low alarm flag: False + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#basic-show-commands) ## AAA & TACACS+ diff --git a/scripts/sfpshow b/scripts/sfpshow index ac0adf5c6e..5ed1390123 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -17,7 +17,13 @@ from natsort import natsorted from sonic_py_common.interface import front_panel_prefix, backplane_prefix, inband_prefix, recirc_prefix from sonic_py_common import multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string -from utilities_common.sfp_helper import QSFP_DATA_MAP, CMIS_DATA_MAP +from utilities_common.sfp_helper import ( + QSFP_DATA_MAP, + CMIS_DATA_MAP, + QSFP_STATUS_MAP, + CMIS_STATUS_MAP, + CCMIS_STATUS_MAP, +) from tabulate import tabulate # Mock the redis DB for unit test purposes @@ -233,6 +239,8 @@ ZR_PM_INFO_MAP = { ZR_PM_NOT_APPLICABLE_STR = 'Transceiver performance monitoring not applicable' +QSFP_STATUS_NOT_APPLICABLE_STR = 'Transceiver status info not applicable' + def display_invalid_intf_eeprom(intf_name): output = intf_name + ': SFP EEPROM Not detected\n' click.echo(output) @@ -249,6 +257,10 @@ def display_invalid_intf_pm(intf_name): output = intf_name + ': %s\n' % ZR_PM_NOT_APPLICABLE_STR click.echo(output) +def display_invalid_intf_status(intf_name): + output = intf_name + ': %s\n' % QSFP_STATUS_NOT_APPLICABLE_STR + click.echo(output) + class SFPShow(object): def __init__(self, intf_name, namespace_option, dump_dom=False): super(SFPShow, self).__init__() @@ -258,6 +270,7 @@ class SFPShow(object): self.table = [] self.intf_eeprom: Dict[str, str] = {} self.intf_pm: Dict[str, str] = {} + self.intf_status: Dict[str, str] = {} self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace_option) # Convert dict values to cli output string @@ -280,6 +293,17 @@ class SFPShow(object): 
units) return output + # Convert sfp status in DB to cli output string + def convert_sfp_status_to_output_string(self, sfp_status_dict, status_map): + indent = ' ' * 8 + output = '' + for key in status_map.keys(): + if key not in sfp_status_dict: + continue + output += '{}{}: {}\n'.format(indent, status_map[key], sfp_status_dict[key]) + + return output + # Convert sfp info in DB to cli output string def convert_sfp_info_to_output_string(self, sfp_info_dict): indent = ' ' * 8 @@ -439,6 +463,22 @@ class SFPShow(object): return output + # Convert sfp status info in DB to cli output string + def convert_interface_sfp_status_to_cli_output_string(self, state_db, interface_name): + sfp_status_dict = state_db.get_all(state_db.STATE_DB, 'TRANSCEIVER_STATUS|{}'.format(interface_name)) + if sfp_status_dict and len(sfp_status_dict) > 2: + # common section + output = '\n' + self.convert_sfp_status_to_output_string(sfp_status_dict, QSFP_STATUS_MAP) + # CMIS specific section + if 'module_state' in sfp_status_dict: + output += self.convert_sfp_status_to_output_string(sfp_status_dict, CMIS_STATUS_MAP) + # C-CMIS specific section + if 'tuning_in_progress' in sfp_status_dict: + output += self.convert_sfp_status_to_output_string(sfp_status_dict, CCMIS_STATUS_MAP) + else: + output = QSFP_STATUS_NOT_APPLICABLE_STR + '\n' + return output + def convert_pm_prefix_to_threshold_prefix(self, pm_prefix): if pm_prefix == 'uncorr_frames': return 'postfecber' @@ -551,6 +591,19 @@ class SFPShow(object): self.intf_pm[interface] = self.convert_interface_sfp_pm_to_cli_output_string( self.db, interface) + @multi_asic_util.run_on_multi_asic + def get_status(self): + if self.intf_name is not None: + self.intf_status[self.intf_name] = self.convert_interface_sfp_status_to_cli_output_string( + self.db, self.intf_name) + else: + port_table_keys = self.db.keys(self.db.APPL_DB, "PORT_TABLE:*") + for i in port_table_keys: + interface = re.split(':', i, maxsplit=1)[-1].strip() + if interface and 
interface.startswith(front_panel_prefix()) and not interface.startswith((backplane_prefix(), inband_prefix(), recirc_prefix())): + self.intf_status[interface] = self.convert_interface_sfp_status_to_cli_output_string( + self.db, interface) + def display_eeprom(self): click.echo("\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_eeprom.items())])) @@ -562,6 +615,10 @@ class SFPShow(object): def display_pm(self): click.echo( "\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_pm.items())])) + + def display_status(self): + click.echo( + "\n".join([f"{k}: {v}" for k, v in natsorted(self.intf_status.items())])) # This is our main entrypoint - the main 'sfpshow' command @@ -642,6 +699,24 @@ def pm(port, namespace): sfp.get_pm() sfp.display_pm() +# 'pm' subcommand + + +@cli.command() +@click.option('-p', '--port', metavar='', help="Display SFP status for port only") +@click.option('-n', '--namespace', default=None, help="Display interfaces for specific namespace") +def status(port, namespace): + if port and multi_asic.is_multi_asic() and namespace is None: + try: + namespace = multi_asic.get_namespace_for_port(port) + except Exception: + display_invalid_intf_status(port) + sys.exit(1) + + sfp = SFPShow(port, namespace) + sfp.get_status() + sfp.display_status() + if __name__ == "__main__": cli() diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index c376afe71e..5aeaef6bf5 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -469,6 +469,29 @@ def pm(interfacename, namespace, verbose): clicommon.run_command(cmd, display_cmd=verbose) +@transceiver.command('status') # 'status' is the actual sub-command name under 'transceiver' command +@click.argument('interfacename', required=False) +@click.option('--namespace', '-n', 'namespace', default=None, show_default=True, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), help='Namespace name or all') +@click.option('--verbose', is_flag=True, help="Enable verbose output") 
+def transceiver_status(interfacename, namespace, verbose): + """Show interface transceiver status information""" + + ctx = click.get_current_context() + + cmd = ['sfpshow', 'status'] + + if interfacename is not None: + interfacename = try_convert_interfacename_from_alias( + ctx, interfacename) + + cmd += ['-p', str(interfacename)] + + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd, display_cmd=verbose) + @transceiver.command() @click.argument('interfacename', required=False) @click.option('--namespace', '-n', 'namespace', default=None, show_default=True, diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index cd1a194ba8..883a2b36cc 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -373,6 +373,287 @@ "rx_sig_power_min": "-40", "rx_sig_power_max": "40" }, + "TRANSCEIVER_STATUS|Ethernet44":{ + "DP1State": "DataPathActivated", + "DP2State": "DataPathActivated", + "DP3State": "DataPathActivated", + "DP4State": "DataPathActivated", + "DP5State": "DataPathActivated", + "DP6State": "DataPathActivated", + "DP7State": "DataPathActivated", + "DP8State": "DataPathActivated", + "biasxihighalarm_flag": "False", + "biasxihighwarning_flag": "False", + "biasxilowalarm_flag": "False", + "biasxilowwarning_flag": "False", + "biasxphighalarm_flag": "False", + "biasxphighwarning_flag": "False", + "biasxplowalarm_flag": "False", + "biasxplowwarning_flag": "False", + "biasxqhighalarm_flag": "False", + "biasxqhighwarning_flag": "False", + "biasxqlowalarm_flag": "False", + "biasxqlowwarning_flag": "False", + "biasyihighalarm_flag": "False", + "biasyihighwarning_flag": "False", + "biasyilowalarm_flag": "False", + "biasyilowwarning_flag": "False", + "biasyphighalarm_flag": "False", + "biasyphighwarning_flag": "False", + "biasyplowalarm_flag": "False", + "biasyplowwarning_flag": "False", + "biasyqhighalarm_flag": "False", + "biasyqhighwarning_flag": "False", + "biasyqlowalarm_flag": 
"False", + "biasyqlowwarning_flag": "False", + "cdlonghighalarm_flag": "False", + "cdlonghighwarning_flag": "False", + "cdlonglowalarm_flag": "False", + "cdlonglowwarning_flag": "False", + "cdshorthighalarm_flag": "False", + "cdshorthighwarning_flag": "False", + "cdshortlowalarm_flag": "False", + "cdshortlowwarning_flag": "False", + "cfohighalarm_flag": "False", + "cfohighwarning_flag": "False", + "cfolowalarm_flag": "False", + "cfolowwarning_flag": "False", + "config_state_hostlane1": "ConfigSuccess", + "config_state_hostlane2": "ConfigSuccess", + "config_state_hostlane3": "ConfigSuccess", + "config_state_hostlane4": "ConfigSuccess", + "config_state_hostlane5": "ConfigSuccess", + "config_state_hostlane6": "ConfigSuccess", + "config_state_hostlane7": "ConfigSuccess", + "config_state_hostlane8": "ConfigSuccess", + "datapath_firmware_fault": "False", + "dgdhighalarm_flag": "False", + "dgdhighwarning_flag": "False", + "dgdlowalarm_flag": "False", + "dgdlowwarning_flag": "False", + "dpinit_pending_hostlane1": "False", + "dpinit_pending_hostlane2": "False", + "dpinit_pending_hostlane3": "False", + "dpinit_pending_hostlane4": "False", + "dpinit_pending_hostlane5": "False", + "dpinit_pending_hostlane6": "False", + "dpinit_pending_hostlane7": "False", + "dpinit_pending_hostlane8": "False", + "error": "N/A", + "esnrhighalarm_flag": "False", + "esnrhighwarning_flag": "False", + "esnrlowalarm_flag": "False", + "esnrlowwarning_flag": "False", + "fine_tuning_oor": "False", + "invalid_channel_num": "False", + "lasertemphighalarm_flag": "False", + "lasertemphighwarning_flag": "False", + "lasertemplowalarm_flag": "False", + "lasertemplowwarning_flag": "False", + "module_fault_cause": "No Fault detected", + "module_firmware_fault": "False", + "module_state": "ModuleReady", + "module_state_changed": "False", + "osnrhighalarm_flag": "False", + "osnrhighwarning_flag": "False", + "osnrlowalarm_flag": "False", + "osnrlowwarning_flag": "False", + "pdlhighalarm_flag": "False", + 
"pdlhighwarning_flag": "False", + "pdllowalarm_flag": "False", + "pdllowwarning_flag": "False", + "postfecberhighalarm_flag": "False", + "postfecberhighwarning_flag": "False", + "postfecberlowalarm_flag": "False", + "postfecberlowwarning_flag": "False", + "prefecberhighalarm_flag": "False", + "prefecberhighwarning_flag": "False", + "prefecberlowalarm_flag": "False", + "prefecberlowwarning_flag": "False", + "rxcdrlol1": "False", + "rxcdrlol2": "False", + "rxcdrlol3": "False", + "rxcdrlol4": "False", + "rxcdrlol5": "False", + "rxcdrlol6": "False", + "rxcdrlol7": "False", + "rxcdrlol8": "False", + "rxlos1": "False", + "rxlos2": "False", + "rxlos3": "False", + "rxlos4": "False", + "rxlos5": "False", + "rxlos6": "False", + "rxlos7": "False", + "rxlos8": "False", + "rxoutput_status_hostlane1": "True", + "rxoutput_status_hostlane2": "True", + "rxoutput_status_hostlane3": "True", + "rxoutput_status_hostlane4": "True", + "rxoutput_status_hostlane5": "True", + "rxoutput_status_hostlane6": "True", + "rxoutput_status_hostlane7": "True", + "rxoutput_status_hostlane8": "True", + "rxpowerhighalarm_flag1": "False", + "rxpowerhighalarm_flag2": "False", + "rxpowerhighalarm_flag3": "False", + "rxpowerhighalarm_flag4": "False", + "rxpowerhighalarm_flag5": "False", + "rxpowerhighalarm_flag6": "False", + "rxpowerhighalarm_flag7": "False", + "rxpowerhighalarm_flag8": "False", + "rxpowerhighwarning_flag1": "False", + "rxpowerhighwarning_flag2": "False", + "rxpowerhighwarning_flag3": "False", + "rxpowerhighwarning_flag4": "False", + "rxpowerhighwarning_flag5": "False", + "rxpowerhighwarning_flag6": "False", + "rxpowerhighwarning_flag7": "False", + "rxpowerhighwarning_flag8": "False", + "rxpowerlowalarm_flag1": "False", + "rxpowerlowalarm_flag2": "False", + "rxpowerlowalarm_flag3": "False", + "rxpowerlowalarm_flag4": "False", + "rxpowerlowalarm_flag5": "False", + "rxpowerlowalarm_flag6": "False", + "rxpowerlowalarm_flag7": "False", + "rxpowerlowalarm_flag8": "False", + 
"rxpowerlowwarning_flag1": "False", + "rxpowerlowwarning_flag2": "False", + "rxpowerlowwarning_flag3": "False", + "rxpowerlowwarning_flag4": "False", + "rxpowerlowwarning_flag5": "False", + "rxpowerlowwarning_flag6": "False", + "rxpowerlowwarning_flag7": "False", + "rxpowerlowwarning_flag8": "False", + "rxtotpowerhighalarm_flag": "False", + "rxtotpowerhighwarning_flag": "False", + "rxtotpowerlowalarm_flag": "False", + "rxtotpowerlowwarning_flag": "False", + "status": "1", + "target_output_power_oor": "False", + "temphighalarm_flag": "False", + "temphighwarning_flag": "False", + "templowalarm_flag": "False", + "templowwarning_flag": "False", + "tuning_complete": "False", + "tuning_in_progress": "False", + "tuning_not_accepted": "False", + "txbiashighalarm_flag1": "False", + "txbiashighalarm_flag2": "False", + "txbiashighalarm_flag3": "False", + "txbiashighalarm_flag4": "False", + "txbiashighalarm_flag5": "False", + "txbiashighalarm_flag6": "False", + "txbiashighalarm_flag7": "False", + "txbiashighalarm_flag8": "False", + "txbiashighwarning_flag1": "False", + "txbiashighwarning_flag2": "False", + "txbiashighwarning_flag3": "False", + "txbiashighwarning_flag4": "False", + "txbiashighwarning_flag5": "False", + "txbiashighwarning_flag6": "False", + "txbiashighwarning_flag7": "False", + "txbiashighwarning_flag8": "False", + "txbiaslowalarm_flag1": "False", + "txbiaslowalarm_flag2": "False", + "txbiaslowalarm_flag3": "False", + "txbiaslowalarm_flag4": "False", + "txbiaslowalarm_flag5": "False", + "txbiaslowalarm_flag6": "False", + "txbiaslowalarm_flag7": "False", + "txbiaslowalarm_flag8": "False", + "txbiaslowwarning_flag1": "False", + "txbiaslowwarning_flag2": "False", + "txbiaslowwarning_flag3": "False", + "txbiaslowwarning_flag4": "False", + "txbiaslowwarning_flag5": "False", + "txbiaslowwarning_flag6": "False", + "txbiaslowwarning_flag7": "False", + "txbiaslowwarning_flag8": "False", + "txcdrlol_hostlane1": "False", + "txcdrlol_hostlane2": "False", + 
"txcdrlol_hostlane3": "False", + "txcdrlol_hostlane4": "False", + "txcdrlol_hostlane5": "False", + "txcdrlol_hostlane6": "False", + "txcdrlol_hostlane7": "False", + "txcdrlol_hostlane8": "False", + "txcurrpowerhighalarm_flag": "False", + "txcurrpowerhighwarning_flag": "False", + "txcurrpowerlowalarm_flag": "False", + "txcurrpowerlowwarning_flag": "False", + "tx_disabled_channel": "0", + "tx1disable": "False", + "tx2disable": "False", + "tx3disable": "False", + "tx4disable": "False", + "tx5disable": "False", + "tx6disable": "False", + "tx7disable": "False", + "tx8disable": "False", + "txfault1": "False", + "txfault2": "False", + "txfault3": "False", + "txfault4": "False", + "txfault5": "False", + "txfault6": "False", + "txfault7": "False", + "txfault8": "False", + "txlos_hostlane1": "False", + "txlos_hostlane2": "False", + "txlos_hostlane3": "False", + "txlos_hostlane4": "False", + "txlos_hostlane5": "False", + "txlos_hostlane6": "False", + "txlos_hostlane7": "False", + "txlos_hostlane8": "False", + "txoutput_status1": "False", + "txoutput_status2": "False", + "txoutput_status3": "False", + "txoutput_status4": "False", + "txoutput_status5": "False", + "txoutput_status6": "False", + "txoutput_status7": "False", + "txoutput_status8": "False", + "txpowerhighalarm_flag1": "False", + "txpowerhighalarm_flag2": "False", + "txpowerhighalarm_flag3": "False", + "txpowerhighalarm_flag4": "False", + "txpowerhighalarm_flag5": "False", + "txpowerhighalarm_flag6": "False", + "txpowerhighalarm_flag7": "False", + "txpowerhighalarm_flag8": "False", + "txpowerhighwarning_flag1": "False", + "txpowerhighwarning_flag2": "False", + "txpowerhighwarning_flag3": "False", + "txpowerhighwarning_flag4": "False", + "txpowerhighwarning_flag5": "False", + "txpowerhighwarning_flag6": "False", + "txpowerhighwarning_flag7": "False", + "txpowerhighwarning_flag8": "False", + "txpowerlowalarm_flag1": "False", + "txpowerlowalarm_flag2": "False", + "txpowerlowalarm_flag3": "False", + 
"txpowerlowalarm_flag4": "False", + "txpowerlowalarm_flag5": "False", + "txpowerlowalarm_flag6": "False", + "txpowerlowalarm_flag7": "False", + "txpowerlowalarm_flag8": "False", + "txpowerlowwarning_flag1": "False", + "txpowerlowwarning_flag2": "False", + "txpowerlowwarning_flag3": "False", + "txpowerlowwarning_flag4": "False", + "txpowerlowwarning_flag5": "False", + "txpowerlowwarning_flag6": "False", + "txpowerlowwarning_flag7": "False", + "txpowerlowwarning_flag8": "False", + "vcchighalarm_flag": "False", + "vcchighwarning_flag": "False", + "vcclowalarm_flag": "False", + "vcclowwarning_flag": "False", + "wavelength_unlock_status": "False" + }, "TRANSCEIVER_INFO|Ethernet64": { "type" : "QSFP-DD Double Density 8X Pluggable Transceiver", "hardware_rev" : "X.X", @@ -416,7 +697,20 @@ }, "TRANSCEIVER_STATUS|Ethernet4": { "status": "1", - "error": "N/A" + "error": "N/A", + "rxlos1": "False", + "rxlos2": "False", + "rxlos3": "False", + "rxlos4": "False", + "txfault1": "False", + "txfault2": "False", + "txfault3": "False", + "txfault4": "False", + "tx1disable": "False", + "tx2disable": "False", + "tx3disable": "False", + "tx4disable": "False", + "tx_disabled_channel": "0" }, "TRANSCEIVER_STATUS|Ethernet8": { "status": "0", diff --git a/tests/sfp_test.py b/tests/sfp_test.py index 4900071898..f509c04748 100644 --- a/tests/sfp_test.py +++ b/tests/sfp_test.py @@ -216,6 +216,304 @@ EVM % 100.0 100.0 100.0 N/A N/A N/A N/A N/A N/A """ +test_qsfp_status_output = """\ +Ethernet4: + Tx fault flag on media lane 1: False + Tx fault flag on media lane 2: False + Tx fault flag on media lane 3: False + Tx fault flag on media lane 4: False + Rx loss of signal flag on media lane 1: False + Rx loss of signal flag on media lane 2: False + Rx loss of signal flag on media lane 3: False + Rx loss of signal flag on media lane 4: False + TX disable status on lane 1: False + TX disable status on lane 2: False + TX disable status on lane 3: False + TX disable status on lane 4: False + Disabled TX 
channels: 0 +""" + +test_qsfp_dd_status_output = """\ +Ethernet44: + Tx fault flag on media lane 1: False + Tx fault flag on media lane 2: False + Tx fault flag on media lane 3: False + Tx fault flag on media lane 4: False + Tx fault flag on media lane 5: False + Tx fault flag on media lane 6: False + Tx fault flag on media lane 7: False + Tx fault flag on media lane 8: False + Rx loss of signal flag on media lane 1: False + Rx loss of signal flag on media lane 2: False + Rx loss of signal flag on media lane 3: False + Rx loss of signal flag on media lane 4: False + Rx loss of signal flag on media lane 5: False + Rx loss of signal flag on media lane 6: False + Rx loss of signal flag on media lane 7: False + Rx loss of signal flag on media lane 8: False + TX disable status on lane 1: False + TX disable status on lane 2: False + TX disable status on lane 3: False + TX disable status on lane 4: False + TX disable status on lane 5: False + TX disable status on lane 6: False + TX disable status on lane 7: False + TX disable status on lane 8: False + Disabled TX channels: 0 + Current module state: ModuleReady + Reason of entering the module fault state: No Fault detected + Datapath firmware fault: False + Module firmware fault: False + Module state changed: False + Data path state indicator on host lane 1: DataPathActivated + Data path state indicator on host lane 2: DataPathActivated + Data path state indicator on host lane 3: DataPathActivated + Data path state indicator on host lane 4: DataPathActivated + Data path state indicator on host lane 5: DataPathActivated + Data path state indicator on host lane 6: DataPathActivated + Data path state indicator on host lane 7: DataPathActivated + Data path state indicator on host lane 8: DataPathActivated + Tx output status on media lane 1: False + Tx output status on media lane 2: False + Tx output status on media lane 3: False + Tx output status on media lane 4: False + Tx output status on media lane 5: False + Tx output 
status on media lane 6: False + Tx output status on media lane 7: False + Tx output status on media lane 8: False + Rx output status on host lane 1: True + Rx output status on host lane 2: True + Rx output status on host lane 3: True + Rx output status on host lane 4: True + Rx output status on host lane 5: True + Rx output status on host lane 6: True + Rx output status on host lane 7: True + Rx output status on host lane 8: True + Tx loss of signal flag on host lane 1: False + Tx loss of signal flag on host lane 2: False + Tx loss of signal flag on host lane 3: False + Tx loss of signal flag on host lane 4: False + Tx loss of signal flag on host lane 5: False + Tx loss of signal flag on host lane 6: False + Tx loss of signal flag on host lane 7: False + Tx loss of signal flag on host lane 8: False + Tx clock and data recovery loss of lock on host lane 1: False + Tx clock and data recovery loss of lock on host lane 2: False + Tx clock and data recovery loss of lock on host lane 3: False + Tx clock and data recovery loss of lock on host lane 4: False + Tx clock and data recovery loss of lock on host lane 5: False + Tx clock and data recovery loss of lock on host lane 6: False + Tx clock and data recovery loss of lock on host lane 7: False + Tx clock and data recovery loss of lock on host lane 8: False + Rx clock and data recovery loss of lock on media lane 1: False + Rx clock and data recovery loss of lock on media lane 2: False + Rx clock and data recovery loss of lock on media lane 3: False + Rx clock and data recovery loss of lock on media lane 4: False + Rx clock and data recovery loss of lock on media lane 5: False + Rx clock and data recovery loss of lock on media lane 6: False + Rx clock and data recovery loss of lock on media lane 7: False + Rx clock and data recovery loss of lock on media lane 8: False + Configuration status for the data path of host line 1: ConfigSuccess + Configuration status for the data path of host line 2: ConfigSuccess + Configuration 
status for the data path of host line 3: ConfigSuccess + Configuration status for the data path of host line 4: ConfigSuccess + Configuration status for the data path of host line 5: ConfigSuccess + Configuration status for the data path of host line 6: ConfigSuccess + Configuration status for the data path of host line 7: ConfigSuccess + Configuration status for the data path of host line 8: ConfigSuccess + Data path configuration updated on host lane 1: False + Data path configuration updated on host lane 2: False + Data path configuration updated on host lane 3: False + Data path configuration updated on host lane 4: False + Data path configuration updated on host lane 5: False + Data path configuration updated on host lane 6: False + Data path configuration updated on host lane 7: False + Data path configuration updated on host lane 8: False + Temperature high alarm flag: False + Temperature high warning flag: False + Temperature low warning flag: False + Temperature low alarm flag: False + Vcc high alarm flag: False + Vcc high warning flag: False + Vcc low warning flag: False + Vcc low alarm flag: False + Tx power high alarm flag on lane 1: False + Tx power high alarm flag on lane 2: False + Tx power high alarm flag on lane 3: False + Tx power high alarm flag on lane 4: False + Tx power high alarm flag on lane 5: False + Tx power high alarm flag on lane 6: False + Tx power high alarm flag on lane 7: False + Tx power high alarm flag on lane 8: False + Tx power high warning flag on lane 1: False + Tx power high warning flag on lane 2: False + Tx power high warning flag on lane 3: False + Tx power high warning flag on lane 4: False + Tx power high warning flag on lane 5: False + Tx power high warning flag on lane 6: False + Tx power high warning flag on lane 7: False + Tx power high warning flag on lane 8: False + Tx power low warning flag on lane 1: False + Tx power low warning flag on lane 2: False + Tx power low warning flag on lane 3: False + Tx power low 
warning flag on lane 4: False + Tx power low warning flag on lane 5: False + Tx power low warning flag on lane 6: False + Tx power low warning flag on lane 7: False + Tx power low warning flag on lane 8: False + Tx power low alarm flag on lane 1: False + Tx power low alarm flag on lane 2: False + Tx power low alarm flag on lane 3: False + Tx power low alarm flag on lane 4: False + Tx power low alarm flag on lane 5: False + Tx power low alarm flag on lane 6: False + Tx power low alarm flag on lane 7: False + Tx power low alarm flag on lane 8: False + Rx power high alarm flag on lane 1: False + Rx power high alarm flag on lane 2: False + Rx power high alarm flag on lane 3: False + Rx power high alarm flag on lane 4: False + Rx power high alarm flag on lane 5: False + Rx power high alarm flag on lane 6: False + Rx power high alarm flag on lane 7: False + Rx power high alarm flag on lane 8: False + Rx power high warning flag on lane 1: False + Rx power high warning flag on lane 2: False + Rx power high warning flag on lane 3: False + Rx power high warning flag on lane 4: False + Rx power high warning flag on lane 5: False + Rx power high warning flag on lane 6: False + Rx power high warning flag on lane 7: False + Rx power high warning flag on lane 8: False + Rx power low warning flag on lane 1: False + Rx power low warning flag on lane 2: False + Rx power low warning flag on lane 3: False + Rx power low warning flag on lane 4: False + Rx power low warning flag on lane 5: False + Rx power low warning flag on lane 6: False + Rx power low warning flag on lane 7: False + Rx power low warning flag on lane 8: False + Rx power low alarm flag on lane 1: False + Rx power low alarm flag on lane 2: False + Rx power low alarm flag on lane 3: False + Rx power low alarm flag on lane 4: False + Rx power low alarm flag on lane 5: False + Rx power low alarm flag on lane 6: False + Rx power low alarm flag on lane 7: False + Rx power low alarm flag on lane 8: False + Tx bias high alarm 
flag on lane 1: False + Tx bias high alarm flag on lane 2: False + Tx bias high alarm flag on lane 3: False + Tx bias high alarm flag on lane 4: False + Tx bias high alarm flag on lane 5: False + Tx bias high alarm flag on lane 6: False + Tx bias high alarm flag on lane 7: False + Tx bias high alarm flag on lane 8: False + Tx bias high warning flag on lane 1: False + Tx bias high warning flag on lane 2: False + Tx bias high warning flag on lane 3: False + Tx bias high warning flag on lane 4: False + Tx bias high warning flag on lane 5: False + Tx bias high warning flag on lane 6: False + Tx bias high warning flag on lane 7: False + Tx bias high warning flag on lane 8: False + Tx bias low warning flag on lane 1: False + Tx bias low warning flag on lane 2: False + Tx bias low warning flag on lane 3: False + Tx bias low warning flag on lane 4: False + Tx bias low warning flag on lane 5: False + Tx bias low warning flag on lane 6: False + Tx bias low warning flag on lane 7: False + Tx bias low warning flag on lane 8: False + Tx bias low alarm flag on lane 1: False + Tx bias low alarm flag on lane 2: False + Tx bias low alarm flag on lane 3: False + Tx bias low alarm flag on lane 4: False + Tx bias low alarm flag on lane 5: False + Tx bias low alarm flag on lane 6: False + Tx bias low alarm flag on lane 7: False + Tx bias low alarm flag on lane 8: False + Laser temperature high alarm flag: False + Laser temperature high warning flag: False + Laser temperature low warning flag: False + Laser temperature low alarm flag: False + Prefec ber high alarm flag: False + Prefec ber high warning flag: False + Prefec ber low warning flag: False + Prefec ber low alarm flag: False + Postfec ber high alarm flag: False + Postfec ber high warning flag: False + Postfec ber low warning flag: False + Postfec ber low alarm flag: False + Tuning in progress status: False + Laser unlocked status: False + Target output power out of range flag: False + Fine tuning out of range flag: False + 
Tuning not accepted flag: False + Invalid channel number flag: False + Tuning complete flag: False + Bias xi high alarm flag: False + Bias xi high warning flag: False + Bias xi low warning flag: False + Bias xi low alarm flag: False + Bias xq high alarm flag: False + Bias xq high warning flag: False + Bias xq low warning flag: False + Bias xq low alarm flag: False + Bias xp high alarm flag: False + Bias xp high warning flag: False + Bias xp low warning flag: False + Bias xp low alarm flag: False + Bias yi high alarm flag: False + Bias yi high warning flag: False + Bias yi low warning flag: False + Bias yi low alarm flag: False + Bias yq high alarm flag: False + Bias yq high warning flag: False + Bias yq low warning flag: False + Bias yq low alarm flag: False + Bias yp high alarm flag: False + Bias yp high warning flag: False + Bias yp low warning flag: False + Bias yp low alarm flag: False + CD short high alarm flag: False + CD short high warning flag: False + CD short low warning flag: False + CD short low alarm flag: False + CD long high alarm flag: False + CD long high warning flag: False + CD long low warning flag: False + CD long low alarm flag: False + DGD high alarm flag: False + DGD high warning flag: False + DGD low warning flag: False + DGD low alarm flag: False + PDL high alarm flag: False + PDL high warning flag: False + PDL low warning flag: False + PDL low alarm flag: False + OSNR high alarm flag: False + OSNR high warning flag: False + OSNR low warning flag: False + OSNR low alarm flag: False + ESNR high alarm flag: False + ESNR high warning flag: False + ESNR low warning flag: False + ESNR low alarm flag: False + CFO high alarm flag: False + CFO high warning flag: False + CFO low warning flag: False + CFO low alarm flag: False + Txcurrpower high alarm flag: False + Txcurrpower high warning flag: False + Txcurrpower low warning flag: False + Txcurrpower low alarm flag: False + Rxtotpower high alarm flag: False + Rxtotpower high warning flag: False + 
Rxtotpower low warning flag: False + Rxtotpower low alarm flag: False +""" + test_cmis_eeprom_output = """\ Ethernet64: SFP EEPROM detected Active Firmware: X.X @@ -452,6 +750,14 @@ Ethernet64: Transceiver performance monitoring not applicable """ +test_qsfp_dd_status_all_output = """\ +Ethernet0: Transceiver status info not applicable + +Ethernet4: Transceiver status info not applicable + +Ethernet64: Transceiver status info not applicable +""" + class TestSFP(object): @classmethod def setup_class(cls): @@ -569,6 +875,23 @@ def test_qsfp_dd_pm(self): expected = "Ethernet200: Transceiver performance monitoring not applicable" assert result_lines == expected + def test_qsfp_status(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"], ["Ethernet4"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_status_output + + def test_qsfp_dd_status(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"], ["Ethernet44"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_status_output + + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"], ["Ethernet200"]) + result_lines = result.output.strip('\n') + expected = "Ethernet200: Transceiver status info not applicable" + assert result_lines == expected + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -636,6 +959,14 @@ def test_qsfp_dd_pm_with_ns(self): expected = "Ethernet0: Transceiver performance monitoring not applicable" assert result_lines == expected + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic0')) + def test_qsfp_dd_status_with_ns(self): + runner = CliRunner() + result = 
runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"], ['Ethernet0', '-n', 'asic0']) + result_lines = result.output.strip('\n') + expected = "Ethernet0: Transceiver status info not applicable" + assert result_lines == expected + @patch.object(show_module.interfaces.click.Choice, 'convert', MagicMock(return_value='asic1')) def test_cmis_sfp_info_with_ns(self): runner = CliRunner() @@ -674,6 +1005,12 @@ def test_qsfp_dd_pm_all(self): assert result.exit_code == 0 assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_pm_all_output + def test_qsfp_dd_status_all(self): + runner = CliRunner() + result = runner.invoke(show.cli.commands["interfaces"].commands["transceiver"].commands["status"]) + assert result.exit_code == 0 + assert "\n".join([ l.rstrip() for l in result.output.split('\n')]) == test_qsfp_dd_status_all_output + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index bbdd124516..c57a5dc613 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -288,7 +288,8 @@ def test_error_status_from_db(self): ['Ethernet16', 'Unplugged'], ['Ethernet28', 'Unplugged'], ['Ethernet36', 'Unknown'], - ['Ethernet40', 'Unplugged']] + ['Ethernet40', 'Unplugged'], + ['Ethernet44', 'OK']] output = sfputil.fetch_error_status_from_state_db(None, db.db) assert output == expected_output @@ -306,7 +307,8 @@ def test_error_status_from_db_RJ45(self): ['Ethernet16', 'N/A'], ['Ethernet28', 'N/A'], ['Ethernet36', 'N/A'], - ['Ethernet40', 'N/A']] + ['Ethernet40', 'N/A'], + ['Ethernet44', 'N/A']] output = sfputil.fetch_error_status_from_state_db(None, db.db) assert output == expected_output @@ -393,6 +395,7 @@ def test_show_error_status(self): Ethernet28 Unplugged Ethernet36 Unknown Ethernet40 Unplugged +Ethernet44 OK """ assert result.output == expected_output diff --git a/utilities_common/sfp_helper.py b/utilities_common/sfp_helper.py index 
a5bf7839a9..09a96ca2ea 100644 --- a/utilities_common/sfp_helper.py +++ b/utilities_common/sfp_helper.py @@ -43,6 +43,305 @@ CMIS_DATA_MAP = {**QSFP_DATA_MAP, **QSFP_CMIS_DELTA_DATA_MAP} +# Common fileds for all types: +# For non-CMIS, only first 1 or 4 lanes are applicable. +# For CMIS, all 8 lanes are applicable. +QSFP_STATUS_MAP = { + 'txfault1': 'Tx fault flag on media lane 1', + 'txfault2': 'Tx fault flag on media lane 2', + 'txfault3': 'Tx fault flag on media lane 3', + 'txfault4': 'Tx fault flag on media lane 4', + 'txfault5': 'Tx fault flag on media lane 5', + 'txfault6': 'Tx fault flag on media lane 6', + 'txfault7': 'Tx fault flag on media lane 7', + 'txfault8': 'Tx fault flag on media lane 8', + 'rxlos1': 'Rx loss of signal flag on media lane 1', + 'rxlos2': 'Rx loss of signal flag on media lane 2', + 'rxlos3': 'Rx loss of signal flag on media lane 3', + 'rxlos4': 'Rx loss of signal flag on media lane 4', + 'rxlos5': 'Rx loss of signal flag on media lane 5', + 'rxlos6': 'Rx loss of signal flag on media lane 6', + 'rxlos7': 'Rx loss of signal flag on media lane 7', + 'rxlos8': 'Rx loss of signal flag on media lane 8', + 'tx1disable': 'TX disable status on lane 1', + 'tx2disable': 'TX disable status on lane 2', + 'tx3disable': 'TX disable status on lane 3', + 'tx4disable': 'TX disable status on lane 4', + 'tx5disable': 'TX disable status on lane 5', + 'tx6disable': 'TX disable status on lane 6', + 'tx7disable': 'TX disable status on lane 7', + 'tx8disable': 'TX disable status on lane 8', + 'tx_disabled_channel': 'Disabled TX channels' +} + +# CMIS specific fields (excluding C-CMIS specific): +CMIS_STATUS_MAP = { + 'module_state': 'Current module state', + 'module_fault_cause': 'Reason of entering the module fault state', + 'datapath_firmware_fault': 'Datapath firmware fault', + 'module_firmware_fault': 'Module firmware fault', + 'module_state_changed': 'Module state changed', + 'DP1State': 'Data path state indicator on host lane 1', + 'DP2State': 'Data path 
state indicator on host lane 2', + 'DP3State': 'Data path state indicator on host lane 3', + 'DP4State': 'Data path state indicator on host lane 4', + 'DP5State': 'Data path state indicator on host lane 5', + 'DP6State': 'Data path state indicator on host lane 6', + 'DP7State': 'Data path state indicator on host lane 7', + 'DP8State': 'Data path state indicator on host lane 8', + 'txoutput_status1': 'Tx output status on media lane 1', + 'txoutput_status2': 'Tx output status on media lane 2', + 'txoutput_status3': 'Tx output status on media lane 3', + 'txoutput_status4': 'Tx output status on media lane 4', + 'txoutput_status5': 'Tx output status on media lane 5', + 'txoutput_status6': 'Tx output status on media lane 6', + 'txoutput_status7': 'Tx output status on media lane 7', + 'txoutput_status8': 'Tx output status on media lane 8', + 'rxoutput_status_hostlane1': 'Rx output status on host lane 1', + 'rxoutput_status_hostlane2': 'Rx output status on host lane 2', + 'rxoutput_status_hostlane3': 'Rx output status on host lane 3', + 'rxoutput_status_hostlane4': 'Rx output status on host lane 4', + 'rxoutput_status_hostlane5': 'Rx output status on host lane 5', + 'rxoutput_status_hostlane6': 'Rx output status on host lane 6', + 'rxoutput_status_hostlane7': 'Rx output status on host lane 7', + 'rxoutput_status_hostlane8': 'Rx output status on host lane 8', + 'txlos_hostlane1': 'Tx loss of signal flag on host lane 1', + 'txlos_hostlane2': 'Tx loss of signal flag on host lane 2', + 'txlos_hostlane3': 'Tx loss of signal flag on host lane 3', + 'txlos_hostlane4': 'Tx loss of signal flag on host lane 4', + 'txlos_hostlane5': 'Tx loss of signal flag on host lane 5', + 'txlos_hostlane6': 'Tx loss of signal flag on host lane 6', + 'txlos_hostlane7': 'Tx loss of signal flag on host lane 7', + 'txlos_hostlane8': 'Tx loss of signal flag on host lane 8', + 'txcdrlol_hostlane1': 'Tx clock and data recovery loss of lock on host lane 1', + 'txcdrlol_hostlane2': 'Tx clock and data 
recovery loss of lock on host lane 2', + 'txcdrlol_hostlane3': 'Tx clock and data recovery loss of lock on host lane 3', + 'txcdrlol_hostlane4': 'Tx clock and data recovery loss of lock on host lane 4', + 'txcdrlol_hostlane5': 'Tx clock and data recovery loss of lock on host lane 5', + 'txcdrlol_hostlane6': 'Tx clock and data recovery loss of lock on host lane 6', + 'txcdrlol_hostlane7': 'Tx clock and data recovery loss of lock on host lane 7', + 'txcdrlol_hostlane8': 'Tx clock and data recovery loss of lock on host lane 8', + 'rxcdrlol1': 'Rx clock and data recovery loss of lock on media lane 1', + 'rxcdrlol2': 'Rx clock and data recovery loss of lock on media lane 2', + 'rxcdrlol3': 'Rx clock and data recovery loss of lock on media lane 3', + 'rxcdrlol4': 'Rx clock and data recovery loss of lock on media lane 4', + 'rxcdrlol5': 'Rx clock and data recovery loss of lock on media lane 5', + 'rxcdrlol6': 'Rx clock and data recovery loss of lock on media lane 6', + 'rxcdrlol7': 'Rx clock and data recovery loss of lock on media lane 7', + 'rxcdrlol8': 'Rx clock and data recovery loss of lock on media lane 8', + 'config_state_hostlane1': 'Configuration status for the data path of host line 1', + 'config_state_hostlane2': 'Configuration status for the data path of host line 2', + 'config_state_hostlane3': 'Configuration status for the data path of host line 3', + 'config_state_hostlane4': 'Configuration status for the data path of host line 4', + 'config_state_hostlane5': 'Configuration status for the data path of host line 5', + 'config_state_hostlane6': 'Configuration status for the data path of host line 6', + 'config_state_hostlane7': 'Configuration status for the data path of host line 7', + 'config_state_hostlane8': 'Configuration status for the data path of host line 8', + 'dpinit_pending_hostlane1': 'Data path configuration updated on host lane 1', + 'dpinit_pending_hostlane2': 'Data path configuration updated on host lane 2', + 'dpinit_pending_hostlane3': 'Data 
path configuration updated on host lane 3', + 'dpinit_pending_hostlane4': 'Data path configuration updated on host lane 4', + 'dpinit_pending_hostlane5': 'Data path configuration updated on host lane 5', + 'dpinit_pending_hostlane6': 'Data path configuration updated on host lane 6', + 'dpinit_pending_hostlane7': 'Data path configuration updated on host lane 7', + 'dpinit_pending_hostlane8': 'Data path configuration updated on host lane 8', + 'temphighalarm_flag': 'Temperature high alarm flag', + 'temphighwarning_flag': 'Temperature high warning flag', + 'templowwarning_flag': 'Temperature low warning flag', + 'templowalarm_flag': 'Temperature low alarm flag', + 'vcchighalarm_flag': 'Vcc high alarm flag', + 'vcchighwarning_flag': 'Vcc high warning flag', + 'vcclowwarning_flag': 'Vcc low warning flag', + 'vcclowalarm_flag': 'Vcc low alarm flag', + 'txpowerhighalarm_flag1': 'Tx power high alarm flag on lane 1', + 'txpowerhighalarm_flag2': 'Tx power high alarm flag on lane 2', + 'txpowerhighalarm_flag3': 'Tx power high alarm flag on lane 3', + 'txpowerhighalarm_flag4': 'Tx power high alarm flag on lane 4', + 'txpowerhighalarm_flag5': 'Tx power high alarm flag on lane 5', + 'txpowerhighalarm_flag6': 'Tx power high alarm flag on lane 6', + 'txpowerhighalarm_flag7': 'Tx power high alarm flag on lane 7', + 'txpowerhighalarm_flag8': 'Tx power high alarm flag on lane 8', + 'txpowerhighwarning_flag1': 'Tx power high warning flag on lane 1', + 'txpowerhighwarning_flag2': 'Tx power high warning flag on lane 2', + 'txpowerhighwarning_flag3': 'Tx power high warning flag on lane 3', + 'txpowerhighwarning_flag4': 'Tx power high warning flag on lane 4', + 'txpowerhighwarning_flag5': 'Tx power high warning flag on lane 5', + 'txpowerhighwarning_flag6': 'Tx power high warning flag on lane 6', + 'txpowerhighwarning_flag7': 'Tx power high warning flag on lane 7', + 'txpowerhighwarning_flag8': 'Tx power high warning flag on lane 8', + 'txpowerlowwarning_flag1': 'Tx power low warning flag 
on lane 1', + 'txpowerlowwarning_flag2': 'Tx power low warning flag on lane 2', + 'txpowerlowwarning_flag3': 'Tx power low warning flag on lane 3', + 'txpowerlowwarning_flag4': 'Tx power low warning flag on lane 4', + 'txpowerlowwarning_flag5': 'Tx power low warning flag on lane 5', + 'txpowerlowwarning_flag6': 'Tx power low warning flag on lane 6', + 'txpowerlowwarning_flag7': 'Tx power low warning flag on lane 7', + 'txpowerlowwarning_flag8': 'Tx power low warning flag on lane 8', + 'txpowerlowalarm_flag1': 'Tx power low alarm flag on lane 1', + 'txpowerlowalarm_flag2': 'Tx power low alarm flag on lane 2', + 'txpowerlowalarm_flag3': 'Tx power low alarm flag on lane 3', + 'txpowerlowalarm_flag4': 'Tx power low alarm flag on lane 4', + 'txpowerlowalarm_flag5': 'Tx power low alarm flag on lane 5', + 'txpowerlowalarm_flag6': 'Tx power low alarm flag on lane 6', + 'txpowerlowalarm_flag7': 'Tx power low alarm flag on lane 7', + 'txpowerlowalarm_flag8': 'Tx power low alarm flag on lane 8', + 'rxpowerhighalarm_flag1': 'Rx power high alarm flag on lane 1', + 'rxpowerhighalarm_flag2': 'Rx power high alarm flag on lane 2', + 'rxpowerhighalarm_flag3': 'Rx power high alarm flag on lane 3', + 'rxpowerhighalarm_flag4': 'Rx power high alarm flag on lane 4', + 'rxpowerhighalarm_flag5': 'Rx power high alarm flag on lane 5', + 'rxpowerhighalarm_flag6': 'Rx power high alarm flag on lane 6', + 'rxpowerhighalarm_flag7': 'Rx power high alarm flag on lane 7', + 'rxpowerhighalarm_flag8': 'Rx power high alarm flag on lane 8', + 'rxpowerhighwarning_flag1': 'Rx power high warning flag on lane 1', + 'rxpowerhighwarning_flag2': 'Rx power high warning flag on lane 2', + 'rxpowerhighwarning_flag3': 'Rx power high warning flag on lane 3', + 'rxpowerhighwarning_flag4': 'Rx power high warning flag on lane 4', + 'rxpowerhighwarning_flag5': 'Rx power high warning flag on lane 5', + 'rxpowerhighwarning_flag6': 'Rx power high warning flag on lane 6', + 'rxpowerhighwarning_flag7': 'Rx power high 
warning flag on lane 7', + 'rxpowerhighwarning_flag8': 'Rx power high warning flag on lane 8', + 'rxpowerlowwarning_flag1': 'Rx power low warning flag on lane 1', + 'rxpowerlowwarning_flag2': 'Rx power low warning flag on lane 2', + 'rxpowerlowwarning_flag3': 'Rx power low warning flag on lane 3', + 'rxpowerlowwarning_flag4': 'Rx power low warning flag on lane 4', + 'rxpowerlowwarning_flag5': 'Rx power low warning flag on lane 5', + 'rxpowerlowwarning_flag6': 'Rx power low warning flag on lane 6', + 'rxpowerlowwarning_flag7': 'Rx power low warning flag on lane 7', + 'rxpowerlowwarning_flag8': 'Rx power low warning flag on lane 8', + 'rxpowerlowalarm_flag1': 'Rx power low alarm flag on lane 1', + 'rxpowerlowalarm_flag2': 'Rx power low alarm flag on lane 2', + 'rxpowerlowalarm_flag3': 'Rx power low alarm flag on lane 3', + 'rxpowerlowalarm_flag4': 'Rx power low alarm flag on lane 4', + 'rxpowerlowalarm_flag5': 'Rx power low alarm flag on lane 5', + 'rxpowerlowalarm_flag6': 'Rx power low alarm flag on lane 6', + 'rxpowerlowalarm_flag7': 'Rx power low alarm flag on lane 7', + 'rxpowerlowalarm_flag8': 'Rx power low alarm flag on lane 8', + 'txbiashighalarm_flag1': 'Tx bias high alarm flag on lane 1', + 'txbiashighalarm_flag2': 'Tx bias high alarm flag on lane 2', + 'txbiashighalarm_flag3': 'Tx bias high alarm flag on lane 3', + 'txbiashighalarm_flag4': 'Tx bias high alarm flag on lane 4', + 'txbiashighalarm_flag5': 'Tx bias high alarm flag on lane 5', + 'txbiashighalarm_flag6': 'Tx bias high alarm flag on lane 6', + 'txbiashighalarm_flag7': 'Tx bias high alarm flag on lane 7', + 'txbiashighalarm_flag8': 'Tx bias high alarm flag on lane 8', + 'txbiashighwarning_flag1': 'Tx bias high warning flag on lane 1', + 'txbiashighwarning_flag2': 'Tx bias high warning flag on lane 2', + 'txbiashighwarning_flag3': 'Tx bias high warning flag on lane 3', + 'txbiashighwarning_flag4': 'Tx bias high warning flag on lane 4', + 'txbiashighwarning_flag5': 'Tx bias high warning flag on lane 
5', + 'txbiashighwarning_flag6': 'Tx bias high warning flag on lane 6', + 'txbiashighwarning_flag7': 'Tx bias high warning flag on lane 7', + 'txbiashighwarning_flag8': 'Tx bias high warning flag on lane 8', + 'txbiaslowwarning_flag1': 'Tx bias low warning flag on lane 1', + 'txbiaslowwarning_flag2': 'Tx bias low warning flag on lane 2', + 'txbiaslowwarning_flag3': 'Tx bias low warning flag on lane 3', + 'txbiaslowwarning_flag4': 'Tx bias low warning flag on lane 4', + 'txbiaslowwarning_flag5': 'Tx bias low warning flag on lane 5', + 'txbiaslowwarning_flag6': 'Tx bias low warning flag on lane 6', + 'txbiaslowwarning_flag7': 'Tx bias low warning flag on lane 7', + 'txbiaslowwarning_flag8': 'Tx bias low warning flag on lane 8', + 'txbiaslowalarm_flag1': 'Tx bias low alarm flag on lane 1', + 'txbiaslowalarm_flag2': 'Tx bias low alarm flag on lane 2', + 'txbiaslowalarm_flag3': 'Tx bias low alarm flag on lane 3', + 'txbiaslowalarm_flag4': 'Tx bias low alarm flag on lane 4', + 'txbiaslowalarm_flag5': 'Tx bias low alarm flag on lane 5', + 'txbiaslowalarm_flag6': 'Tx bias low alarm flag on lane 6', + 'txbiaslowalarm_flag7': 'Tx bias low alarm flag on lane 7', + 'txbiaslowalarm_flag8': 'Tx bias low alarm flag on lane 8', + 'lasertemphighalarm_flag': 'Laser temperature high alarm flag', + 'lasertemphighwarning_flag': 'Laser temperature high warning flag', + 'lasertemplowwarning_flag': 'Laser temperature low warning flag', + 'lasertemplowalarm_flag': 'Laser temperature low alarm flag', + 'prefecberhighalarm_flag': 'Prefec ber high alarm flag', + 'prefecberhighwarning_flag': 'Prefec ber high warning flag', + 'prefecberlowwarning_flag': 'Prefec ber low warning flag', + 'prefecberlowalarm_flag': 'Prefec ber low alarm flag', + 'postfecberhighalarm_flag': 'Postfec ber high alarm flag', + 'postfecberhighwarning_flag': 'Postfec ber high warning flag', + 'postfecberlowwarning_flag': 'Postfec ber low warning flag', + 'postfecberlowalarm_flag': 'Postfec ber low alarm flag' +} + +# 
C-CMIS specific fields: +CCMIS_STATUS_MAP = { + 'tuning_in_progress': 'Tuning in progress status', + 'wavelength_unlock_status': 'Laser unlocked status', + 'target_output_power_oor': 'Target output power out of range flag', + 'fine_tuning_oor': 'Fine tuning out of range flag', + 'tuning_not_accepted': 'Tuning not accepted flag', + 'invalid_channel_num': 'Invalid channel number flag', + 'tuning_complete': 'Tuning complete flag', + 'biasxihighalarm_flag': 'Bias xi high alarm flag', + 'biasxihighwarning_flag': 'Bias xi high warning flag', + 'biasxilowwarning_flag': 'Bias xi low warning flag', + 'biasxilowalarm_flag': 'Bias xi low alarm flag', + 'biasxqhighalarm_flag': 'Bias xq high alarm flag', + 'biasxqhighwarning_flag': 'Bias xq high warning flag', + 'biasxqlowwarning_flag': 'Bias xq low warning flag', + 'biasxqlowalarm_flag': 'Bias xq low alarm flag', + 'biasxphighalarm_flag': 'Bias xp high alarm flag', + 'biasxphighwarning_flag': 'Bias xp high warning flag', + 'biasxplowwarning_flag': 'Bias xp low warning flag', + 'biasxplowalarm_flag': 'Bias xp low alarm flag', + 'biasyihighalarm_flag': 'Bias yi high alarm flag', + 'biasyihighwarning_flag': 'Bias yi high warning flag', + 'biasyilowwarning_flag': 'Bias yi low warning flag', + 'biasyilowalarm_flag': 'Bias yi low alarm flag', + 'biasyqhighalarm_flag': 'Bias yq high alarm flag', + 'biasyqhighwarning_flag': 'Bias yq high warning flag', + 'biasyqlowwarning_flag': 'Bias yq low warning flag', + 'biasyqlowalarm_flag': 'Bias yq low alarm flag', + 'biasyphighalarm_flag': 'Bias yp high alarm flag', + 'biasyphighwarning_flag': 'Bias yp high warning flag', + 'biasyplowwarning_flag': 'Bias yp low warning flag', + 'biasyplowalarm_flag': 'Bias yp low alarm flag', + 'cdshorthighalarm_flag': 'CD short high alarm flag', + 'cdshorthighwarning_flag': 'CD short high warning flag', + 'cdshortlowwarning_flag': 'CD short low warning flag', + 'cdshortlowalarm_flag': 'CD short low alarm flag', + 'cdlonghighalarm_flag': 'CD long high alarm 
flag', + 'cdlonghighwarning_flag': 'CD long high warning flag', + 'cdlonglowwarning_flag': 'CD long low warning flag', + 'cdlonglowalarm_flag': 'CD long low alarm flag', + 'dgdhighalarm_flag': 'DGD high alarm flag', + 'dgdhighwarning_flag': 'DGD high warning flag', + 'dgdlowwarning_flag': 'DGD low warning flag', + 'dgdlowalarm_flag': 'DGD low alarm flag', + 'sopmdhighalarm_flag': 'SOPMD high alarm flag', + 'sopmdhighwarning_flag': 'SOPMD high warning flag', + 'sopmdlowwarning_flag': 'SOPMD low warning flag', + 'sopmdlowalarm_flag': 'SOPMD low alarm flag', + 'pdlhighalarm_flag': 'PDL high alarm flag', + 'pdlhighwarning_flag': 'PDL high warning flag', + 'pdllowwarning_flag': 'PDL low warning flag', + 'pdllowalarm_flag': 'PDL low alarm flag', + 'osnrhighalarm_flag': 'OSNR high alarm flag', + 'osnrhighwarning_flag': 'OSNR high warning flag', + 'osnrlowwarning_flag': 'OSNR low warning flag', + 'osnrlowalarm_flag': 'OSNR low alarm flag', + 'esnrhighalarm_flag': 'ESNR high alarm flag', + 'esnrhighwarning_flag': 'ESNR high warning flag', + 'esnrlowwarning_flag': 'ESNR low warning flag', + 'esnrlowalarm_flag': 'ESNR low alarm flag', + 'cfohighalarm_flag': 'CFO high alarm flag', + 'cfohighwarning_flag': 'CFO high warning flag', + 'cfolowwarning_flag': 'CFO low warning flag', + 'cfolowalarm_flag': 'CFO low alarm flag', + 'txcurrpowerhighalarm_flag': 'Txcurrpower high alarm flag', + 'txcurrpowerhighwarning_flag': 'Txcurrpower high warning flag', + 'txcurrpowerlowwarning_flag': 'Txcurrpower low warning flag', + 'txcurrpowerlowalarm_flag': 'Txcurrpower low alarm flag', + 'rxtotpowerhighalarm_flag': 'Rxtotpower high alarm flag', + 'rxtotpowerhighwarning_flag': 'Rxtotpower high warning flag', + 'rxtotpowerlowwarning_flag': 'Rxtotpower low warning flag', + 'rxtotpowerlowalarm_flag': 'Rxtotpower low alarm flag', + 'rxsigpowerhighalarm_flag': 'Rxsigpower high alarm flag', + 'rxsigpowerhighwarning_flag': 'Rxsigpower high warning flag', + 'rxsigpowerlowwarning_flag': 'Rxsigpower low 
warning flag', + 'rxsigpowerlowalarm_flag': 'Rxsigpower low alarm flag' +} + def covert_application_advertisement_to_output_string(indent, sfp_info_dict): key = 'application_advertisement' field_name = '{}{}: '.format(indent, QSFP_DATA_MAP[key]) From 359dfc0c6edbf03c32be4be4e2893407b8d38747 Mon Sep 17 00:00:00 2001 From: Yevhen Fastiuk Date: Fri, 2 Jun 2023 23:15:34 +0300 Subject: [PATCH 24/35] [Clock] Implement clock CLI (#2793) * Implement clock CLI * Add tests for clock CLI * Update Command-Reference.md Updated relevant new CLI commands for clock mgmt config clock timezone config clock date show clock timezones --------- Signed-off-by: Yevhen Fastiuk Co-authored-by: Meir Renford --- config/main.py | 63 ++++++++++++++++++++++++++++++++++++++++ doc/Command-Reference.md | 56 +++++++++++++++++++++++++++++++++++ show/main.py | 27 ++++++++++++++--- tests/config_test.py | 62 +++++++++++++++++++++++++++++++++++++++ tests/show_test.py | 9 ++++++ 5 files changed, 213 insertions(+), 4 deletions(-) diff --git a/config/main.py b/config/main.py index aa207455af..5f06f59e1e 100644 --- a/config/main.py +++ b/config/main.py @@ -1,6 +1,7 @@ #!/usr/sbin/env python import click +import datetime import ipaddress import json import jsonpatch @@ -7103,5 +7104,67 @@ def del_subinterface(ctx, subinterface_name): except JsonPatchConflict as e: ctx.fail("{} is invalid vlan subinterface. 
Error: {}".format(subinterface_name, e)) + +# +# 'clock' group ('config clock ...') +# +@config.group() +def clock(): + """Configuring system clock""" + pass + + +def get_tzs(ctx, args, incomplete): + ret = clicommon.run_command('timedatectl list-timezones', + display_cmd=False, ignore_error=False, + return_cmd=True) + if len(ret) == 0: + return [] + + lst = ret[0].split('\n') + return [k for k in lst if incomplete in k] + + +@clock.command() +@click.argument('timezone', metavar='', required=True, + autocompletion=get_tzs) +def timezone(timezone): + """Set system timezone""" + + if timezone not in get_tzs(None, None, ''): + click.echo(f'Timezone {timezone} does not conform format') + sys.exit(1) + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, 'localhost', + {'timezone': timezone}) + + +@clock.command() +@click.argument('date', metavar='', required=True) +@click.argument('time', metavar='', required=True) +def date(date, time): + """Set system date and time""" + valid = True + try: + datetime.datetime.strptime(date, '%Y-%m-%d') + except ValueError: + click.echo(f'Date {date} does not conform format YYYY-MM-DD') + valid = False + + try: + datetime.datetime.strptime(time, '%H:%M:%S') + except ValueError: + click.echo(f'Time {time} does not conform format HH:MM:SS') + valid = False + + if not valid: + sys.exit(1) + + date_time = f'{date} {time}' + clicommon.run_command(['timedatectl', 'set-time', date_time]) + + if __name__ == '__main__': config() diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 89ac722b9b..316e8cb86e 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -543,6 +543,62 @@ This command displays the current date and time configured on the system Mon Mar 25 20:25:16 UTC 2019 ``` +**config clock date** + +This command will set the date-time of the system, given strings with date-time format + +- Usage: + ``` + config clock date + ``` + +- 
Parameters: + - _date_: valid date in format YYYY-MM-DD + - _time_: valid time in format HH:MM:SS + +- Example: + ``` + admin@sonic:~$ config clock date 2023-04-10 13:54:36 + ``` + +**config clock timezone** + +This command will set the timezone of the system, given a string of a valid timezone. + +- Usage: + ``` + config clock timezone + ``` + +- Parameters: + - _timezone_: valid timezone to be configured + + +- Example: + ``` + admin@sonic:~$ config clock timezone Africa/Accra + + +**show clock timezones** + +This command will display list of all valid timezones to be configured. + +- Usage: + ``` + show clock timezones + ``` + +- Example: + ``` + root@host:~$ show clock timezones + Africa/Abidjan + Africa/Accra + Africa/Addis_Ababa + Africa/Algiers + Africa/Asmara + ... + ``` + **show boot** This command displays the current OS image, the image to be loaded on next reboot, and lists all the available images installed on the device diff --git a/show/main.py b/show/main.py index d79777ebeb..21b284b92b 100755 --- a/show/main.py +++ b/show/main.py @@ -1771,13 +1771,32 @@ def uptime(verbose): cmd = ['uptime', '-p'] run_command(cmd, display_cmd=verbose) -@cli.command() + +# +# 'clock' command group ("show clock ...") +# +@cli.group('clock', invoke_without_command=True) +@click.pass_context @click.option('--verbose', is_flag=True, help="Enable verbose output") -def clock(verbose): +def clock(ctx, verbose): """Show date and time""" - cmd = ["date"] - run_command(cmd, display_cmd=verbose) + # If invoking subcommand, no need to do anything + if ctx.invoked_subcommand is not None: + return + run_command(['date'], display_cmd=verbose) + + +@clock.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def timezones(verbose): + """List of available timezones""" + run_command(['timedatectl', 'list-timezones'], display_cmd=verbose) + + +# +# 'system-memory' command ("show system-memory") +# @cli.command('system-memory') @click.option('--verbose', 
is_flag=True, help="Enable verbose output") def system_memory(verbose): diff --git a/tests/config_test.py b/tests/config_test.py index b5be1717cb..571800101a 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -2363,3 +2363,65 @@ def test_fec(self, mock_run_command): def teardown(self): print("TEARDOWN") + +class TestConfigClock(object): + timezone_test_val = ['Europe/Kyiv', 'Asia/Israel', 'UTC'] + + @classmethod + def setup_class(cls): + print('SETUP') + import config.main + importlib.reload(config.main) + + @patch('config.main.get_tzs', mock.Mock(return_value=timezone_test_val)) + def test_timezone_good(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['clock'].commands['timezone'], + ['UTC'], obj=obj) + + assert result.exit_code == 0 + + @patch('config.main.get_tzs', mock.Mock(return_value=timezone_test_val)) + def test_timezone_bad(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['clock'].commands['timezone'], + ['Atlantis'], obj=obj) + + assert result.exit_code != 0 + assert 'Timezone Atlantis does not conform format' in result.output + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_date_good(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['clock'].commands['date'], + ['2020-10-10', '10:20:30'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_date_bad(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['clock'].commands['date'], + ['20-10-10', '60:70:80'], obj=obj) + + assert result.exit_code != 0 + assert 'Date 20-10-10 does not conform format' in result.output + assert 'Time 60:70:80 does not conform format' in result.output + + 
@classmethod + def teardown_class(cls): + print('TEARDOWN') diff --git a/tests/show_test.py b/tests/show_test.py index b7f6a9baf8..21af60d8c0 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -926,6 +926,15 @@ def test_show_clock(self, mock_run_command): assert result.exit_code == 0 mock_run_command.assert_called_with(['date'], display_cmd=True) + @patch('show.main.run_command') + def test_show_timezone(self, mock_run_command): + runner = CliRunner() + result = runner.invoke( + show.cli.commands['clock'].commands['timezones'], ['--verbose']) + assert result.exit_code == 0 + mock_run_command.assert_called_once_with( + ['timedatectl', 'list-timezones'], display_cmd=True) + @patch('show.main.run_command') def test_show_system_memory(self, mock_run_command): runner = CliRunner() From 72ca48481645edc3437d7899e2fa754d16eff02e Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Fri, 2 Jun 2023 13:33:58 -0700 Subject: [PATCH 25/35] Add CLI configuration options for teamd retry count feature (#2642) * Add CLI configuration options for teamd retry count feature Add a SONiC CLI to more easily configure the retry count for port channels. This effectively acts like a wrapper around the underlying teamdctl command. Also add a python script that'll be installed into /usr/local/bin/teamd_increase_retry_count.py that will detect if the peer device likely supports this teamd feature (based on LLDP neighbor info) and increases the teamd retry count to 5 in preparation for warm upgrade. This script requires sudo to run. This is tied to sonic-net/sonic-buildimage#13453. 
Signed-off-by: Saikrishna Arcot * Add test for error case from teamd when it's not running Signed-off-by: Saikrishna Arcot * Fix up test cases Signed-off-by: Saikrishna Arcot * Add some error handling if teamdctl doesn't exist Signed-off-by: Saikrishna Arcot * Add probe functionality and sending current LACPDU packet functionality Signed-off-by: Saikrishna Arcot * Check to see if the retry count feature is enabled before doing a get or set Signed-off-by: Saikrishna Arcot * Add option to only send probe packets or only change retry count Signed-off-by: Saikrishna Arcot * Call the teamd retry count script if doing a warm-reboot Signed-off-by: Saikrishna Arcot * Fix pycheck errors, and disable scapy's IPv6 and verbose mode Scapy's IPv6 support appears to have caused some issues with older versions of scapy, which may be present on older SONiC images. Signed-off-by: Saikrishna Arcot * Make teamd retry count support optional Don't fail warm reboot if teamd retry count support doesn't happen to be present. Also use fastfast-reboot for Mellanox devices. Signed-off-by: Saikrishna Arcot * Address review comments, and restructure code to increase code coverage Signed-off-by: Saikrishna Arcot * Address some review comments Signed-off-by: Saikrishna Arcot * Replace tabs with spaces Signed-off-by: Saikrishna Arcot * Verify that expected keys are present in the data returned from teamdctl Also update a failure message in the warm-reboot script if the retry count script fails. 
Signed-off-by: Saikrishna Arcot * Fix TimeoutExpired undefined error Signed-off-by: Saikrishna Arcot * Add ability to mock subprocess calls (at a limited level) Signed-off-by: Saikrishna Arcot * Return an actual subprocess object, and add a test for checking timeout Signed-off-by: Saikrishna Arcot * Change variable syntax Signed-off-by: Saikrishna Arcot * Fix set being accessed with an index Signed-off-by: Saikrishna Arcot * Add option to warm-reboot script to control if teamd retry count is required or not Signed-off-by: Saikrishna Arcot * Move the teamd retry count check to before orchagent This is so that in the case of the teamd retry count check failing, there's fewer changes that happen on the system (it'll fail faster). Signed-off-by: Saikrishna Arcot * Move retry count script start to be prior to point-of-no-return This doesn't need to be after the point-of-no-return, since this will detach and be sending LACPDUs on its own. Signed-off-by: Saikrishna Arcot * Set executable bit Signed-off-by: Saikrishna Arcot * Address PR comments Signed-off-by: Saikrishna Arcot * Change to case-insensitive string contains check Signed-off-by: Saikrishna Arcot * Make sure the global abort variable is used Signed-off-by: Saikrishna Arcot --------- Signed-off-by: Saikrishna Arcot --- config/main.py | 87 +++++++ scripts/fast-reboot | 32 ++- scripts/teamd_increase_retry_count.py | 322 ++++++++++++++++++++++++++ setup.py | 1 + tests/portchannel_test.py | 175 ++++++++++++++ 5 files changed, 616 insertions(+), 1 deletion(-) create mode 100755 scripts/teamd_increase_retry_count.py diff --git a/config/main.py b/config/main.py index 5f06f59e1e..aab334e712 100644 --- a/config/main.py +++ b/config/main.py @@ -2281,6 +2281,93 @@ def del_portchannel_member(ctx, portchannel_name, port_name): except JsonPatchConflict: ctx.fail("Invalid or nonexistent portchannel or interface. 
Please ensure existence of portchannel member.") +@portchannel.group(cls=clicommon.AbbreviationGroup, name='retry-count') +@click.pass_context +def portchannel_retry_count(ctx): + pass + +def check_if_retry_count_is_enabled(ctx, portchannel_name): + try: + proc = subprocess.Popen(["teamdctl", portchannel_name, "state", "item", "get", "runner.enable_retry_count_feature"], text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, err = proc.communicate(timeout=10) + if proc.returncode != 0: + ctx.fail("Unable to determine if the retry count feature is enabled or not: {}".format(err.strip())) + return output.strip() == "true" + except subprocess.TimeoutExpired as e: + proc.kill() + proc.communicate() + ctx.fail("Unable to determine if the retry count feature is enabled or not: {}".format(e)) + +@portchannel_retry_count.command('get') +@click.argument('portchannel_name', metavar='', required=True) +@click.pass_context +def get_portchannel_retry_count(ctx, portchannel_name): + """Get the retry count for a port channel""" + db = ValidatedConfigDBConnector(ctx.obj['db']) + + # Don't proceed if the port channel name is not valid + if is_portchannel_name_valid(portchannel_name) is False: + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" + .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + + # Don't proceed if the port channel does not exist + if is_portchannel_present_in_db(db, portchannel_name) is False: + ctx.fail("{} is not present.".format(portchannel_name)) + + try: + is_retry_count_enabled = check_if_retry_count_is_enabled(ctx, portchannel_name) + if not is_retry_count_enabled: + ctx.fail("Retry count feature is not enabled!") + + proc = subprocess.Popen(["teamdctl", portchannel_name, "state", "item", "get", "runner.retry_count"], text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, err = proc.communicate(timeout=10) + if proc.returncode != 0: + ctx.fail("Unable to get the retry count: 
{}".format(err.strip())) + click.echo(output.strip()) + except FileNotFoundError: + ctx.fail("Unable to get the retry count: teamdctl could not be run") + except subprocess.TimeoutExpired as e: + proc.kill() + proc.communicate() + ctx.fail("Unable to get the retry count: {}".format(e)) + except Exception as e: + ctx.fail("Unable to get the retry count: {}".format(e)) + +@portchannel_retry_count.command('set') +@click.argument('portchannel_name', metavar='', required=True) +@click.argument('retry_count', metavar='', required=True, type=click.IntRange(3,10)) +@click.pass_context +def set_portchannel_retry_count(ctx, portchannel_name, retry_count): + """Set the retry count for a port channel""" + db = ValidatedConfigDBConnector(ctx.obj['db']) + + # Don't proceed if the port channel name is not valid + if is_portchannel_name_valid(portchannel_name) is False: + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" + .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + + # Don't proceed if the port channel does not exist + if is_portchannel_present_in_db(db, portchannel_name) is False: + ctx.fail("{} is not present.".format(portchannel_name)) + + try: + is_retry_count_enabled = check_if_retry_count_is_enabled(ctx, portchannel_name) + if not is_retry_count_enabled: + ctx.fail("Retry count feature is not enabled!") + + proc = subprocess.Popen(["teamdctl", portchannel_name, "state", "item", "set", "runner.retry_count", str(retry_count)], text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, err = proc.communicate(timeout=10) + if proc.returncode != 0: + ctx.fail("Unable to set the retry count: {}".format(err.strip())) + except FileNotFoundError: + ctx.fail("Unable to set the retry count: teamdctl could not be run") + except subprocess.TimeoutExpired as e: + proc.kill() + proc.communicate() + ctx.fail("Unable to set the retry count: {}".format(e)) + except Exception as e: + ctx.fail("Unable to set the retry count: 
{}".format(e)) + # # 'mirror_session' group ('config mirror_session ...') diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 99a631046f..917449b436 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -31,6 +31,7 @@ TAG_LATEST=yes DETACH=no LOG_PATH="/var/log/${REBOOT_TYPE}.txt" UIMAGE_HDR_SIZE=64 +REQUIRE_TEAMD_RETRY_COUNT=no # Require 100M available on the hard drive for warm reboot temp files, # Size is in 1K blocks: @@ -48,6 +49,7 @@ EXIT_DB_INTEGRITY_FAILURE=15 EXIT_NO_CONTROL_PLANE_ASSISTANT=20 EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21 EXIT_PLATFORM_FW_AU_FAILURE=22 +EXIT_TEAMD_RETRY_COUNT_FAILURE=23 function error() { @@ -79,13 +81,15 @@ function showHelpAndExit() echo " -t : Don't tag the current kube images as latest" echo " -D : detached mode - closing terminal will not cause stopping reboot" echo " -u : include ssd-upgrader-part in boot options" + echo " -n : don't require peer devices to be running SONiC with retry count feature [default]" + echo " -N : require peer devices to be running SONiC with retry count feature" exit "${EXIT_SUCCESS}" } function parseOptions() { - while getopts "vfidh?rkxc:sDu" opt; do #TODO "t" is missing + while getopts "vfidh?rkxc:sDunN" opt; do #TODO "t" is missing case ${opt} in h|\? ) showHelpAndExit @@ -126,6 +130,12 @@ function parseOptions() u ) SSD_FW_UPDATE_BOOT_OPTION=yes ;; + n ) + REQUIRE_TEAMD_RETRY_COUNT=no + ;; + N ) + REQUIRE_TEAMD_RETRY_COUNT=yes + ;; esac done } @@ -636,6 +646,22 @@ init_warm_reboot_states setup_control_plane_assistant +TEAMD_INCREASE_RETRY_COUNT=0 +if [[ "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ]]; then + TEAMD_RETRY_COUNT_PROBE_RC=0 + /usr/local/bin/teamd_increase_retry_count.py --probe-only || TEAMD_RETRY_COUNT_PROBE_RC=$? 
+ if [[ ${TEAMD_RETRY_COUNT_PROBE_RC} -ne 0 ]]; then + if [[ "${REQUIRE_TEAMD_RETRY_COUNT}" = "yes" ]]; then + error "Could not confirm that all neighbor devices are running SONiC with the retry count feature" + exit "${EXIT_TEAMD_RETRY_COUNT_FAILURE}" + else + debug "Warning: Retry count feature support unknown for one or more neighbor devices; assuming that it's not available" + fi + else + TEAMD_INCREASE_RETRY_COUNT=1 + fi +fi + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then # Freeze orchagent for warm restart # Ask orchagent_restart_check to try freeze 5 times with interval of 2 seconds, @@ -664,6 +690,10 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then fi fi +if [[ ( "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ) && "${TEAMD_INCREASE_RETRY_COUNT}" -eq 1 ]]; then + /usr/local/bin/teamd_increase_retry_count.py +fi + # We are fully committed to reboot from this point on because critical # service will go down and we cannot recover from it. 
set +e diff --git a/scripts/teamd_increase_retry_count.py b/scripts/teamd_increase_retry_count.py new file mode 100755 index 0000000000..34238b3fee --- /dev/null +++ b/scripts/teamd_increase_retry_count.py @@ -0,0 +1,322 @@ +#!/usr/bin/python3 + +import subprocess +import json +from scapy.config import conf +conf.ipv6_enabled = False +conf.verb = False +from scapy.fields import ByteField, ShortField, MACField, XStrFixedLenField, ConditionalField +from scapy.layers.l2 import Ether +from scapy.sendrecv import sendp, sniff +from scapy.packet import Packet, split_layers, bind_layers +import scapy.contrib.lacp +import os +import re +import sys +from threading import Thread, Event +import time +import argparse +import signal + +from sonic_py_common import logger +from swsscommon.swsscommon import DBConnector, Table + +log = logger.Logger() +revertTeamdRetryCountChanges = False +DEFAULT_RETRY_COUNT = 3 +EXTENDED_RETRY_COUNT = 5 +SLOW_PROTOCOL_MAC_ADDRESS = "01:80:c2:00:00:02" +LACP_ETHERTYPE = 0x8809 + +class LACPRetryCount(Packet): + name = "LACPRetryCount" + fields_desc = [ + ByteField("version", 0xf1), + ByteField("actor_type", 1), + ByteField("actor_length", 20), + ShortField("actor_system_priority", 0), + MACField("actor_system", None), + ShortField("actor_key", 0), + ShortField("actor_port_priority", 0), + ShortField("actor_port_number", 0), + ByteField("actor_state", 0), + XStrFixedLenField("actor_reserved", "", 3), + ByteField("partner_type", 2), + ByteField("partner_length", 20), + ShortField("partner_system_priority", 0), + MACField("partner_system", None), + ShortField("partner_key", 0), + ShortField("partner_port_priority", 0), + ShortField("partner_port_number", 0), + ByteField("partner_state", 0), + XStrFixedLenField("partner_reserved", "", 3), + ByteField("collector_type", 3), + ByteField("collector_length", 16), + ShortField("collector_max_delay", 0), + XStrFixedLenField("collector_reserved", "", 12), + ConditionalField(ByteField("actor_retry_count_type", 
0x80), lambda pkt:pkt.version == 0xf1), + ConditionalField(ByteField("actor_retry_count_length", 4), lambda pkt:pkt.version == 0xf1), + ConditionalField(ByteField("actor_retry_count", 0), lambda pkt:pkt.version == 0xf1), + ConditionalField(XStrFixedLenField("actor_retry_count_reserved", "", 1), lambda pkt:pkt.version == 0xf1), + ConditionalField(ByteField("partner_retry_count_type", 0x81), lambda pkt:pkt.version == 0xf1), + ConditionalField(ByteField("partner_retry_count_length", 4), lambda pkt:pkt.version == 0xf1), + ConditionalField(ByteField("partner_retry_count", 0), lambda pkt:pkt.version == 0xf1), + ConditionalField(XStrFixedLenField("partner_retry_count_reserved", "", 1), lambda pkt:pkt.version == 0xf1), + ByteField("terminator_type", 0), + ByteField("terminator_length", 0), + ConditionalField(XStrFixedLenField("reserved", "", 42), lambda pkt:pkt.version == 0xf1), + ConditionalField(XStrFixedLenField("reserved", "", 50), lambda pkt:pkt.version != 0xf1), + ] + +split_layers(scapy.contrib.lacp.SlowProtocol, scapy.contrib.lacp.LACP, subtype=1) +bind_layers(scapy.contrib.lacp.SlowProtocol, LACPRetryCount, subtype=1) + +class LacpPacketListenThread(Thread): + def __init__(self, port, targetMacAddress, sendReadyEvent): + Thread.__init__(self) + self.port = port + self.targetMacAddress = targetMacAddress + self.sendReadyEvent = sendReadyEvent + self.detectedNewVersion = False + + def lacpPacketCallback(self, pkt): + if pkt["LACPRetryCount"].version == 0xf1: + self.detectedNewVersion = True + return self.detectedNewVersion + + def run(self): + sniff(stop_filter=self.lacpPacketCallback, iface=self.port, filter="ether proto {} and ether src {}".format(LACP_ETHERTYPE, self.targetMacAddress), + store=0, timeout=30, started_callback=self.sendReadyEvent.set) + +def getPortChannels(): + applDb = DBConnector("APPL_DB", 0) + configDb = DBConnector("CONFIG_DB", 0) + portChannelTable = Table(applDb, "LAG_TABLE") + portChannels = portChannelTable.getKeys() + activePortChannels 
= [] + for portChannel in portChannels: + state = portChannelTable.get(portChannel) + if not state or not state[0]: + continue + isAdminUp = False + isOperUp = False + for key, value in state[1]: + if key == "admin_status": + isAdminUp = value == "up" + elif key == "oper_status": + isOperUp = value == "up" + if isAdminUp and isOperUp: + activePortChannels.append(portChannel) + + # Now find out which BGP sessions on these port channels are admin up. This needs to go + # through a circuitous sequence of steps. + # + # 1. Get the local IPv4/IPv6 address assigned to each port channel. + # 2. Find out which BGP session (in CONFIG_DB) has a local_addr attribute of the local + # IPv4/IPv6 address. + # 3. Check the admin_status field of that table in CONFIG_DB. + portChannelData = {} + portChannelInterfaceTable = Table(configDb, "PORTCHANNEL_INTERFACE") + portChannelInterfaces = portChannelInterfaceTable.getKeys() + for portChannelInterface in portChannelInterfaces: + if "|" not in portChannelInterface: + continue + portChannel = portChannelInterface.split("|")[0] + ipAddress = portChannelInterface.split("|")[1].split("/")[0].lower() + if portChannel not in activePortChannels: + continue + portChannelData[ipAddress] = { + "portChannel": portChannel, + "adminUp": False + } + + bgpTable = Table(configDb, "BGP_NEIGHBOR") + bgpNeighbors = bgpTable.getKeys() + for bgpNeighbor in bgpNeighbors: + neighborData = bgpTable.get(bgpNeighbor) + if not neighborData[0]: + continue + localAddr = None + isAdminUp = False + for key, value in neighborData[1]: + if key == "local_addr": + if value not in portChannelData: + break + localAddr = value.lower() + elif key == "admin_status": + isAdminUp = value == "up" + if not localAddr: + continue + portChannelData[localAddr]["adminUp"] = isAdminUp + + return set([portChannelData[x]["portChannel"] for x in portChannelData.keys() if portChannelData[x]["adminUp"]]) + +def getPortChannelConfig(portChannelName): + (processStdout, _) = 
getCmdOutput(["teamdctl", portChannelName, "state", "dump"]) + return json.loads(processStdout) + +def getLldpNeighbors(): + (processStdout, _) = getCmdOutput(["lldpctl", "-f", "json"]) + return json.loads(processStdout) + +def craftLacpPacket(portChannelConfig, portName, isResetPacket=False, newVersion=True): + portConfig = portChannelConfig["ports"][portName] + actorConfig = portConfig["runner"]["actor_lacpdu_info"] + partnerConfig = portConfig["runner"]["partner_lacpdu_info"] + l2 = Ether(dst=SLOW_PROTOCOL_MAC_ADDRESS, src=portConfig["ifinfo"]["dev_addr"], type=LACP_ETHERTYPE) + l3 = scapy.contrib.lacp.SlowProtocol(subtype=0x01) + l4 = LACPRetryCount() + if newVersion: + l4.version = 0xf1 + else: + l4.version = 0x1 + l4.actor_system_priority = actorConfig["system_priority"] + l4.actor_system = actorConfig["system"] + l4.actor_key = actorConfig["key"] + l4.actor_port_priority = actorConfig["port_priority"] + l4.actor_port_number = actorConfig["port"] + l4.actor_state = actorConfig["state"] + l4.partner_system_priority = partnerConfig["system_priority"] + l4.partner_system = partnerConfig["system"] + l4.partner_key = partnerConfig["key"] + l4.partner_port_priority = partnerConfig["port_priority"] + l4.partner_port_number = partnerConfig["port"] + l4.partner_state = partnerConfig["state"] + if newVersion: + l4.actor_retry_count = EXTENDED_RETRY_COUNT if not isResetPacket else DEFAULT_RETRY_COUNT + l4.partner_retry_count = DEFAULT_RETRY_COUNT + packet = l2 / l3 / l4 + return packet + +def sendLacpPackets(packets, revertPackets): + global revertTeamdRetryCountChanges + while not revertTeamdRetryCountChanges: + for port, packet in packets: + sendp(packet, iface=port) + time.sleep(15) + if revertTeamdRetryCountChanges: + for port, packet in revertPackets: + sendp(packet, iface=port) + +def abortTeamdChanges(signum, frame): + global revertTeamdRetryCountChanges + log.log_info("Got signal {}, reverting teamd retry count change".format(signum)) + 
revertTeamdRetryCountChanges = True + +def getCmdOutput(cmd): + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + return proc.communicate()[0], proc.returncode + +def main(probeOnly=False): + if os.geteuid() != 0: + log.log_error("Root privileges required for this operation", also_print_to_console=True) + sys.exit(1) + portChannels = getPortChannels() + if not portChannels: + log.log_info("No port channels retrieved; exiting") + return + failedPortChannels = [] + if probeOnly: + for portChannel in portChannels: + config = getPortChannelConfig(portChannel) + lldpInfo = getLldpNeighbors() + portChannelChecked = False + for portName in config["ports"].keys(): + if not "runner" in config["ports"][portName] or \ + not "partner_lacpdu_info" in config["ports"][portName]["runner"] or \ + not "actor_lacpdu_info" in config["ports"][portName]["runner"]: + log.log_error("ERROR: Missing information from teamd about {}; skipping".format(portName)) + failedPortChannels.append(portChannel) + break + + interfaceLldpInfo = [k for k in lldpInfo["lldp"]["interface"] if portName in k] + if not interfaceLldpInfo: + log.log_warning("WARNING: No LLDP info available for {}; skipping".format(portName)) + continue + interfaceLldpInfo = interfaceLldpInfo[0][portName] + peerName = list(interfaceLldpInfo["chassis"].keys())[0] + peerInfo = interfaceLldpInfo["chassis"][peerName] + if "descr" not in peerInfo: + log.log_warning("WARNING: No peer description available via LLDP for {}; skipping".format(portName)) + continue + portChannelChecked = True + if "sonic" not in peerInfo["descr"].lower(): + log.log_warning("WARNING: Peer device is not a SONiC device; skipping") + failedPortChannels.append(portChannel) + break + + sendReadyEvent = Event() + + # Start sniffing thread + lacpThread = LacpPacketListenThread(portName, config["ports"][portName]["runner"]["partner_lacpdu_info"]["system"], sendReadyEvent) + lacpThread.start() + + # Generate and send probe packet after sniffing has started + 
probePacket = craftLacpPacket(config, portName) + sendReadyEvent.wait() + sendp(probePacket, iface=portName) + + lacpThread.join() + + resetProbePacket = craftLacpPacket(config, portName, newVersion=False) + # 2-second sleep for making sure all processing is done on the peer device + time.sleep(2) + sendp(resetProbePacket, iface=portName, count=2, inter=0.5) + + if lacpThread.detectedNewVersion: + log.log_notice("SUCCESS: Peer device {} is running version of SONiC with teamd retry count feature".format(peerName), also_print_to_console=True) + break + else: + log.log_warning("WARNING: Peer device {} is running version of SONiC without teamd retry count feature".format(peerName), also_print_to_console=True) + failedPortChannels.append(portChannel) + break + if not portChannelChecked: + log.log_warning("WARNING: No information available about peer device on port channel {}".format(portChannel), also_print_to_console=True) + failedPortChannels.append(portChannel) + if failedPortChannels: + log.log_error("ERROR: There are port channels/peer devices that failed the probe: {}".format(failedPortChannels), also_print_to_console=True) + sys.exit(2) + else: + global revertTeamdRetryCountChanges + signal.signal(signal.SIGUSR1, abortTeamdChanges) + signal.signal(signal.SIGTERM, abortTeamdChanges) + (_, rc) = getCmdOutput(["config", "portchannel", "retry-count", "get", list(portChannels)[0]]) + if rc == 0: + # Currently running on SONiC version with teamd retry count feature + for portChannel in portChannels: + getCmdOutput(["config", "portchannel", "retry-count", "set", portChannel, str(EXTENDED_RETRY_COUNT)]) + pid = os.fork() + if pid == 0: + # Running in a new process, detached from parent process + while not revertTeamdRetryCountChanges: + time.sleep(15) + if revertTeamdRetryCountChanges: + for portChannel in portChannels: + getCmdOutput(["config", "portchannel", "retry-count", "set", portChannel, str(DEFAULT_RETRY_COUNT)]) + else: + lacpPackets = [] + revertLacpPackets = 
[] + for portChannel in portChannels: + config = getPortChannelConfig(portChannel) + for portName in config["ports"].keys(): + if not "runner" in config["ports"][portName] or \ + not "partner_lacpdu_info" in config["ports"][portName]["runner"] or \ + not "actor_lacpdu_info" in config["ports"][portName]["runner"]: + log.log_error("ERROR: Missing information from teamd about {}; skipping".format(portName)) + break + + packet = craftLacpPacket(config, portName) + lacpPackets.append((portName, packet)) + packet = craftLacpPacket(config, portName, isResetPacket=True) + revertLacpPackets.append((portName, packet)) + pid = os.fork() + if pid == 0: + # Running in a new process, detached from parent process + sendLacpPackets(lacpPackets, revertLacpPackets) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Teamd retry count changer.') + parser.add_argument('--probe-only', action='store_true', + help='Probe the peer devices only, to verify that they support the teamd retry count feature') + args = parser.parse_args() + main(args.probe_only) diff --git a/setup.py b/setup.py index ea0e949ab9..bc69337b8c 100644 --- a/setup.py +++ b/setup.py @@ -165,6 +165,7 @@ 'scripts/soft-reboot', 'scripts/storyteller', 'scripts/syseeprom-to-json', + 'scripts/teamd_increase_retry_count.py', 'scripts/tempershow', 'scripts/tunnelstat', 'scripts/update_json.py', diff --git a/tests/portchannel_test.py b/tests/portchannel_test.py index 4d6eb33ed0..9b8bf56863 100644 --- a/tests/portchannel_test.py +++ b/tests/portchannel_test.py @@ -1,5 +1,6 @@ import os import pytest +import subprocess import traceback import mock @@ -13,6 +14,7 @@ from mock import patch class TestPortChannel(object): + @classmethod def setup_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "1" @@ -268,6 +270,179 @@ def test_delete_portchannel_which_is_member_of_a_vlan(self): assert result.exit_code != 0 assert "PortChannel1001 has vlan Vlan4000 configured, remove vlan membership to proceed" in 
result.output + def test_get_invalid_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # get the retry count of a portchannel with an invalid portchannel name + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["Ethernet48"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Ethernet48 is invalid!" in result.output + + def test_set_invalid_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # set the retry count of a portchannel with an invalid portchannel name + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["Ethernet48", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Ethernet48 is invalid!" in result.output + + def test_get_non_existing_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # get the retry count of a portchannel with portchannel not yet created + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel0005"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChannel0005 is not present." in result.output + + def test_set_non_existing_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # set the retry count of a portchannel with portchannel not yet created + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel0005", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChannel0005 is not present." 
in result.output + + originalSubprocessPopen = subprocess.Popen + + class SubprocessMock: + def __init__(self, *args, **kwargs): + self.retryCountEnabled = True + self.timeout = False + + def __call__(self, *args, **kwargs): + stdoutResult = "" + stderrResult = "" + rc = 0 + + commandArgs = args[0] + if commandArgs[0] != "teamdctl": + return TestPortChannel.originalSubprocessPopen(*args, **kwargs) + if self.timeout: + return TestPortChannel.originalSubprocessPopen(["sleep", "90"], **kwargs) + if commandArgs[5] == "runner.enable_retry_count_feature": + return TestPortChannel.originalSubprocessPopen(["echo", "true" if self.retryCountEnabled else "false"], **kwargs) + elif commandArgs[5] == "runner.retry_count": + if commandArgs[4] == "get": + return TestPortChannel.originalSubprocessPopen(["echo", "3"], **kwargs) + elif commandArgs[4] == "set": + return TestPortChannel.originalSubprocessPopen(["echo", ""], **kwargs) + else: + return TestPortChannel.originalSubprocessPopen(["false"], **kwargs) + else: + return TestPortChannel.originalSubprocessPopen(["false"], **kwargs) + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count_disabled(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = False + + # get the retry count of a portchannel, but when the retry count feature is disabled + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Retry count feature is not enabled!" 
in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count_disabled(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = False + + # set the retry count of a portchannel, but when the retry count feature is disabled + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Retry count feature is not enabled!" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count_timeout(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + subprocessMock.timeout = True + + # get the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + # expect a timeout failure + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Unable to get the retry count" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count_timeout(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + subprocessMock.timeout = True + + # set the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + # expect a timeout failure + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Unable to set the retry count" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count(self, subprocessMock): + runner = CliRunner() + db = 
Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + + # get the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + # output has been mocked + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output.strip() == "3" + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + + # set the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + # output has been mocked + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == "" + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" From 5c9b21771a733944ff1dc8058250185fef62f2f7 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:20:49 +0800 Subject: [PATCH 26/35] Fix issue: out of range sflow polling interval is accepted and stored in config_db (#2847) #### What I did Fixed issue: out of range sflow polling interval is accepted and stored in config_db. Reproduce step: ``` 1. Enable sflow feature: config feature state sflow enabled 2. Enable sflow itself: config sflow enable 3. Configure out of range polling interval: config sflow polling-interval 1. Error message is shown as expected 4. Save config: config save -y 5. Check "SFLOW" section inside config_db ``` As the interval is invalid, the expected behavior is that the interval is not saved to redis. But we see the invalid value was written to redis. #### How I did it Change `click.echo` to `ctx.fail` #### How to verify it 1. Manual test 2. 
Add a check in existing unit test case to cover the change --- config/main.py | 2 +- tests/sflow_test.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index aab334e712..450b9fc5c1 100644 --- a/config/main.py +++ b/config/main.py @@ -6631,7 +6631,7 @@ def polling_int(ctx, interval): """Set polling-interval for counter-sampling (0 to disable)""" if ADHOC_VALIDATION: if interval not in range(5, 301) and interval != 0: - click.echo("Polling interval must be between 5-300 (0 to disable)") + ctx.fail("Polling interval must be between 5-300 (0 to disable)") config_db = ValidatedConfigDBConnector(ctx.obj['db']) sflow_tbl = config_db.get_table('SFLOW') diff --git a/tests/sflow_test.py b/tests/sflow_test.py index da03ff396e..ecd622655e 100644 --- a/tests/sflow_test.py +++ b/tests/sflow_test.py @@ -237,6 +237,7 @@ def test_config_sflow_polling_interval(self): result = runner.invoke(config.config.commands["sflow"]. commands["polling-interval"], ["500"], obj=obj) print(result.exit_code, result.output) + assert result.exit_code != 0 assert "Polling interval must be between 5-300" in result.output # set to 20 From 7d803aedf68f8deb7dc989c90cbf3bae235910b2 Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Mon, 5 Jun 2023 17:51:27 +0800 Subject: [PATCH 27/35] Revert "[config]config reload should generate sysinfo if missing (#2778)" (#2865) This reverts commit 88ffb16721d6f867f71100bb14564120a456e07b. 
--- config/main.py | 13 ---------- tests/config_test.py | 61 +------------------------------------------- 2 files changed, 1 insertion(+), 73 deletions(-) diff --git a/config/main.py b/config/main.py index 450b9fc5c1..d49d8345a1 100644 --- a/config/main.py +++ b/config/main.py @@ -1536,19 +1536,6 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form click.echo("The config file {} doesn't exist".format(file)) continue - if file_format == 'config_db': - file_input = read_json_file(file) - - platform = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("platform") - mac = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("mac") - - if not platform or not mac: - log.log_warning("Input file does't have platform or mac. platform: {}, mac: {}" - .format(None if platform is None else platform, None if mac is None else mac)) - load_sysinfo = True - if load_sysinfo: try: command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] diff --git a/tests/config_test.py b/tests/config_test.py index 571800101a..332bfe14ec 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -464,66 +464,9 @@ def setup_class(cls): print("SETUP") import config.main importlib.reload(config.main) - - def add_sysinfo_to_cfg_file(self): - with open(self.dummy_cfg_file, 'w') as f: - device_metadata = { - "DEVICE_METADATA": { - "localhost": { - "platform": "some_platform", - "mac": "02:42:f0:7f:01:05" - } - } - } - f.write(json.dumps(device_metadata)) - - def test_reload_config_invalid_input(self, get_cmd_module, setup_single_broadcom_asic): - open(self.dummy_cfg_file, 'w').close() - with mock.patch( - "utilities_common.cli.run_command", - mock.MagicMock(side_effect=mock_run_command_side_effect) - ) as mock_run_command: - (config, show) = get_cmd_module - runner = CliRunner() - - result = runner.invoke( - config.config.commands["reload"], - [self.dummy_cfg_file, '-y', '-f']) - - print(result.exit_code) - 
print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - - def test_reload_config_no_sysinfo(self, get_cmd_module, setup_single_broadcom_asic): - with open(self.dummy_cfg_file, 'w') as f: - device_metadata = { - "DEVICE_METADATA": { - "localhost": { - "hwsku": "some_hwsku" - } - } - } - f.write(json.dumps(device_metadata)) - - with mock.patch( - "utilities_common.cli.run_command", - mock.MagicMock(side_effect=mock_run_command_side_effect) - ) as mock_run_command: - (config, show) = get_cmd_module - runner = CliRunner() - - result = runner.invoke( - config.config.commands["reload"], - [self.dummy_cfg_file, '-y', '-f']) - - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 + open(cls.dummy_cfg_file, 'w').close() def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): - self.add_sysinfo_to_cfg_file() with mock.patch( "utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect) @@ -543,7 +486,6 @@ def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): == RELOAD_CONFIG_DB_OUTPUT def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): - self.add_sysinfo_to_cfg_file() with mock.patch( "utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect_disabled_timer) @@ -563,7 +505,6 @@ def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broad assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): - self.add_sysinfo_to_cfg_file() with mock.patch( "utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect) From 1c1e22d3b6123f5635f3ea5f525ed7f1c1d97926 Mon Sep 17 00:00:00 2001 From: "Ravi [Marvell]" Date: Tue, 6 Jun 2023 02:33:45 +0530 Subject: [PATCH 
28/35] [acl-loader] Support for ACL table type L3V4V6 (#2794) Support a new ACL table type called L3V4V6. This table supports both v4 and v6 Match types. Add unit tests for this new ACL table type. HLD: sonic-net/SONiC#1267 --- acl_loader/main.py | 57 +++++++-- doc/Command-Reference.md | 2 +- tests/acl_input/acl1.json | 84 ++++++++++++++ .../illegal_v4v6_rule_no_ethertype.json | 109 ++++++++++++++++++ tests/acl_loader_test.py | 44 ++++++- tests/aclshow_test.py | 2 +- tests/mock_tables/config_db.json | 11 ++ 7 files changed, 299 insertions(+), 10 deletions(-) create mode 100644 tests/acl_input/illegal_v4v6_rule_no_ethertype.json diff --git a/acl_loader/main.py b/acl_loader/main.py index ff5d22f007..7261867412 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -94,7 +94,7 @@ class AclLoader(object): "ETHERTYPE_LLDP": 0x88CC, "ETHERTYPE_VLAN": 0x8100, "ETHERTYPE_ROCE": 0x8915, - "ETHERTYPE_ARP": 0x0806, + "ETHERTYPE_ARP": 0x0806, "ETHERTYPE_IPV4": 0x0800, "ETHERTYPE_IPV6": 0x86DD, "ETHERTYPE_MPLS": 0x8847 @@ -261,7 +261,7 @@ def read_acl_object_status_info(self, cfg_db_table_name, state_db_table_name): else: state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(state_db_table_name, state_db_key)) status[key]['status'] = state_db_info.get("status", "N/A") if state_db_info else "N/A" - + return status def get_sessions_db_info(self): @@ -346,6 +346,14 @@ def is_table_l3v6(self, tname): """ return self.tables_db_info[tname]["type"].upper() == "L3V6" + def is_table_l3v4v6(self, tname): + """ + Check if ACL table type is L3V4V6 + :param tname: ACL table name + :return: True if table type is L3V4V6 else False + """ + return self.tables_db_info[tname]["type"].upper() == "L3V4V6" + def is_table_l3(self, tname): """ Check if ACL table type is L3 @@ -509,6 +517,17 @@ def convert_ip(self, table_name, rule_idx, rule): # "IP_ICMP" we need to pick the correct protocol number for the IP version if rule.ip.config.protocol == "IP_ICMP" and 
self.is_table_ipv6(table_name): rule_props["IP_PROTOCOL"] = self.ip_protocol_map["IP_ICMPV6"] + elif rule.ip.config.protocol == "IP_ICMP" and self.is_table_l3v4v6(table_name): + # For L3V4V6 tables, both ICMP and ICMPv6 are supported, + # so find the IP_PROTOCOL using the ether_type. + try: + ether_type = rule.l2.config.ethertype + except Exception as e: + ether_type = None + if rule.l2.config.ethertype == "ETHERTYPE_IPV6": + rule_props["IP_PROTOCOL"] = self.ip_protocol_map["IP_ICMPV6"] + else: + rule_props["IP_PROTOCOL"] = self.ip_protocol_map[rule.ip.config.protocol] else: rule_props["IP_PROTOCOL"] = self.ip_protocol_map[rule.ip.config.protocol] else: @@ -544,9 +563,20 @@ def convert_ip(self, table_name, rule_idx, rule): def convert_icmp(self, table_name, rule_idx, rule): rule_props = {} - is_table_v6 = self.is_table_ipv6(table_name) - type_key = "ICMPV6_TYPE" if is_table_v6 else "ICMP_TYPE" - code_key = "ICMPV6_CODE" if is_table_v6 else "ICMP_CODE" + is_rule_v6 = False + if self.is_table_ipv6(table_name): + is_rule_v6 = True + elif self.is_table_l3v4v6(table_name): + # get the IP version type using Ether-Type. 
+ try: + ether_type = rule.l2.config.ethertype + if ether_type == "ETHERTYPE_IPV6": + is_rule_v6 = True + except Exception as e: + pass + + type_key = "ICMPV6_TYPE" if is_rule_v6 else "ICMP_TYPE" + code_key = "ICMPV6_CODE" if is_rule_v6 else "ICMP_CODE" if rule.icmp.config.type != "" and rule.icmp.config.type != "null": icmp_type = rule.icmp.config.type @@ -651,7 +681,18 @@ def convert_rule_to_db_schema(self, table_name, rule): rule_props["PRIORITY"] = str(self.max_priority - rule_idx) # setup default ip type match to dataplane acl (could be overriden by rule later) - if self.is_table_l3v6(table_name): + if self.is_table_l3v4v6(table_name): + # ETHERTYPE must be passed and it should be one of IPv4 or IPv6 + try: + ether_type = rule.l2.config.ethertype + except Exception as e: + raise AclLoaderException("l2:ethertype must be provided for rule #{} in table:{} of type L3V4V6".format(rule_idx, table_name)) + if ether_type not in ["ETHERTYPE_IPV4", "ETHERTYPE_IPV6"]: + # Ether type must be v4 or v6 to match IP fields, L4 (TCP/UDP) fields or ICMP fields + if rule.ip or rule.transport: + raise AclLoaderException("ethertype={} is neither ETHERTYPE_IPV4 nor ETHERTYPE_IPV6 for IP rule #{} in table:{} type L3V4V6".format(rule.l2.config.ethertype, rule_idx, table_name)) + rule_props["ETHER_TYPE"] = str(self.ethertype_map[ether_type]) + elif self.is_table_l3v6(table_name): rule_props["IP_TYPE"] = "IPV6ANY" # ETHERTYPE is not supported for DATAACLV6 elif self.is_table_l3(table_name): rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"]) @@ -682,6 +723,8 @@ def deny_rule(self, table_name): rule_props["IP_TYPE"] = "IPV6ANY" # ETHERTYPE is not supported for DATAACLV6 elif self.is_table_l3(table_name): rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"]) + elif self.is_table_l3v4v6(table_name): + rule_props["IP_TYPE"] = "IP" # Drop both v4 and v6 packets else: return {} # Don't add default deny rule if table is not [L3, L3V6] return rule_data @@ -835,7 
+878,7 @@ def show_table(self, table_name): for key, val in self.get_tables_db_info().items(): if table_name and key != table_name: continue - + stage = val.get("stage", Stage.INGRESS).lower() # Get ACL table status from STATE_DB if key in self.acl_table_status: diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 316e8cb86e..3d6f4bade1 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -1861,7 +1861,7 @@ This command is used to create new ACL tables. - Parameters: - table_name: The name of the ACL table to create. - - table_type: The type of ACL table to create (e.g. "L3", "L3V6", "MIRROR") + - table_type: The type of ACL table to create (e.g. "L3", "L3V6", "L3V4V6", "MIRROR") - description: A description of the table for the user. (default is the table_name) - ports: A comma-separated list of ports/interfaces to add to the table. The behavior is as follows: - Physical ports will be bound as physical ports diff --git a/tests/acl_input/acl1.json b/tests/acl_input/acl1.json index 177d7cb227..4bcd8049be 100644 --- a/tests/acl_input/acl1.json +++ b/tests/acl_input/acl1.json @@ -316,6 +316,90 @@ "config": { "name": "bmc_acl_northbound_v6" } + }, + "DATAACLV4V6": { + "acl-entries": { + "acl-entry": { + "1": { + "config": { + "sequence-id": 1 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "l2": { + "config": { + "vlan-id": "369", + "ethertype": "ETHERTYPE_IPV4" + } + }, + "ip": { + "config": { + "protocol": "IP_TCP", + "source-ip-address": "20.0.0.2/32", + "destination-ip-address": "30.0.0.3/32" + } + } + }, + "2": { + "config": { + "sequence-id": 2 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "l2": { + "config": { + "ethertype": "ETHERTYPE_IPV6" + } + }, + "ip": { + "config": { + "protocol": "IP_ICMP", + "source-ip-address": "::1/128", + "destination-ip-address": "::1/128" + } + }, + "icmp": { + "config": { + "type": "1", + "code": "0" + } + } + }, + "3": { + "config": { 
+ "sequence-id": 3 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "l2": { + "config": { + "ethertype": "ETHERTYPE_IPV6" + } + }, + "ip": { + "config": { + "protocol": "IP_ICMP", + "source-ip-address": "::1/128", + "destination-ip-address": "::1/128" + } + }, + "icmp": { + "config": { + "type": "128" + } + } + } + } + } } } } diff --git a/tests/acl_input/illegal_v4v6_rule_no_ethertype.json b/tests/acl_input/illegal_v4v6_rule_no_ethertype.json new file mode 100644 index 0000000000..acf30773ce --- /dev/null +++ b/tests/acl_input/illegal_v4v6_rule_no_ethertype.json @@ -0,0 +1,109 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "DATAACLV4V6": { + "acl-entries": { + "acl-entry": { + "1": { + "config": { + "sequence-id": 1 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "ip": { + "config": { + "protocol": "IP_TCP", + "source-ip-address": "20.0.0.2/32", + "destination-ip-address": "30.0.0.3/32" + } + } + }, + "2": { + "config": { + "sequence-id": 2 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "l2": { + "config": { + "ethertype": "ETHERTYPE_IPV6" + } + }, + "ip": { + "config": { + "protocol": "IP_ICMP", + "source-ip-address": "::1/128", + "destination-ip-address": "::1/128" + } + }, + "icmp": { + "config": { + "type": "1", + "code": "0" + } + } + }, + "3": { + "config": { + "sequence-id": 3 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "ip": { + "config": { + "protocol": "IP_ICMP", + "source-ip-address": "::1/128", + "destination-ip-address": "::1/128" + } + }, + "icmp": { + "config": { + "type": "1" + } + } + }, + "4": { + "config": { + "sequence-id": 2 + }, + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "l2": { + "config": { + "ethertype": "ETHERTYPE_IPV4" + } + }, + "ip": { + "config": { + "protocol": "IP_ICMP", + "source-ip-address": "20.0.0.2/32", + "destination-ip-address": "30.0.0.3/32" + } + }, + "icmp": { + "config": { 
+ "type": "1", + "code": "0" + } + } + } + } + } + } + } + } + } +} diff --git a/tests/acl_loader_test.py b/tests/acl_loader_test.py index 37f6430733..adcf38fe37 100644 --- a/tests/acl_loader_test.py +++ b/tests/acl_loader_test.py @@ -21,7 +21,7 @@ def test_acl_empty(self): def test_valid(self): yang_acl = AclLoader.parse_acl_json(os.path.join(test_path, 'acl_input/acl1.json')) - assert len(yang_acl.acl.acl_sets.acl_set) == 8 + assert len(yang_acl.acl.acl_sets.acl_set) == 9 def test_invalid(self): with pytest.raises(AclLoaderException): @@ -95,6 +95,42 @@ def test_ethertype_translation(self, acl_loader): "PRIORITY": "9997" } + def test_v4_rule_inv4v6_table(self, acl_loader): + acl_loader.rules_info = {} + acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json')) + assert acl_loader.rules_info[("DATAACLV4V6", "RULE_1")] + assert acl_loader.rules_info[("DATAACLV4V6", "RULE_1")] == { + "VLAN_ID": 369, + "ETHER_TYPE": 2048, + "IP_PROTOCOL": 6, + "SRC_IP": "20.0.0.2/32", + "DST_IP": "30.0.0.3/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9999" + } + + def test_v6_rule_inv4v6_table(self, acl_loader): + acl_loader.rules_info = {} + acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json')) + assert acl_loader.rules_info[("DATAACLV4V6", "RULE_2")] + assert acl_loader.rules_info[("DATAACLV4V6", "RULE_2")] == { + "ETHER_TYPE": 34525, + "IP_PROTOCOL": 58, + "SRC_IPV6": "::1/128", + "DST_IPV6": "::1/128", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "9998", + 'ICMPV6_CODE': 0, + 'ICMPV6_TYPE': 1 + } + + def test_rule_without_ethertype_inv4v6(self, acl_loader): + acl_loader.rules_info = {} + acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/illegal_v4v6_rule_no_ethertype.json')) + assert not acl_loader.rules_info.get(("DATAACLV4V6", "RULE_1")) + assert acl_loader.rules_info[("DATAACLV4V6", "RULE_2")] + assert not acl_loader.rules_info.get(("DATAACLV4V6", "RULE_3")) + def test_icmp_translation(self, acl_loader): 
acl_loader.rules_info = {} acl_loader.load_rules_from_file(os.path.join(test_path, 'acl_input/acl1.json')) @@ -151,6 +187,12 @@ def test_ingress_default_deny_rule(self, acl_loader): 'PACKET_ACTION': 'DROP', 'IP_TYPE': 'IPV6ANY' } + assert acl_loader.rules_info[('DATAACLV4V6', 'DEFAULT_RULE')] == { + 'PRIORITY': '1', + 'PACKET_ACTION': 'DROP', + 'IP_TYPE': 'IP' + } + # Verify acl-loader doesn't add default deny rule to MIRROR assert ('EVERFLOW', 'DEFAULT_RULE') not in acl_loader.rules_info # Verify acl-loader doesn't add default deny rule to MIRRORV6 diff --git a/tests/aclshow_test.py b/tests/aclshow_test.py index 8e2d20cbf1..94615e5443 100644 --- a/tests/aclshow_test.py +++ b/tests/aclshow_test.py @@ -90,7 +90,7 @@ # Expected output for aclshow -r RULE_4,RULE_6 -vv rule4_rule6_verbose_output = '' + \ """Reading ACL info... -Total number of ACL Tables: 15 +Total number of ACL Tables: 16 Total number of ACL Rules: 21 RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 5cf11f9f66..538f81d605 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -556,6 +556,17 @@ "type": "L3", "stage": "ingress" }, + "ACL_TABLE|DATAACLV4V6": { + "expireat": 1602451533.237415, + "ttl": -0.001, + "type": "hash", + "value": { + "policy_desc": "DATAACLV4V6", + "ports@": "PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023,Ethernet64,Ethernet68,Ethernet72,Ethernet76,Ethernet80,Ethernet84,Ethernet88,Ethernet92,Ethernet96,Ethernet100,Ethernet104,Ethernet108,Ethernet112,Ethernet116,Ethernet120,Ethernet124", + "stage": "ingress", + "type": "L3V4V6" + } + }, "ACL_TABLE|EVERFLOW": { "policy_desc": "EVERFLOW", "ports@": 
"PortChannel0002,PortChannel0005,PortChannel0008,PortChannel0011,PortChannel0014,PortChannel0017,PortChannel0020,PortChannel0023,Ethernet100,Ethernet104,Ethernet92,Ethernet96,Ethernet84,Ethernet88,Ethernet76,Ethernet80,Ethernet108,Ethernet112,Ethernet64,Ethernet120,Ethernet116,Ethernet124,Ethernet72,Ethernet68", From 0fc9e71b149670225b11d11917ff62accfca8920 Mon Sep 17 00:00:00 2001 From: Arvindsrinivasan Lakshmi Narasimhan <55814491+arlakshm@users.noreply.github.com> Date: Tue, 6 Jun 2023 16:54:07 -0700 Subject: [PATCH 29/35] [chassis]: remote cli commands infra for sonic chassis (#2850) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What I did Microsoft ADO 17792956 Since each Linecard is running an independent SONiC Instance, the user needs to login to a linecard to run any CLI command The user can login to each Linecard 2 ways Ssh directly to the linecard using the management IP address Ssh to supervisor and from supervisor ssh to the Linecard using the Linecard’s internal IP address To simplify the user experience and allow scripting agents to execute commands on all linecards. Two new commands are being added rexec -c This command will execute the command on specified linecards or all linecards. 
rshell connects to the linecard for interactive shell This PR is adding the changes of PR #2701 How to verify it UT and tested chassis Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- rcli/__init__.py | 0 rcli/linecard.py | 151 ++++++++++ rcli/rexec.py | 44 +++ rcli/rshell.py | 38 +++ rcli/utils.py | 149 ++++++++++ setup.py | 6 + sonic-utilities-data/bash_completion.d/rexec | 21 ++ sonic-utilities-data/bash_completion.d/rshell | 21 ++ tests/chassis_modules_test.py | 12 +- tests/mock_tables/asic0/state_db.json | 12 + tests/mock_tables/chassis_state_db.json | 9 + tests/mock_tables/database_config.json | 5 + tests/mock_tables/state_db.json | 4 +- tests/remote_cli_test.py | 260 ++++++++++++++++++ 14 files changed, 724 insertions(+), 8 deletions(-) create mode 100644 rcli/__init__.py create mode 100644 rcli/linecard.py create mode 100644 rcli/rexec.py create mode 100644 rcli/rshell.py create mode 100644 rcli/utils.py create mode 100644 sonic-utilities-data/bash_completion.d/rexec create mode 100644 sonic-utilities-data/bash_completion.d/rshell create mode 100644 tests/mock_tables/chassis_state_db.json create mode 100644 tests/remote_cli_test.py diff --git a/rcli/__init__.py b/rcli/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/rcli/linecard.py b/rcli/linecard.py new file mode 100644 index 0000000000..fdc6882ed1 --- /dev/null +++ b/rcli/linecard.py @@ -0,0 +1,151 @@ +import click +import os +import paramiko +import sys +import select +import socket +import sys +import termios +import tty + +from .utils import get_linecard_ip +from paramiko.py3compat import u +from paramiko import Channel + +EMPTY_OUTPUTS = ['', '\x1b[?2004l\r'] + +class Linecard: + + def __init__(self, linecard_name, username, password): + """ + Initialize Linecard object and store credentials, connection, and channel + + :param linecard_name: The name of the linecard you want to connect to + :param username: The username to use to connect to the linecard + :param 
password: The linecard password. If password not provided, it + will prompt the user for it + :param use_ssh_keys: Whether or not to use SSH keys to authenticate. + """ + self.ip = get_linecard_ip(linecard_name) + + if not self.ip: + sys.exit(1) + + self.linecard_name = linecard_name + self.username = username + self.password = password + + self.connection = self._connect() + + + def _connect(self): + connection = paramiko.SSHClient() + # if ip address not in known_hosts, ignore known_hosts error + connection.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + try: + connection.connect(self.ip, username=self.username, password=self.password) + except paramiko.ssh_exception.NoValidConnectionsError as e: + connection = None + click.echo(e) + return connection + + def _get_password(self): + """ + Prompts the user for a password, and returns the password + + :param username: The username that we want to get the password for + :type username: str + :return: The password for the username. 
+ """ + + return getpass( + "Password for username '{}': ".format(self.username), + # Pass in click stdout stream - this is similar to using click.echo + stream=click.get_text_stream('stdout') + ) + + def _set_tty_params(self): + tty.setraw(sys.stdin.fileno()) + tty.setcbreak(sys.stdin.fileno()) + + def _is_data_to_read(self, read): + if self.channel in read: + return True + return False + + def _is_data_to_write(self, read): + if sys.stdin in read: + return True + return False + + def _write_to_terminal(self, data): + # Write channel output to terminal + sys.stdout.write(data) + sys.stdout.flush() + + def _start_interactive_shell(self): + oldtty = termios.tcgetattr(sys.stdin) + try: + self._set_tty_params() + self.channel.settimeout(0.0) + + while True: + #Continuously wait for commands and execute them + read, write, ex = select.select([self.channel, sys.stdin], [], []) + if self._is_data_to_read(read): + try: + # Get output from channel + x = u(self.channel.recv(1024)) + if len(x) == 0: + # logout message will be displayed + break + self._write_to_terminal(x) + except socket.timeout as e: + click.echo("Connection timed out") + break + if self._is_data_to_write(read): + # If we are able to send input, get the input from stdin + x = sys.stdin.read(1) + if len(x) == 0: + break + # Send the input to the channel + self.channel.send(x) + finally: + # Now that the channel has been exited, return to the previously-saved old tty + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty) + pass + + + def start_shell(self) -> None: + """ + Opens a session, gets a pseudo-terminal, invokes a shell, and then + attaches the host shell to the remote shell. 
+ """ + # Create shell session + self.channel = self.connection.get_transport().open_session() + self.channel.get_pty() + self.channel.invoke_shell() + # Use Paramiko Interactive script to connect to the shell + self._start_interactive_shell() + # After user exits interactive shell, close the connection + self.connection.close() + + + def execute_cmd(self, command) -> str: + """ + Takes a command as an argument, executes it on the remote shell, and returns the output + + :param command: The command to execute on the remote shell + :return: The output of the command. + """ + # Execute the command and gather errors and output + _, stdout, stderr = self.connection.exec_command(command + "\n") + output = stdout.read().decode('utf-8') + + if stderr: + # Error was present, add message to output + output += stderr.read().decode('utf-8') + + # Close connection and return output + self.connection.close() + return output diff --git a/rcli/rexec.py b/rcli/rexec.py new file mode 100644 index 0000000000..fb56df8351 --- /dev/null +++ b/rcli/rexec.py @@ -0,0 +1,44 @@ +import os +import click +import paramiko +import sys + +from .linecard import Linecard +from rcli import utils as rcli_utils +from sonic_py_common import device_info + +@click.command() +@click.argument('linecard_names', nargs=-1, type=str, required=True) +@click.option('-c', '--command', type=str, required=True) +def cli(linecard_names, command): + """ + Executes a command on one or many linecards + + :param linecard_names: A list of linecard names to execute the command on, + use `all` to execute on all linecards. 
+ :param command: The command to execute on the linecard(s) + """ + if not device_info.is_chassis(): + click.echo("This commmand is only supported Chassis") + sys.exit(1) + + username = os.getlogin() + password = rcli_utils.get_password(username) + + if list(linecard_names) == ["all"]: + # Get all linecard names using autocompletion helper + linecard_names = rcli_utils.get_all_linecards(None, None, "") + + # Iterate through each linecard, execute command, and gather output + for linecard_name in linecard_names: + try: + lc = Linecard(linecard_name, username, password) + if lc.connection: + # If connection was created, connection exists. Otherwise, user will see an error message. + click.echo("======== {} output: ========".format(lc.linecard_name)) + click.echo(lc.execute_cmd(command)) + except paramiko.ssh_exception.AuthenticationException: + click.echo("Login failed on '{}' with username '{}'".format(linecard_name, lc.username)) + +if __name__=="__main__": + cli(prog_name='rexec') diff --git a/rcli/rshell.py b/rcli/rshell.py new file mode 100644 index 0000000000..decda6cd59 --- /dev/null +++ b/rcli/rshell.py @@ -0,0 +1,38 @@ +import os +import click +import paramiko +import sys + +from .linecard import Linecard +from sonic_py_common import device_info +from rcli import utils as rcli_utils + + +@click.command() +@click.argument('linecard_name', type=str, autocompletion=rcli_utils.get_all_linecards) +def cli(linecard_name): + """ + Open interactive shell for one linecard + + :param linecard_name: The name of the linecard to connect to + """ + if not device_info.is_chassis(): + click.echo("This commmand is only supported Chassis") + sys.exit(1) + + username = os.getlogin() + password = rcli_utils.get_password(username) + + try: + lc =Linecard(linecard_name, username, password) + if lc.connection: + click.echo("Connecting to {}".format(lc.linecard_name)) + # If connection was created, connection exists. Otherwise, user will see an error message. 
+ lc.start_shell() + click.echo("Connection Closed") + except paramiko.ssh_exception.AuthenticationException: + click.echo("Login failed on '{}' with username '{}'".format(linecard_name, lc.username)) + + +if __name__=="__main__": + cli(prog_name='rshell') diff --git a/rcli/utils.py b/rcli/utils.py new file mode 100644 index 0000000000..933043d069 --- /dev/null +++ b/rcli/utils.py @@ -0,0 +1,149 @@ +import click +from getpass import getpass +import os +import sys + +from swsscommon.swsscommon import SonicV2Connector + +CHASSIS_MODULE_INFO_TABLE = 'CHASSIS_MODULE_TABLE' +CHASSIS_MODULE_INFO_KEY_TEMPLATE = 'CHASSIS_MODULE {}' +CHASSIS_MODULE_INFO_DESC_FIELD = 'desc' +CHASSIS_MODULE_INFO_SLOT_FIELD = 'slot' +CHASSIS_MODULE_INFO_OPERSTATUS_FIELD = 'oper_status' +CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD = 'admin_status' + +CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' +CHASSIS_MIDPLANE_INFO_IP_FIELD = 'ip_address' +CHASSIS_MIDPLANE_INFO_ACCESS_FIELD = 'access' + +CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE' +CHASSIS_MODULE_HOSTNAME = 'module_hostname' + +def connect_to_chassis_state_db(): + chassis_state_db = SonicV2Connector(host="127.0.0.1") + chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB) + return chassis_state_db + + +def connect_state_db(): + state_db = SonicV2Connector(host="127.0.0.1") + state_db.connect(state_db.STATE_DB) + return state_db + + + +def get_linecard_module_name_from_hostname(linecard_name: str): + + chassis_state_db = connect_to_chassis_state_db() + + keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB , '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*')) + for key in keys: + module_name = key.split('|')[1] + hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME) + if hostname.replace('-', '').lower() == linecard_name.replace('-', '').lower(): + return module_name + + return None + +def get_linecard_ip(linecard_name: str): + """ + Given a linecard name, 
lookup its IP address in the midplane table + + :param linecard_name: The name of the linecard you want to connect to + :type linecard_name: str + :return: IP address of the linecard + """ + # Adapted from `show chassis modules midplane-status` command logic: + # https://github.com/sonic-net/sonic-utilities/blob/master/show/chassis_modules.py + + # if the user passes linecard hostname, then try to get the module name for that linecard + module_name = get_linecard_module_name_from_hostname(linecard_name) + # if the module name cannot be found from host, assume the user has passed module name + if module_name is None: + module_name = linecard_name + module_ip, module_access = get_module_ip_and_access_from_state_db(module_name) + + if not module_ip: + click.echo('Linecard {} not found'.format(linecard_name)) + return None + + if module_access != 'True': + click.echo('Linecard {} not accessible'.format(linecard_name)) + return None + + + return module_ip + +def get_module_ip_and_access_from_state_db(module_name): + state_db = connect_state_db() + data_dict = state_db.get_all( + state_db.STATE_DB, '{}|{}'.format(CHASSIS_MIDPLANE_INFO_TABLE,module_name )) + if data_dict is None: + return None, None + + linecard_ip = data_dict.get(CHASSIS_MIDPLANE_INFO_IP_FIELD, None) + access = data_dict.get(CHASSIS_MIDPLANE_INFO_ACCESS_FIELD, None) + + return linecard_ip, access + + +def get_all_linecards(ctx, args, incomplete) -> list: + """ + Return a list of all accessible linecard names. This function is used to + autocomplete linecard names in the CLI. + + :param ctx: The Click context object that is passed to the command function + :param args: The arguments passed to the Click command + :param incomplete: The string that the user has typed so far + :return: A list of all accessible linecard names. 
+ """ + # Adapted from `show chassis modules midplane-status` command logic: + # https://github.com/sonic-net/sonic-utilities/blob/master/show/chassis_modules.py + + + chassis_state_db = connect_to_chassis_state_db() + state_db = connect_state_db() + + linecards = [] + keys = state_db.keys(state_db.STATE_DB,'{}|*'.format(CHASSIS_MIDPLANE_INFO_TABLE)) + for key in keys: + key_list = key.split('|') + if len(key_list) != 2: # error data in DB, log it and ignore + click.echo('Warn: Invalid Key {} in {} table'.format(key, CHASSIS_MIDPLANE_INFO_TABLE )) + continue + module_name = key_list[1] + linecard_ip, access = get_module_ip_and_access_from_state_db(module_name) + if linecard_ip is None: + continue + + if access != "True" : + continue + + # get the hostname for this module + hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, module_name), CHASSIS_MODULE_HOSTNAME) + if hostname: + linecards.append(hostname) + else: + linecards.append(module_name) + + # Return a list of all matched linecards + return [lc for lc in linecards if incomplete in lc] + + +def get_password(username=None): + """ + Prompts the user for a password, and returns the password + + :param username: The username that we want to get the password for + :type username: str + :return: The password for the username. 
+ """ + + if username is None: + username =os.getlogin() + + return getpass( + "Password for username '{}': ".format(username), + # Pass in click stdout stream - this is similar to using click.echo + stream=click.get_text_stream('stdout') + ) \ No newline at end of file diff --git a/setup.py b/setup.py index bc69337b8c..547b0fac7a 100644 --- a/setup.py +++ b/setup.py @@ -74,6 +74,7 @@ 'pddf_thermalutil', 'pddf_ledutil', 'syslog_util', + 'rcli', 'show', 'show.interfaces', 'show.plugins', @@ -207,6 +208,8 @@ 'pddf_psuutil = pddf_psuutil.main:cli', 'pddf_thermalutil = pddf_thermalutil.main:cli', 'pddf_ledutil = pddf_ledutil.main:cli', + 'rexec = rcli.rexec:cli', + 'rshell = rcli.rshell:cli', 'show = show.main:cli', 'sonic-clear = clear.main:cli', 'sonic-installer = sonic_installer.main:sonic_installer', @@ -219,7 +222,9 @@ ] }, install_requires=[ + 'bcrypt==3.2.2', 'click==7.0', + 'cryptography==3.3.2', 'urllib3<2', 'click-log>=0.3.2', 'docker>=4.4.4', @@ -235,6 +240,7 @@ 'natsort>=6.2.1', # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2 'netaddr>=0.8.0', 'netifaces>=0.10.7', + 'paramiko==2.11.0', 'pexpect>=4.8.0', 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', diff --git a/sonic-utilities-data/bash_completion.d/rexec b/sonic-utilities-data/bash_completion.d/rexec new file mode 100644 index 0000000000..1199fd0676 --- /dev/null +++ b/sonic-utilities-data/bash_completion.d/rexec @@ -0,0 +1,21 @@ +_rexec_completion() { + local IFS=$' +' + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _REXEC_COMPLETE=complete $1 ) ) + return 0 +} + +_rexec_completionetup() { + local COMPLETION_OPTIONS="" + local BASH_VERSION_ARR=(${BASH_VERSION//./ }) + # Only BASH version 4.4 and later have the nosort option. 
+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then + COMPLETION_OPTIONS="-o nosort" + fi + + complete $COMPLETION_OPTIONS -F _rexec_completion rexec +} + +_rexec_completionetup; \ No newline at end of file diff --git a/sonic-utilities-data/bash_completion.d/rshell b/sonic-utilities-data/bash_completion.d/rshell new file mode 100644 index 0000000000..012f754dd7 --- /dev/null +++ b/sonic-utilities-data/bash_completion.d/rshell @@ -0,0 +1,21 @@ +_rshell_completion() { + local IFS=$' +' + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _RSHELL_COMPLETE=complete $1 ) ) + return 0 +} + +_rshell_completionetup() { + local COMPLETION_OPTIONS="" + local BASH_VERSION_ARR=(${BASH_VERSION//./ }) + # Only BASH version 4.4 and later have the nosort option. + if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then + COMPLETION_OPTIONS="-o nosort" + fi + + complete $COMPLETION_OPTIONS -F _rshell_completion rshell +} + +_rshell_completionetup; \ No newline at end of file diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index 6b9e0f3e6e..a9ba0f82a7 100644 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -33,11 +33,11 @@ """ show_chassis_midplane_output="""\ - Name IP-Address Reachability ------------ ------------- -------------- - LINE-CARD0 192.168.1.1 True - LINE-CARD1 192.168.1.2 False -SUPERVISOR0 192.168.1.100 True + Name IP-Address Reachability +---------- ------------- -------------- +LINE-CARD0 192.168.1.100 True +LINE-CARD1 192.168.1.2 False +LINE-CARD2 192.168.1.1 True """ show_chassis_system_ports_output_asic0="""\ @@ -225,7 +225,7 @@ def test_midplane_show_all_count_lines(self): result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["midplane-status"], []) print(result.output) result_lines = result.output.strip('\n').split('\n') - modules 
= ["LINE-CARD0", "LINE-CARD1", "SUPERVISOR0"] + modules = ["LINE-CARD0", "LINE-CARD1", "LINE-CARD2"] for i, module in enumerate(modules): assert module in result_lines[i + warning_lines + header_lines] assert len(result_lines) == warning_lines + header_lines + len(modules) diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 559af04826..6ae0258be0 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -287,6 +287,18 @@ "REMOTE_MOD": "0", "REMOTE_PORT": "93" }, + "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { + "ip_address": "127.0.0.1", + "access": "True" + }, + "CHASSIS_MIDPLANE_TABLE|LINE-CARD1": { + "ip_address": "127.0.0.1", + "access": "True" + }, + "CHASSIS_MIDPLANE_TABLE|LINE-CARD2": { + "ip_address": "127.0.0.1", + "access": "False" + }, "ACL_TABLE_TABLE|DATAACL_5" : { "status": "Active" }, diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json new file mode 100644 index 0000000000..5178c49ca0 --- /dev/null +++ b/tests/mock_tables/chassis_state_db.json @@ -0,0 +1,9 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + } + +} \ No newline at end of file diff --git a/tests/mock_tables/database_config.json b/tests/mock_tables/database_config.json index d12ba05414..f55c0734c2 100644 --- a/tests/mock_tables/database_config.json +++ b/tests/mock_tables/database_config.json @@ -56,6 +56,11 @@ "id" : 12, "separator": "|", "instance" : "redis" + }, + "CHASSIS_STATE_DB" : { + "id" : 13, + "separator": "|", + "instance" : "redis" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 883a2b36cc..289bf3cec2 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1229,11 +1229,11 @@ "max_queues": "20", "max_priority_groups": "8" }, - 
"CHASSIS_MIDPLANE_TABLE|SUPERVISOR0": { + "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { "ip_address": "192.168.1.100", "access": "True" }, - "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { + "CHASSIS_MIDPLANE_TABLE|LINE-CARD2": { "ip_address": "192.168.1.1", "access": "True" }, diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py new file mode 100644 index 0000000000..67545dd1b3 --- /dev/null +++ b/tests/remote_cli_test.py @@ -0,0 +1,260 @@ +import os +from click.testing import CliRunner +import paramiko +from rcli import rexec +from rcli import rshell +from rcli import linecard +from rcli import utils as rcli_utils +import sys +from io import BytesIO, StringIO +from unittest import mock +import select +import socket +import termios + +MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +hello world +======== LINE-CARD2 output: ======== +hello world +''' +REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... + + Executes a command on one or many linecards + + :param linecard_names: A list of linecard names to execute the command on, + use `all` to execute on all linecards. :param command: The command to + execute on the linecard(s) + +Options: + -c, --command TEXT [required] + --help Show this message and exit. 
+''' + +def mock_exec_command(): + + mock_stdout = BytesIO(b"""hello world""") + mock_stderr = BytesIO() + return '', mock_stdout, None + +def mock_exec_error_cmd(): + mock_stdout = BytesIO() + mock_stderr = BytesIO(b"""Command not found""") + return '', mock_stdout, mock_stderr + +def mock_connection_channel(): + c = mock.MagicMock(return_value="channel") + c.get_pty = mock.MagicMock(return_value='') + c.invoke_shell = mock.MagicMock() + c.recv = mock.MagicMock(side_effect=['abcd', '']) + return c + +def mock_connection_channel_with_timeout(): + c = mock.MagicMock(return_value="channel") + c.get_pty = mock.MagicMock(return_value='') + c.invoke_shell = mock.MagicMock() + c.recv = mock.MagicMock(side_effect=['abcd', socket.timeout(10, 'timeout')]) + return c + +def mock_paramiko_connection(channel): + # Create a mock to return for connection. + conn = mock.MagicMock() + #create a mock return for transport + t = mock.MagicMock() + t.open_session = mock.MagicMock(return_value=channel) + conn.get_transport = mock.MagicMock(return_value=t) + conn.connect = mock.MagicMock() + conn.close = mock.MagicMock() + return conn + +class TestRemoteExec(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import dbconnector + dbconnector.load_database_config() + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + #@mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_command())) + def test_rexec_with_module_name(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) + print(result.output) + 
assert result.exit_code == 0, result.output + assert "hello world" in result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_command())) + def test_rexec_with_hostname(self): + runner = CliRunner() + LINECARD_NAME = "sonic-lc1" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) + print(result.output) + assert result.exit_code == 0, result.output + assert "hello world" in result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value = mock_exec_error_cmd())) + def test_rexec_error_with_module_name(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "pwd"]) + print(result.output) + assert result.exit_code == 0, result.output + assert "Command not found" in result.output + + def test_rexec_error(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) + print(result.output) + assert result.exit_code == 1, result.output + assert "This commmand is only supported Chassis" in result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", 
mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) + def test_rexec_all(self): + runner = CliRunner() + LINECARD_NAME = "all" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) + print(result.output) + assert result.exit_code == 0, result.output + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) + def test_rexec_invalid_lc(self): + runner = CliRunner() + LINECARD_NAME = "sonic-lc-3" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) + print(result.output) + assert result.exit_code == 1, result.output + assert "Linecard sonic-lc-3 not found\n" == result.output + + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) + def test_rexec_unreachable_lc(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD1" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) + print(result.output) + assert result.exit_code == 1, result.output + assert "Linecard LINE-CARD1 not accessible\n" == result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + 
@mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) + @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) + def test_rexec_help(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD1" + result = runner.invoke(rexec.cli, ["--help"]) + print(result.output) + assert result.exit_code == 0, result.output + assert REXEC_HELP == result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', + 22): "None" }))) + @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value = "hello world")) + def test_rexec_exception(self): + runner = CliRunner() + LINECARD_NAME = "sonic-lc1" + result = runner.invoke(rexec.cli, [LINECARD_NAME, "-c", "show version"]) + print(result.output) + assert result.exit_code == 0, result.output + assert "[Errno None] Unable to connect to port 22 on 192.168.0.1\n" == result.output + + +class TestRemoteCLI(object): + @classmethod + def setup_class(cls): + print("SETUP") + from .mock_tables import dbconnector + dbconnector.load_database_config() + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) + @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) + @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) + def 
test_rcli_with_module_name(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + channel = mock_connection_channel() + + with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ + mock.patch('select.select', mock.MagicMock(return_value=([channel], [], []))): + result = runner.invoke(rshell.cli, [LINECARD_NAME]) + print(result.output) + assert result.exit_code == 0, result.output + assert "abcd" in result.output + + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) + @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) + @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) + def test_rcli_with_module_name_2(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + channel = mock_connection_channel() + + with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ + mock.patch('select.select', mock.MagicMock(side_effect=[([], [], []), ([channel], [], []),([channel], [], [])])): + result = runner.invoke(rshell.cli, [LINECARD_NAME]) + print(result.output) + assert result.exit_code == 0, result.output + assert "Connecting to LINE-CARD0" in result.output + + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) + @mock.patch.object(linecard.Linecard, '_set_tty_params', mock.MagicMock()) + @mock.patch.object(termios, 'tcsetattr', mock.MagicMock()) + @mock.patch.object(termios, 'tcgetattr', mock.MagicMock(return_value=[])) + def test_rcli_with_module_name_3(self): + runner = CliRunner() + LINECARD_NAME = 
"LINE-CARD0" + channel = mock_connection_channel_with_timeout() + + with mock.patch('paramiko.SSHClient', mock.MagicMock(return_value=mock_paramiko_connection(channel))), \ + mock.patch('select.select', mock.MagicMock(return_value=([channel], [], []))): + result = runner.invoke(rshell.cli, [LINECARD_NAME]) + print(result.output) + assert result.exit_code == 0, result.output + assert "Connecting to LINE-CARD0" in result.output + + def test_rcli_error(self): + runner = CliRunner() + LINECARD_NAME = "LINE-CARD0" + result = runner.invoke(rshell.cli, [LINECARD_NAME]) + print(result.output) + assert result.exit_code == 1, result.output + assert "This commmand is only supported Chassis" in result.output \ No newline at end of file From 51c46b5668ce0e0380ddb5b0a58c060189807e3c Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 7 Jun 2023 09:19:44 -0400 Subject: [PATCH 30/35] [sonic_installer] remove subprocess with shell=True (#2643) #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. 
#### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Pass UT Signed-off-by: Mai Bui --- sonic_installer/bootloader/aboot.py | 10 ++- sonic_installer/bootloader/grub.py | 10 +-- sonic_installer/bootloader/uboot.py | 33 +++++----- sonic_installer/common.py | 19 ++++-- sonic_installer/main.py | 72 ++++++++++---------- tests/installer_bootloader_aboot_test.py | 21 +++++- tests/installer_bootloader_grub_test.py | 37 ++++++++++- tests/installer_bootloader_uboot_test.py | 83 +++++++++++++++++++----- tests/installer_docker_test.py | 34 +++++++++- tests/swap_allocator_test.py | 2 +- tests/test_sonic_installer.py | 9 +++ 11 files changed, 242 insertions(+), 88 deletions(-) diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ff8be7896b..ac327feb4c 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -126,8 +126,14 @@ def set_next_image(self, image): return True def install_image(self, image_path): - run_command("/usr/bin/unzip -od /tmp %s boot0" % image_path) - run_command("swipath=%s target_path=/host sonic_upgrade=1 . 
/tmp/boot0" % image_path) + run_command(["/usr/bin/unzip", "-od", "/tmp", image_path, "boot0"]) + env = os.environ.copy() + env.update({ + 'swipath': image_path, + 'target_path': '/host', + 'sonic_upgrade': '1' + }) + run_command(["/bin/sh", "/tmp/boot0"], env=env) def remove_image(self, image): nextimage = self.get_next_image() diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index 7ab5c6c0bc..73d23adf99 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -51,19 +51,19 @@ def get_next_image(self): def set_default_image(self, image): images = self.get_installed_images() - command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + command = ['grub-set-default', '--boot-directory=' + HOST_PATH, str(images.index(image))] run_command(command) return True def set_next_image(self, image): images = self.get_installed_images() - command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + command = ['grub-reboot', '--boot-directory=' + HOST_PATH, str(images.index(image))] run_command(command) return True def install_image(self, image_path): - run_command("bash " + image_path) - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + run_command(["bash", image_path]) + run_command(['grub-set-default', '--boot-directory=' + HOST_PATH, '0']) def remove_image(self, image): click.echo('Updating GRUB...') @@ -82,7 +82,7 @@ def remove_image(self, image): subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) click.echo('Done') - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + run_command(['grub-set-default', '--boot-directory=' + HOST_PATH, '0']) click.echo('Image removed') def get_linux_cmdline(self, image): diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py index 9420d35644..0490a48216 100644 --- a/sonic_installer/bootloader/uboot.py +++ 
b/sonic_installer/bootloader/uboot.py @@ -6,7 +6,7 @@ import subprocess import os import re - +from shlex import split import click from ..common import ( @@ -23,12 +23,12 @@ class UbootBootloader(OnieInstallerBootloader): def get_installed_images(self): images = [] - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(["/usr/bin/fw_printenv", "-n", "sonic_version_1"], text=True, stdout=subprocess.PIPE) (out, _) = proc.communicate() image = out.rstrip() if IMAGE_PREFIX in image: images.append(image) - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(["/usr/bin/fw_printenv", "-n", "sonic_version_2"], text=True, stdout=subprocess.PIPE) (out, _) = proc.communicate() image = out.rstrip() if IMAGE_PREFIX in image: @@ -37,7 +37,7 @@ def get_installed_images(self): def get_next_image(self): images = self.get_installed_images() - proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(["/usr/bin/fw_printenv", "-n", "boot_next"], text=True, stdout=subprocess.PIPE) (out, _) = proc.communicate() image = out.rstrip() if "sonic_image_2" in image and len(images) == 2: @@ -49,31 +49,31 @@ def get_next_image(self): def set_default_image(self, image): images = self.get_installed_images() if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + run_command(['/usr/bin/fw_setenv', 'boot_next', "run sonic_image_1"]) elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + run_command(['/usr/bin/fw_setenv', 'boot_next', "run sonic_image_2"]) return True def set_next_image(self, image): images = self.get_installed_images() if image in images[0]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') + run_command(['/usr/bin/fw_setenv', 'boot_once', "run 
sonic_image_1"]) elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') + run_command(['/usr/bin/fw_setenv', 'boot_once', "run sonic_image_2"]) return True def install_image(self, image_path): - run_command("bash " + image_path) + run_command(["bash", image_path]) def remove_image(self, image): click.echo('Updating next boot ...') images = self.get_installed_images() if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') - run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') + run_command(['/usr/bin/fw_setenv', 'boot_next', "run sonic_image_2"]) + run_command(['/usr/bin/fw_setenv', 'sonic_version_1', "NONE"]) elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') - run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') + run_command(['/usr/bin/fw_setenv', 'boot_next', "run sonic_image_1"]) + run_command(['/usr/bin/fw_setenv', 'sonic_version_2', "NONE"]) image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX, 1) click.echo('Removing image root filesystem...') subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) @@ -84,17 +84,16 @@ def verify_image_platform(self, image_path): def set_fips(self, image, enable): fips = "1" if enable else "0" - proc = subprocess.Popen("/usr/bin/fw_printenv linuxargs", shell=True, text=True, stdout=subprocess.PIPE) + proc = subprocess.Popen(["/usr/bin/fw_printenv", "linuxargs"], text=True, stdout=subprocess.PIPE) (out, _) = proc.communicate() cmdline = out.strip() cmdline = re.sub('^linuxargs=', '', cmdline) cmdline = re.sub(r' sonic_fips=[^\s]', '', cmdline) + " sonic_fips=" + fips - cmdline = '"' + cmdline + '"' - run_command('/usr/bin/fw_setenv linuxargs ' + cmdline ) + run_command(['/usr/bin/fw_setenv', 'linuxargs'] + split(cmdline)) click.echo('Done') def get_fips(self, image): - proc = subprocess.Popen("/usr/bin/fw_printenv linuxargs", shell=True, text=True, stdout=subprocess.PIPE) + proc = 
subprocess.Popen(["/usr/bin/fw_printenv", "linuxargs"], text=True, stdout=subprocess.PIPE) (out, _) = proc.communicate() return 'sonic_fips=1' in out diff --git a/sonic_installer/common.py b/sonic_installer/common.py index 685063587c..f220a4b57e 100644 --- a/sonic_installer/common.py +++ b/sonic_installer/common.py @@ -8,7 +8,7 @@ import signal import click - +from shlex import join from .exception import SonicRuntimeException HOST_PATH = '/host' @@ -19,11 +19,20 @@ WORKDIR_NAME = 'work' DOCKERDIR_NAME = 'docker' -# Run bash command and print output to stdout -def run_command(command): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) +def is_list_of_strings(command): + return isinstance(command, list) and all(isinstance(item, str) for item in command) - proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE) +# Run bash command and print output to stdout +def run_command(command, stdout=subprocess.PIPE, env=None, shell=False): + if not is_list_of_strings(command): + sys.exit("Input command should be a list of strings") + if not shell: + command_str = join(command) + else: + command_str = command + click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) + + proc = subprocess.Popen(command, text=True, stdout=stdout, env=env, shell=shell) (out, _) = proc.communicate() click.echo(out) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index ce1c15866d..a5c901b6d4 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -10,7 +10,7 @@ import click from sonic_py_common import logger from swsscommon.swsscommon import SonicV2Connector - +from sonic_py_common.general import getstatusoutput_noshell_pipe from .bootloader import get_bootloader from .common import ( run_command, run_command_or_raise, @@ -122,8 +122,8 @@ def reporthook(count, block_size, total_size): # and extract tag name from docker image file. 
def get_docker_tag_name(image): # Try to get tag name from label metadata - cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["docker", "inspect", "--format", '{{.ContainerConfig.Labels.Tag}}', image] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) (out, _) = proc.communicate() if proc.returncode != 0: return "unknown" @@ -171,33 +171,32 @@ def abort_if_false(ctx, param, value): def get_container_image_name(container_name): # example image: docker-lldp-sv2:latest - cmd = "docker inspect --format '{{.Config.Image}}' " + container_name - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["docker", "inspect", "--format", '{{.Config.Image}}', container_name] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) (out, _) = proc.communicate() if proc.returncode != 0: sys.exit(proc.returncode) image_latest = out.rstrip() # example image_name: docker-lldp-sv2 - cmd = "echo " + image_latest + " | cut -d ':' -f 1" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) - image_name = proc.stdout.read().rstrip() + _, stdout = getstatusoutput_noshell_pipe(["echo", image_latest], ["cut", "-d", ':', "-f", "1"]) + image_name = stdout.rstrip() return image_name def get_container_image_id(image_tag): # TODO: extract commond docker info fetching functions # this is image_id for image with tag, like 'docker-teamd:latest' - cmd = "docker images --format '{{.ID}}' " + image_tag - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["docker", "images", "--format", '{{.ID}}', image_tag] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) image_id = proc.stdout.read().rstrip() return image_id def get_container_image_id_all(image_name): # All images id under the image name like 'docker-teamd' - cmd = "docker images --format '{{.ID}}' " + image_name - 
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + cmd = ["docker", "images", "--format", '{{.ID}}', image_name] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) image_id_all = proc.stdout.read() image_id_all = image_id_all.splitlines() image_id_all = set(image_id_all) @@ -449,7 +448,8 @@ def setup_swapmem(self): with open(swapfile, 'wb') as fd: os.posix_fallocate(fd.fileno(), 0, self.swap_mem_size * SWAPAllocator.MiB_TO_BYTES_FACTOR) os.chmod(swapfile, 0o600) - run_command(f'mkswap {swapfile}; swapon {swapfile}') + run_command(['mkswap', swapfile]) + run_command(['swapon', swapfile]) def remove_swapmem(self): swapfile = SWAPAllocator.SWAP_FILE_PATH @@ -499,7 +499,7 @@ def validate_positive_int(ctx, param, value): def sonic_installer(): """ SONiC image installation manager """ if os.geteuid() != 0: - exit("Root privileges required for this operation") + sys.exit("Root privileges required for this operation") # Warn the user if they are calling the deprecated version of the command (with an underscore instead of a hyphen) if os.path.basename(sys.argv[0]) == "sonic_installer": @@ -583,7 +583,7 @@ def install(url, force, skip_platform_check=False, skip_migration=False, skip_pa if skip_migration: echo_and_log("Skipping configuration migration as requested in the command option.") else: - run_command('config-setup backup') + run_command(['config-setup', 'backup']) update_sonic_environment(bootloader, binary_image_version) @@ -595,8 +595,10 @@ def install(url, force, skip_platform_check=False, skip_migration=False, skip_pa migrate_sonic_packages(bootloader, binary_image_version) # Finally, sync filesystem - run_command("sync;sync;sync") - run_command("sleep 3") # wait 3 seconds after sync + run_command(["sync"]) + run_command(["sync"]) + run_command(["sync"]) + run_command(["sleep", "3"]) # wait 3 seconds after sync echo_and_log('Done') @@ -801,12 +803,12 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, 
tag, warm): if container_name == "swss" or container_name == "bgp" or container_name == "teamd": if warm_configured is False and warm: - run_command("config warm_restart enable %s" % container_name) + run_command(["config", "warm_restart", "enable", "%s" % container_name]) # Fetch tag of current running image tag_previous = get_docker_tag_name(image_latest) # Load the new image beforehand to shorten disruption time - run_command("docker load < %s" % image_path) + run_command(["docker", "load", "-i", "%s" % image_path]) warm_app_names = [] # warm restart specific procssing for swss, bgp and teamd dockers. if warm_configured is True or warm: @@ -816,21 +818,21 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): if skip_check: skipPendingTaskCheck = " -s" - cmd = "docker exec -i swss orchagent_restart_check -w 2000 -r 5 " + skipPendingTaskCheck + cmd = ["docker", "exec", "-i", "swss", "orchagent_restart_check", "-w", "2000", "-r", "5", skipPendingTaskCheck] - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, text=True) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True) (out, err) = proc.communicate() if proc.returncode != 0: if not skip_check: echo_and_log("Orchagent is not in clean state, RESTARTCHECK failed", LOG_ERR) # Restore orignal config before exit if warm_configured is False and warm: - run_command("config warm_restart disable %s" % container_name) + run_command(["config", "warm_restart", "disable", "%s" % container_name]) # Clean the image loaded earlier image_id_latest = get_container_image_id(image_latest) - run_command("docker rmi -f %s" % image_id_latest) + run_command(["docker", "rmi", "-f", "%s" % image_id_latest]) # Re-point latest tag to previous tag - run_command("docker tag %s:%s %s" % (image_name, tag_previous, image_latest)) + run_command(["docker", "tag", "%s:%s" % (image_name, tag_previous), "%s" % image_latest]) sys.exit(proc.returncode) else: @@ -843,8 +845,8 @@ def 
upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): elif container_name == "bgp": # Kill bgpd to restart the bgp graceful restart procedure echo_and_log("Stopping bgp ...") - run_command("docker exec -i bgp pkill -9 zebra") - run_command("docker exec -i bgp pkill -9 bgpd") + run_command(["docker", "exec", "-i", "bgp", "pkill", "-9", "zebra"]) + run_command(["docker", "exec", "-i", "bgp", "pkill", "-9", "bgpd"]) warm_app_names = ["bgp"] echo_and_log("Stopped bgp ...") @@ -852,7 +854,7 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): echo_and_log("Stopping teamd ...") # Send USR1 signal to all teamd instances to stop them # It will prepare teamd for warm-reboot - run_command("docker exec -i teamd pkill -USR1 teamd > /dev/null") + run_command(["docker", "exec", "-i", "teamd", "pkill", "-USR1", "teamd"], stdout=subprocess.DEVNULL) warm_app_names = ["teamsyncd"] echo_and_log("Stopped teamd ...") @@ -860,13 +862,13 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): for warm_app_name in warm_app_names: hdel_warm_restart_table("STATE_DB", "WARM_RESTART_TABLE", warm_app_name, "state") - run_command("docker kill %s > /dev/null" % container_name) - run_command("docker rm %s " % container_name) + run_command(["docker", "kill", "%s" % container_name], stdout=subprocess.DEVNULL) + run_command(["docker", "rm", "%s" % container_name]) if tag is None: # example image: docker-lldp-sv2:latest tag = get_docker_tag_name(image_latest) - run_command("docker tag %s:latest %s:%s" % (image_name, image_name, tag)) - run_command("systemctl restart %s" % container_name) + run_command(["docker", "tag", "%s:latest" % image_name, "%s:%s" % (image_name, tag)]) + run_command(["systemctl", "restart", "%s" % container_name]) if cleanup_image: # All images id under the image name @@ -874,7 +876,7 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): # Unless requested, the previoud docker 
image will be preserved for id in image_id_all: if id == image_id_previous: - run_command("docker rmi -f %s" % id) + run_command(["docker", "rmi", "-f", "%s" % id]) break exp_state = "reconciled" @@ -902,7 +904,7 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): # Restore to previous cold restart setting if warm_configured is False and warm: if container_name == "swss" or container_name == "bgp" or container_name == "teamd": - run_command("config warm_restart disable %s" % container_name) + run_command(["config", "warm_restart", "disable", container_name]) if state == exp_state: echo_and_log('Done') @@ -940,11 +942,11 @@ def rollback_docker(container_name): break # make previous image as latest - run_command("docker tag %s:%s %s:latest" % (image_name, version_tag, image_name)) + run_command(["docker", "tag", "%s:%s" % (image_name, version_tag), "%s:latest" % (image_name)]) if container_name == "swss" or container_name == "bgp" or container_name == "teamd": echo_and_log("Cold reboot is required to restore system state after '{}' rollback !!".format(container_name), LOG_ERR) else: - run_command("systemctl restart %s" % container_name) + run_command(["systemctl", "restart", container_name]) echo_and_log('Done') diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py index 56eee4872e..9f98884327 100644 --- a/tests/installer_bootloader_aboot_test.py +++ b/tests/installer_bootloader_aboot_test.py @@ -1,4 +1,5 @@ -from unittest.mock import Mock, patch +import os +from unittest.mock import Mock, patch, call # Import test module import sonic_installer.bootloader.aboot as aboot @@ -53,6 +54,24 @@ def test_get_next_image(re_search_patch): re_search_patch().group = Mock(return_value=image_dir) assert bootloader.get_next_image() == exp_image +def test_install_image(): + image_path = 'sonic' + env = os.environ.copy() + env.update({ + 'swipath': image_path, + 'target_path': '/host', + 'sonic_upgrade': '1' + 
}) + + expected_calls = [ + call(["/usr/bin/unzip", "-od", "/tmp", "%s" % image_path, "boot0"]), + call(["/bin/sh", "/tmp/boot0"], env=env) + ] + with patch('sonic_installer.bootloader.aboot.run_command') as mock_cmd: + bootloader = aboot.AbootBootloader() + bootloader.install_image(image_path) + mock_cmd.assert_has_calls(expected_calls) + def test_set_fips_aboot(): image = 'test-image' dirpath = tempfile.mkdtemp() diff --git a/tests/installer_bootloader_grub_test.py b/tests/installer_bootloader_grub_test.py index ff35e13b37..9b6a76ff11 100644 --- a/tests/installer_bootloader_grub_test.py +++ b/tests/installer_bootloader_grub_test.py @@ -1,10 +1,45 @@ import os import shutil -from unittest.mock import Mock, patch +from unittest.mock import Mock, patch, call # Import test module import sonic_installer.bootloader.grub as grub +installed_images = [ + f'{grub.IMAGE_PREFIX}expeliarmus-{grub.IMAGE_PREFIX}abcde', + f'{grub.IMAGE_PREFIX}expeliarmus-abcde', +] + +def test_set_default_image(): + image = installed_images[0] + expected_call = [call(['grub-set-default', '--boot-directory=' + grub.HOST_PATH, str(installed_images.index(image))])] + + with patch("sonic_installer.bootloader.grub.run_command") as mock_cmd: + bootloader = grub.GrubBootloader() + bootloader.get_installed_images = Mock(return_value=installed_images) + bootloader.set_default_image(image) + mock_cmd.assert_has_calls(expected_call) + +def test_set_next_image(): + image = installed_images[0] + expected_call = [call(['grub-reboot', '--boot-directory=' + grub.HOST_PATH, str(installed_images.index(image))])] + + with patch("sonic_installer.bootloader.grub.run_command") as mock_cmd: + bootloader = grub.GrubBootloader() + bootloader.get_installed_images = Mock(return_value=installed_images) + bootloader.set_next_image(image) + mock_cmd.assert_has_calls(expected_call) + +def test_install_image(): + image_path = 'sonic' + expected_calls = [ + call(["bash", image_path]), + call(['grub-set-default', 
'--boot-directory=' + grub.HOST_PATH, '0']) + ] + with patch('sonic_installer.bootloader.grub.run_command') as mock_cmd: + bootloader = grub.GrubBootloader() + bootloader.install_image(image_path) + mock_cmd.assert_has_calls(expected_calls) @patch("sonic_installer.bootloader.grub.subprocess.call", Mock()) @patch("sonic_installer.bootloader.grub.open") diff --git a/tests/installer_bootloader_uboot_test.py b/tests/installer_bootloader_uboot_test.py index 49973558d0..b896e097cd 100644 --- a/tests/installer_bootloader_uboot_test.py +++ b/tests/installer_bootloader_uboot_test.py @@ -1,9 +1,15 @@ import os -from unittest.mock import Mock, patch +from unittest.mock import Mock, patch, call # Import test module import sonic_installer.bootloader.uboot as uboot +# Constants +installed_images = [ + f'{uboot.IMAGE_PREFIX}expeliarmus-{uboot.IMAGE_PREFIX}abcde', + f'{uboot.IMAGE_PREFIX}expeliarmus-abcde', +] + class MockProc(): commandline = "linuxargs=" def communicate(): @@ -12,28 +18,73 @@ def communicate(): def mock_run_command(cmd): MockProc.commandline = cmd +@patch('sonic_installer.bootloader.uboot.run_command') +def test_set_default_image(mock_run_cmd): + subcmd = ['/usr/bin/fw_setenv', 'boot_next'] + image0, image1 = ['run sonic_image_1'], ['run sonic_image_2'] + expected_call0, expected_call1 = [call(subcmd + image0)], [call(subcmd + image1)] + + bootloader = uboot.UbootBootloader() + bootloader.get_installed_images = Mock(return_value=installed_images) + bootloader.set_default_image(installed_images[0]) + assert mock_run_cmd.call_args_list == expected_call0 + + mock_run_cmd.call_args_list = [] + bootloader.set_default_image(installed_images[1]) + assert mock_run_cmd.call_args_list == expected_call1 + +@patch('sonic_installer.bootloader.uboot.run_command') +def test_set_next_image(mock_run_cmd): + subcmd = ['/usr/bin/fw_setenv', 'boot_once'] + image0, image1 = ['run sonic_image_1'], ['run sonic_image_2'] + expected_call0, expected_call1 = [call(subcmd + image0)], 
[call(subcmd + image1)] + + bootloader = uboot.UbootBootloader() + bootloader.get_installed_images = Mock(return_value=installed_images) + bootloader.set_next_image(installed_images[0]) + assert mock_run_cmd.call_args_list == expected_call0 + + mock_run_cmd.call_args_list = [] + bootloader.set_next_image(installed_images[1]) + assert mock_run_cmd.call_args_list == expected_call1 + +@patch("sonic_installer.bootloader.uboot.run_command") +def test_install_image(mock_run_cmd): + image_path = ['sonic_image'] + expected_call = [call(['bash', image_path])] + + bootloader = uboot.UbootBootloader() + bootloader.install_image(image_path) + assert mock_run_cmd.call_args_list == expected_call + @patch("sonic_installer.bootloader.uboot.subprocess.call", Mock()) @patch("sonic_installer.bootloader.uboot.run_command") def test_remove_image(run_command_patch): # Constants image_path_prefix = os.path.join(uboot.HOST_PATH, uboot.IMAGE_DIR_PREFIX) - exp_image_path = f'{image_path_prefix}expeliarmus-{uboot.IMAGE_PREFIX}abcde' - - intstalled_images = [ - f'{uboot.IMAGE_PREFIX}expeliarmus-{uboot.IMAGE_PREFIX}abcde', - f'{uboot.IMAGE_PREFIX}expeliarmus-abcde', + exp_image_path = [ + f'{image_path_prefix}expeliarmus-{uboot.IMAGE_PREFIX}abcde', + f'{image_path_prefix}expeliarmus-abcde' ] bootloader = uboot.UbootBootloader() - bootloader.get_installed_images = Mock(return_value=intstalled_images) + bootloader.get_installed_images = Mock(return_value=installed_images) # Verify rm command was executed with image path - bootloader.remove_image(intstalled_images[0]) + bootloader.remove_image(installed_images[0]) args_list = uboot.subprocess.call.call_args_list assert len(args_list) > 0 args, _ = args_list[0] - assert exp_image_path in args[0] + assert exp_image_path[0] in args[0] + + uboot.subprocess.call.call_args_list = [] + bootloader.remove_image(installed_images[1]) + args_list = uboot.subprocess.call.call_args_list + assert len(args_list) > 0 + + args, _ = args_list[0] + assert 
exp_image_path[1] in args[0] @patch("sonic_installer.bootloader.uboot.subprocess.Popen") @patch("sonic_installer.bootloader.uboot.run_command") @@ -45,26 +96,21 @@ def communicate(self): def mock_run_command(cmd): # Remove leading string "/usr/bin/fw_setenv boot_next " -- the 29 characters + cmd = ' '.join(cmd) MockProc.commandline = cmd[29:] - # Constants - intstalled_images = [ - f'{uboot.IMAGE_PREFIX}expeliarmus-{uboot.IMAGE_PREFIX}abcde', - f'{uboot.IMAGE_PREFIX}expeliarmus-abcde', - ] - run_command_patch.side_effect = mock_run_command popen_patch.return_value = MockProc() bootloader = uboot.UbootBootloader() - bootloader.get_installed_images = Mock(return_value=intstalled_images) + bootloader.get_installed_images = Mock(return_value=installed_images) - bootloader.set_default_image(intstalled_images[1]) + bootloader.set_default_image(installed_images[1]) # Verify get_next_image was executed with image path next_image=bootloader.get_next_image() - assert next_image == intstalled_images[1] + assert next_image == installed_images[1] @patch("sonic_installer.bootloader.uboot.subprocess.Popen") @patch("sonic_installer.bootloader.uboot.run_command") @@ -76,6 +122,7 @@ def communicate(self): def mock_run_command(cmd): # Remove leading string "/usr/bin/fw_setenv linuxargs " -- the 29 characters + cmd = ' '.join(cmd) MockProc.commandline = 'linuxargs=' + cmd[29:] run_command_patch.side_effect = mock_run_command diff --git a/tests/installer_docker_test.py b/tests/installer_docker_test.py index 8897b8413f..d80379ba9a 100644 --- a/tests/installer_docker_test.py +++ b/tests/installer_docker_test.py @@ -1,11 +1,39 @@ +import sys import pytest +import subprocess import sonic_installer.main as sonic_installer +import sonic_installer.common as sonic_installer_common from click.testing import CliRunner -from unittest.mock import patch, MagicMock +from unittest.mock import patch, MagicMock, Mock SUCCESS = 0 +@patch("sonic_installer.main.subprocess.Popen") +def 
test_get_container_image_id(mock_popen): + image_tag = 'abcde' + + mock_proc = MagicMock() + attrs = {"communicate.return_value": ("output", "error")} + mock_proc.configure_mock(**attrs) + mock_popen.return_value = mock_proc + + expected_call = ["docker", "images", "--format", '{{.ID}}', image_tag] + sonic_installer.get_container_image_id(image_tag) + mock_popen.assert_called_with(expected_call, stdout=subprocess.PIPE, text=True) + +@patch("sonic_installer.main.subprocess.Popen") +def test_get_container_image_id_all(mock_popen): + image_name = 'abcde' + + mock_proc = MagicMock() + attrs = {"communicate.return_value": ("output", "error")} + mock_proc.configure_mock(**attrs) + mock_popen.return_value = mock_proc + + expected_call = ["docker", "images", "--format", '{{.ID}}', image_name] + sonic_installer.get_container_image_id_all(image_name) + mock_popen.assert_called_with(expected_call, stdout=subprocess.PIPE, text=True) @patch('sonic_installer.main.get_container_image_name', MagicMock(return_value='docker-fpm-frr')) @patch('sonic_installer.main.get_container_image_id_all', MagicMock(return_value=['1', '2'])) @@ -20,7 +48,7 @@ def test_rollback_docker_basic(mock_run_cmd): ) assert result.exit_code == SUCCESS - expect_docker_tag_command = 'docker tag docker-fpm-frr:some_tag docker-fpm-frr:latest' + expect_docker_tag_command = ['docker', 'tag', 'docker-fpm-frr:some_tag', 'docker-fpm-frr:latest'] mock_run_cmd.assert_called_with(expect_docker_tag_command) mock_run_cmd.reset() @@ -29,7 +57,7 @@ def test_rollback_docker_basic(mock_run_cmd): ) assert result.exit_code == SUCCESS - mock_run_cmd.assert_any_call('systemctl restart snmp') + mock_run_cmd.assert_any_call(['systemctl', 'restart', 'snmp']) @patch('sonic_installer.main.get_container_image_name', MagicMock(return_value='docker-fpm-frr')) diff --git a/tests/swap_allocator_test.py b/tests/swap_allocator_test.py index 960d4e8caf..b77d7cadf8 100644 --- a/tests/swap_allocator_test.py +++ b/tests/swap_allocator_test.py @@ 
-58,7 +58,7 @@ def test_setup_swapmem(self): mock_fallocate.assert_called_once_with(pseudo_fd_fileno, 0, expected_swap_mem_size_in_bytes) mock_chmod.assert_called_once_with(expected_swapfile_location, expected_swapfile_permission) - mock_run.assert_called_once_with(f'mkswap {expected_swapfile_location}; swapon {expected_swapfile_location}') + mock_run.assert_has_calls([mock.call(['mkswap', expected_swapfile_location]), mock.call(['swapon', expected_swapfile_location])]) def test_remove_swapmem(self): with mock.patch("subprocess.Popen") as mock_popen, \ diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py index c445dfb6e3..f44dcdf2f2 100644 --- a/tests/test_sonic_installer.py +++ b/tests/test_sonic_installer.py @@ -1,8 +1,17 @@ import os +import sys +import pytest from contextlib import contextmanager from sonic_installer.main import sonic_installer from click.testing import CliRunner from unittest.mock import patch, Mock, call +import sonic_installer.common as sonic_installer_common + + +def test_run_command(): + with pytest.raises(SystemExit) as e: + output = sonic_installer_common.run_command([sys.executable, "-c", "import sys; sys.exit(6)"]) + assert e.value.code == 6 @patch("sonic_installer.main.SWAPAllocator") @patch("sonic_installer.main.get_bootloader") From 56b6ac29f8363fcb8142a0192b16a5746b5a6411 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Wed, 7 Jun 2023 09:49:47 -0400 Subject: [PATCH 31/35] [consutil] replace shell=True (#2725) #### What I did `subprocess()` - when using with `shell=True` is dangerous. Using subprocess function without a static string can lead to command injection. 
#### How I did it `subprocess()` - use `shell=False` instead, use list of strings Ref: [https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation](https://semgrep.dev/docs/cheat-sheets/python-command-injection/#mitigation) #### How to verify it Pass UT Signed-off-by: maipbui --- consutil/lib.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/consutil/lib.py b/consutil/lib.py index e2ebf1da06..1d7f967bd3 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -12,6 +12,7 @@ import click from sonic_py_common import device_info +from sonic_py_common.general import getstatusoutput_noshell_pipe ERR_DISABLE = 1 ERR_CMD = 2 @@ -199,7 +200,7 @@ def clear_session(self): try: if not self._session: pid = self.session_pid - cmd = "sudo kill -SIGTERM " + pid + cmd = ['sudo', 'kill', '-SIGTERM', str(pid)] SysInfoProvider.run_command(cmd) else: self._session.close() @@ -276,7 +277,7 @@ def init_device_prefix(): @staticmethod def list_console_ttys(): """Lists all console tty devices""" - cmd = "ls " + SysInfoProvider.DEVICE_PREFIX + "*" + cmd = ["ls", SysInfoProvider.DEVICE_PREFIX + "*"] output, _ = SysInfoProvider.run_command(cmd, abort=False) ttys = output.split('\n') ttys = list([dev for dev in ttys if re.match(SysInfoProvider.DEVICE_PREFIX + r"\d+", dev) != None]) @@ -285,15 +286,17 @@ def list_console_ttys(): @staticmethod def list_active_console_processes(): """Lists all active console session processes""" - cmd = 'ps -eo pid,lstart,cmd | grep -E "(mini|pico)com"' - output = SysInfoProvider.run_command(cmd) + cmd0 = ['ps', '-eo', 'pid,lstart,cmd'] + cmd1 = ['grep', '-E', "(mini|pico)com"] + output = SysInfoProvider.run_command(cmd0, cmd1) return SysInfoProvider._parse_processes_info(output) @staticmethod def get_active_console_process_info(pid): """Gets active console process information by PID""" - cmd = 'ps -p {} -o pid,lstart,cmd | grep -E "(mini|pico)com"'.format(pid) - output = 
SysInfoProvider.run_command(cmd) + cmd0 = ['ps', '-p', str(pid), '-o', 'pid,lstart,cmd'] + cmd1 = ['grep', '-E', "(mini|pico)com"] + output = SysInfoProvider.run_command(cmd0, cmd1) processes = SysInfoProvider._parse_processes_info(output) if len(list(processes.keys())) == 1: return (list(processes.keys())[0],) + list(processes.values())[0] @@ -325,15 +328,12 @@ def _parse_processes_info(output): return console_processes @staticmethod - def run_command(cmd, abort=True): - """runs command, exit if stderr is written to and abort argument is ture, returns stdout, stderr otherwise""" - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True) - output = proc.stdout.read() - error = proc.stderr.read() - if abort and error != "": - click.echo("Command resulted in error: {}".format(error)) + def run_command(*args, abort=True): + exitcodes, output = getstatusoutput_noshell_pipe(*args) + if abort and any(exitcodes) and output != '': + click.echo("Command resulted in error: {}".format(output)) sys.exit(ERR_CMD) - return output if abort else (output, error) + return output if abort else (output, output) class DbUtils(object): def __init__(self, db): From 252c08351ce25bf8327b9a90b1145a68e5259891 Mon Sep 17 00:00:00 2001 From: amulyan7 <98349131+amulyan7@users.noreply.github.com> Date: Wed, 7 Jun 2023 12:46:39 -0700 Subject: [PATCH 32/35] Add display support for serial field in show chassis modules status CLI (#2858) Add display support for serial field in show chassis modules status CLI --- show/chassis_modules.py | 6 ++++-- tests/chassis_modules_test.py | 18 +++++++++--------- tests/mock_tables/state_db.json | 15 ++++++++++----- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/show/chassis_modules.py b/show/chassis_modules.py index 73ea92d1ed..b94d8d620d 100644 --- a/show/chassis_modules.py +++ b/show/chassis_modules.py @@ -12,6 +12,7 @@ CHASSIS_MODULE_INFO_SLOT_FIELD = 'slot' CHASSIS_MODULE_INFO_OPERSTATUS_FIELD = 
'oper_status' CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD = 'admin_status' +CHASSIS_MODULE_INFO_SERIAL_FIELD = 'serial' CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' CHASSIS_MIDPLANE_INFO_IP_FIELD = 'ip_address' @@ -33,7 +34,7 @@ def modules(): def status(db, chassis_module_name): """Show chassis-modules status""" - header = ['Name', 'Description', 'Physical-Slot', 'Oper-Status', 'Admin-Status'] + header = ['Name', 'Description', 'Physical-Slot', 'Oper-Status', 'Admin-Status', 'Serial'] chassis_cfg_table = db.cfgdb.get_table('CHASSIS_MODULE') state_db = SonicV2Connector(host="127.0.0.1") @@ -59,13 +60,14 @@ def status(db, chassis_module_name): desc = data_dict[CHASSIS_MODULE_INFO_DESC_FIELD] slot = data_dict[CHASSIS_MODULE_INFO_SLOT_FIELD] oper_status = data_dict[CHASSIS_MODULE_INFO_OPERSTATUS_FIELD] + serial = data_dict[CHASSIS_MODULE_INFO_SERIAL_FIELD] admin_status = 'up' config_data = chassis_cfg_table.get(key_list[1]) if config_data is not None: admin_status = config_data.get(CHASSIS_MODULE_INFO_ADMINSTATUS_FIELD) - table.append((key_list[1], desc, slot, oper_status, admin_status)) + table.append((key_list[1], desc, slot, oper_status, admin_status, serial)) if table: click.echo(tabulate(table, header, tablefmt='simple', stralign='right')) diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index a9ba0f82a7..8196b12f4f 100644 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -13,23 +13,23 @@ from .utils import get_result_and_return_code show_linecard0_shutdown_output="""\ -LINE-CARD0 line-card 1 Empty down +LINE-CARD0 line-card 1 Empty down LC1000101 """ show_linecard0_startup_output="""\ -LINE-CARD0 line-card 1 Empty up +LINE-CARD0 line-card 1 Empty up LC1000101 """ header_lines = 2 warning_lines = 0 show_chassis_modules_output="""\ - Name Description Physical-Slot Oper-Status Admin-Status ------------- --------------- --------------- ------------- -------------- -FABRIC-CARD0 fabric-card 17 Online up 
-FABRIC-CARD1 fabric-card 18 Offline up - LINE-CARD0 line-card 1 Empty up - LINE-CARD1 line-card 2 Online down - SUPERVISOR0 supervisor-card 16 Online up + Name Description Physical-Slot Oper-Status Admin-Status Serial +------------ --------------- --------------- ------------- -------------- --------- +FABRIC-CARD0 fabric-card 17 Online up FC1000101 +FABRIC-CARD1 fabric-card 18 Offline up FC1000102 + LINE-CARD0 line-card 1 Empty up LC1000101 + LINE-CARD1 line-card 2 Online down LC1000102 + SUPERVISOR0 supervisor-card 16 Online up RP1000101 """ show_chassis_midplane_output="""\ diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 289bf3cec2..1e7b506c28 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1114,27 +1114,32 @@ "CHASSIS_MODULE_TABLE|SUPERVISOR0": { "desc": "supervisor-card", "oper_status": "Online", - "slot": "16" + "slot": "16", + "serial": "RP1000101" }, "CHASSIS_MODULE_TABLE|LINE-CARD0": { "desc": "line-card", "oper_status": "Empty", - "slot": "1" + "slot": "1", + "serial": "LC1000101" }, "CHASSIS_MODULE_TABLE|LINE-CARD1": { "desc": "line-card", "oper_status": "Online", - "slot": "2" + "slot": "2", + "serial": "LC1000102" }, "CHASSIS_MODULE_TABLE|FABRIC-CARD0": { "desc": "fabric-card", "oper_status": "Online", - "slot": "17" + "slot": "17", + "serial": "FC1000101" }, "CHASSIS_MODULE_TABLE|FABRIC-CARD1": { "desc": "fabric-card", "oper_status": "Offline", - "slot": "18" + "slot": "18", + "serial": "FC1000102" }, "MUX_CABLE_TABLE|Ethernet32": { "state": "active" From 788db8c3b9ce3b4b624e27e1cdd2e2105d0e88e6 Mon Sep 17 00:00:00 2001 From: isabelmsft <67024108+isabelmsft@users.noreply.github.com> Date: Wed, 7 Jun 2023 17:18:11 -0700 Subject: [PATCH 33/35] [GCU] Complete RDMA Platform Validator PR (#2857) --- .../field_operation_validators.py | 117 ++++++++++++++- .../gcu_field_operation_validators.conf.json | 119 ++++++++++++++- generic_config_updater/gu_common.py | 6 +- 
.../field_operation_validator_test.py | 142 ++++++++++++++++++ .../gcu_feature_patch_application_test.py | 5 +- .../generic_config_updater/gu_common_test.py | 56 ------- 6 files changed, 379 insertions(+), 66 deletions(-) create mode 100644 tests/generic_config_updater/field_operation_validator_test.py diff --git a/generic_config_updater/field_operation_validators.py b/generic_config_updater/field_operation_validators.py index 84cc48547f..72af9c8bd0 100644 --- a/generic_config_updater/field_operation_validators.py +++ b/generic_config_updater/field_operation_validators.py @@ -1,10 +1,117 @@ -from sonic_py_common import device_info +import os import re +import json +import jsonpointer +import subprocess +from sonic_py_common import device_info +from .gu_common import GenericConfigUpdaterError + + +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +GCU_TABLE_MOD_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" +GET_HWSKU_CMD = "sonic-cfggen -d -v DEVICE_METADATA.localhost.hwsku" + +def get_asic_name(): + asic = "unknown" + + if os.path.exists(GCU_TABLE_MOD_CONF_FILE): + with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU table modification validators config file not found") + + asic_mapping = gcu_field_operation_conf["helper_data"]["rdma_config_update_validator"] + asic_type = device_info.get_sonic_version_info()['asic_type'] + + if asic_type == 'cisco-8000': + asic = "cisco-8000" + elif asic_type == 'mellanox' or asic_type == 'vs' or asic_type == 'broadcom': + proc = subprocess.Popen(GET_HWSKU_CMD, shell=True, universal_newlines=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + hwsku = output.rstrip('\n') + if asic_type == 'mellanox' or asic_type == 'vs': + spc1_hwskus = asic_mapping["mellanox_asics"]["spc1"] + if hwsku.lower() in [spc1_hwsku.lower() for spc1_hwsku in spc1_hwskus]: + asic = "spc1" + return asic + if asic_type == 'broadcom' 
or asic_type == 'vs': + broadcom_asics = asic_mapping["broadcom_asics"] + for asic_shorthand, hwskus in broadcom_asics.items(): + if asic != "unknown": + break + for hwsku_cur in hwskus: + if hwsku_cur.lower() in hwsku.lower(): + asic = asic_shorthand + break + + return asic -def rdma_config_update_validator(): - version_info = device_info.get_sonic_version_info() - asic_type = version_info.get('asic_type') - if (asic_type != 'mellanox' and asic_type != 'broadcom' and asic_type != 'cisco-8000'): +def rdma_config_update_validator(patch_element): + asic = get_asic_name() + if asic == "unknown": return False + version_info = device_info.get_sonic_version_info() + build_version = version_info.get('build_version') + version_substrings = build_version.split('.') + branch_version = None + + for substring in version_substrings: + if substring.isdigit() and re.match(r'^\d{8}$', substring): + branch_version = substring + + path = patch_element["path"] + table = jsonpointer.JsonPointer(path).parts[0] + + # Helper function to return relevant cleaned paths, considers case where the jsonpatch value is a dict + For paths like /PFC_WD/Ethernet112/action, remove Ethernet112 from the path so that we can clearly determine the relevant field (i.e.
action, not Ethernet112) + def _get_fields_in_patch(): + cleaned_fields = [] + + field_elements = jsonpointer.JsonPointer(path).parts[1:] + cleaned_field_elements = [elem for elem in field_elements if not any(char.isdigit() for char in elem)] + cleaned_field = '/'.join(cleaned_field_elements).lower() + + + if 'value' in patch_element.keys() and isinstance(patch_element['value'], dict): + for key in patch_element['value']: + cleaned_fields.append(cleaned_field+ '/' + key) + else: + cleaned_fields.append(cleaned_field) + + return cleaned_fields + + if os.path.exists(GCU_TABLE_MOD_CONF_FILE): + with open(GCU_TABLE_MOD_CONF_FILE, "r") as s: + gcu_field_operation_conf = json.load(s) + else: + raise GenericConfigUpdaterError("GCU table modification validators config file not found") + + tables = gcu_field_operation_conf["tables"] + scenarios = tables[table]["validator_data"]["rdma_config_update_validator"] + + cleaned_fields = _get_fields_in_patch() + for cleaned_field in cleaned_fields: + scenario = None + for key in scenarios.keys(): + if cleaned_field in scenarios[key]["fields"]: + scenario = scenarios[key] + break + + if scenario is None: + return False + + if scenario["platforms"][asic] == "": + return False + + if patch_element['op'] not in scenario["operations"]: + return False + + if branch_version is not None: + if asic in scenario["platforms"]: + if branch_version < scenario["platforms"][asic]: + return False + else: + return False + return True diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index f12a14d8eb..2dcf1649b7 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -10,11 +10,128 @@ "e.g. 
'show.acl.test_acl'", "", "field_operation_validators for a given table defines a list of validators that all must pass for modification to the specified field and table to be allowed", + "", + "validator_data provides data relevant to each validator", "" ], + "helper_data": { + "rdma_config_update_validator": { + "mellanox_asics": { + "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-D48C8" ] + }, + "broadcom_asics": { + "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], + "th2": [ "Arista-7260CX3-D108C8", "Arista-7260CX3-C64", "Arista-7260CX3-Q64" ], + "td2": [ "Force10-S6000", "Force10-S6000-Q24S32", "Arista-7050-QX32", "Arista-7050-QX-32S", "Nexus-3164", "Arista-7050QX32S-Q32" ], + "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ] + } + } + }, "tables": { "PFC_WD": { - "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ] + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "PFCWD enable/disable": { + "fields": [ + "restoration_time", + "detection_time", + "action", + "global/poll_interval" + ], + "operations": ["remove", "add", "replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "20201200" + } + } + } + } + }, + "BUFFER_POOL": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "Shared/headroom pool size changes": { + "fields": [ + "ingress_lossless_pool/xoff", + "ingress_lossless_pool/size", + "egress_lossy_pool/size" + ], + "operations": ["replace"], + "platforms": { + 
"spc1": "20191100", + "td2": "", + "th": "20221100", + "th2": "20221100", + "td3": "20221100", + "cisco-8000": "" + } + } + } + } + }, + "BUFFER_PROFILE": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "Dynamic threshold tuning": { + "fields": [ + "dynamic_th" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "" + } + }, + "PG headroom modification": { + "fields": [ + "xoff" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20191100", + "td2": "", + "th": "20221100", + "th2": "20221100", + "td3": "20221100", + "cisco-8000": "" + } + } + } + } + }, + "WRED_PROFILE": { + "field_operation_validators": [ "generic_config_updater.field_operation_validators.rdma_config_update_validator" ], + "validator_data": { + "rdma_config_update_validator": { + "ECN tuning": { + "fields": [ + "azure_lossless/green_min_threshold", + "azure_lossless/green_max_threshold", + "azure_lossless/green_drop_probability" + ], + "operations": ["replace"], + "platforms": { + "spc1": "20181100", + "td2": "20181100", + "th": "20181100", + "th2": "20181100", + "td3": "20201200", + "cisco-8000": "" + } + } + } + } } } } diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index e8c66fcbbe..a6cb8de094 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -166,7 +166,7 @@ def validate_field_operation(self, old_config, target_config): if any(op['op'] == operation and field == op['path'] for op in patch): raise IllegalPatchOperationError("Given patch operation is invalid. Operation: {} is illegal on field: {}".format(operation, field)) - def _invoke_validating_function(cmd): + def _invoke_validating_function(cmd, jsonpatch_element): # cmd is in the format as . 
method_name = cmd.split(".")[-1] module_name = ".".join(cmd.split(".")[0:-1]) @@ -174,7 +174,7 @@ def _invoke_validating_function(cmd): raise GenericConfigUpdaterError("Attempting to call invalid method {} in module {}. Module must be generic_config_updater.field_operation_validators, and method must be a defined validator".format(method_name, module_name)) module = importlib.import_module(module_name, package=None) method_to_call = getattr(module, method_name) - return method_to_call() + return method_to_call(jsonpatch_element) if os.path.exists(GCU_FIELD_OP_CONF_FILE): with open(GCU_FIELD_OP_CONF_FILE, "r") as s: @@ -194,7 +194,7 @@ def _invoke_validating_function(cmd): validating_functions.update(tables.get(table, {}).get("field_operation_validators", [])) for function in validating_functions: - if not _invoke_validating_function(function): + if not _invoke_validating_function(function, element): raise IllegalPatchOperationError("Modification of {} table is illegal- validating function {} returned False".format(table, function)) diff --git a/tests/generic_config_updater/field_operation_validator_test.py b/tests/generic_config_updater/field_operation_validator_test.py new file mode 100644 index 0000000000..4ffe11d5bd --- /dev/null +++ b/tests/generic_config_updater/field_operation_validator_test.py @@ -0,0 +1,142 @@ +import io +import unittest +import mock +import json +import subprocess +import generic_config_updater +import generic_config_updater.field_operation_validators as fov +import generic_config_updater.gu_common as gu_common + +from unittest.mock import MagicMock, Mock, mock_open +from mock import patch +from sonic_py_common.device_info import get_hwsku, get_sonic_version_info + + +class TestValidateFieldOperation(unittest.TestCase): + + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="unknown")) + def test_rdma_config_update_validator_unknown_asic(self): + patch_element = {"path": 
"/PFC_WD/Ethernet4/restoration_time", "op": "replace", "value": "234234"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="td3")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "ingress_lossless_pool/size", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"td3": "20221100"}}}}}}}')) + def test_rdma_config_update_validator_td3_asic_invalid_version(self): + patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "replace", "value": "234234"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_valid_version(self): + patch_element = {"path": "/PFC_WD/Ethernet8/detection_time", "op": "remove"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == True + + @patch("sonic_py_common.device_info.get_sonic_version_info", 
mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"BUFFER_POOL": {"validator_data": {"rdma_config_update_validator": {"Shared/headroom pool size changes": {"fields": ["ingress_lossless_pool/xoff", "egress_lossy_pool/size"], "operations": ["replace"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_invalid_op(self): + patch_element = {"path": "/BUFFER_POOL/ingress_lossless_pool/xoff", "op": "remove"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"build_version": "SONiC.20220530"})) + @patch("generic_config_updater.field_operation_validators.get_asic_name", mock.Mock(return_value="spc1")) + @patch("os.path.exists", mock.Mock(return_value=True)) + @patch("builtins.open", mock_open(read_data='{"tables": {"PFC_WD": {"validator_data": {"rdma_config_update_validator": {"PFCWD enable/disable": {"fields": ["detection_time", "action"], "operations": ["remove", "replace", "add"], "platforms": {"spc1": "20181100"}}}}}}}')) + def test_rdma_config_update_validator_spc_asic_other_field(self): + patch_element = {"path": "/PFC_WD/Ethernet8/other_field", "op": "add", "value": "sample_value"} + assert generic_config_updater.field_operation_validators.rdma_config_update_validator(patch_element) == False + + def test_validate_field_operation_illegal__pfcwd(self): + old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} + target_config = {"PFC_WD": {"GLOBAL": {}}} + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + + def 
test_validate_field_operation_legal__rm_loopback1(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + config_wrapper.validate_field_operation(old_config, target_config) + + def test_validate_field_operation_illegal__rm_loopback0(self): + old_config = { + "LOOPBACK_INTERFACE": { + "Loopback0": {}, + "Loopback0|10.1.0.32/32": {}, + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + target_config = { + "LOOPBACK_INTERFACE": { + "Loopback1": {}, + "Loopback1|10.1.0.33/32": {} + } + } + config_wrapper = gu_common.ConfigWrapper() + self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) + +class TestGetAsicName(unittest.TestCase): + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_spc1(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'mellanox'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Mellanox-SN2700-D48C8", 0] + self.assertEqual(fov.get_asic_name(), "spc1") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_th(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Force10-S6100", 0] + self.assertEqual(fov.get_asic_name(), "th") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_th2(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 
'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Arista-7260CX3-D108C8", 0] + self.assertEqual(fov.get_asic_name(), "th2") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_td2(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Force10-S6000", 0] + self.assertEqual(fov.get_asic_name(), "td2") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_td3(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'broadcom'} + mock_popen.return_value = mock.Mock() + mock_popen.return_value.communicate.return_value = ["Arista-7050CX3-32S-C32", 0] + self.assertEqual(fov.get_asic_name(), "td3") + + @patch('sonic_py_common.device_info.get_sonic_version_info') + @patch('subprocess.Popen') + def test_get_asic_cisco(self, mock_popen, mock_get_sonic_version_info): + mock_get_sonic_version_info.return_value = {'asic_type': 'cisco-8000'} + self.assertEqual(fov.get_asic_name(), "cisco-8000") diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index 3f744e20ca..db625e8cd1 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -1,6 +1,7 @@ import jsonpatch import unittest import copy +import mock from unittest.mock import MagicMock, Mock from mock import patch @@ -31,7 +32,8 @@ def get_running_config(): class TestFeaturePatchApplication(unittest.TestCase): def setUp(self): self.config_wrapper = ConfigWrapper() - + + @patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", 
mock.Mock(return_value=True)) def test_feature_patch_application_success(self): # Format of the JSON file containing the test-cases: # @@ -52,6 +54,7 @@ def test_feature_patch_application_success(self): with self.subTest(name=test_case_name): self.run_single_success_case_applier(data[test_case_name]) + @patch("generic_config_updater.field_operation_validators.rdma_config_update_validator", mock.Mock(return_value=True)) def test_feature_patch_application_failure(self): # Fromat of the JSON file containing the test-cases: # diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a319a25ead..a2a776c0bb 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -71,62 +71,6 @@ def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "mellanox", "build_version": "SONiC.20181131"})) - def test_validate_field_operation_legal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "40"}}} - config_wrapper = gu_common.ConfigWrapper() - config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__pfcwd(self): - old_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - - @patch("sonic_py_common.device_info.get_sonic_version_info", mock.Mock(return_value={"asic_type": "invalid-asic", "build_version": "SONiC.20181131"})) - def test_validate_field_modification_illegal__pfcwd(self): - old_config = {"PFC_WD": 
{"GLOBAL": {"POLL_INTERVAL": "60"}}} - target_config = {"PFC_WD": {"GLOBAL": {"POLL_INTERVAL": "80"}}} - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - - def test_validate_field_operation_legal__rm_loopback1(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - config_wrapper.validate_field_operation(old_config, target_config) - - def test_validate_field_operation_illegal__rm_loopback0(self): - old_config = { - "LOOPBACK_INTERFACE": { - "Loopback0": {}, - "Loopback0|10.1.0.32/32": {}, - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - target_config = { - "LOOPBACK_INTERFACE": { - "Loopback1": {}, - "Loopback1|10.1.0.33/32": {} - } - } - config_wrapper = gu_common.ConfigWrapper() - self.assertRaises(gu_common.IllegalPatchOperationError, config_wrapper.validate_field_operation, old_config, target_config) - def test_ctor__default_values_set(self): config_wrapper = gu_common.ConfigWrapper() From 0f67ab799d46b8e9449dd093bfde713e49641a02 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Fri, 9 Jun 2023 09:33:40 -0400 Subject: [PATCH 34/35] remove docker-sonic-vs directory (#2868) #### Why I did it docker-sonic-vs/Dockerfile is not used, remove it. 
##### Work item tracking - Microsoft ADO **(number only)**: 17418730 #### How I did it remove docker-sonic-vs/Dockerfile Signed-off-by: Mai Bui --- .azure-pipelines/docker-sonic-vs/Dockerfile | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 .azure-pipelines/docker-sonic-vs/Dockerfile diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile deleted file mode 100644 index 2b3e634232..0000000000 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM docker-sonic-vs - -ARG docker_container_name - -ADD ["wheels", "/wheels"] - -# Uninstalls only sonic-utilities and does not impact its dependencies -RUN pip3 uninstall -y sonic-utilities - -# Installs sonic-utilities, adds missing dependencies, upgrades out-dated depndencies -RUN pip3 install /wheels/sonic_utilities-1.2-py3-none-any.whl From dbcaaf8aa27b37dfba1b088e2fa59fd94b8dc683 Mon Sep 17 00:00:00 2001 From: Yaqiang Zhu Date: Mon, 12 Jun 2023 09:49:41 +0800 Subject: [PATCH 35/35] [dhcp-relay] Fix dhcp6relay counter issue (#2866) Why I did While deleting a Vlan, clear dhcpv6_relay counter info state_db before dhcp_relay container restart would cause that counter info still exist in state_db, which is incorrect. Microsoft ADO number: 24211173 How I did it Clear counter info in state_db after container restart. How to verify it Previous ut and build image to verify. 
Signed-off-by: Yaqiang Zhu --- config/vlan.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/config/vlan.py b/config/vlan.py index ec4b269cc2..03660ab0af 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -118,15 +118,14 @@ def del_vlan(db, vid, no_restart_dhcp_relay): # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, None) - delete_state_db_entry(vlan) - if not no_restart_dhcp_relay and is_dhcpv6_relay_config_exist(db, vlan): # set dhcpv6_relay table set_dhcp_relay_table('DHCP_RELAY', config_db, vlan, None) # We need to restart dhcp_relay service after dhcpv6_relay config change if is_dhcp_relay_running(): dhcp_relay_util.handle_restart_dhcp_relay_service() - + delete_state_db_entry(vlan) + vlans = db.cfgdb.get_keys('VLAN') if not vlans: docker_exec_cmd = "docker exec -i swss {}"