diff --git a/acl_loader/main.py b/acl_loader/main.py index 77fd03a35b..362f1f75ea 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -474,8 +474,11 @@ def deny_rule(self, table_name): rule_props = {} rule_data = {(table_name, "DEFAULT_RULE"): rule_props} rule_props["PRIORITY"] = str(self.min_priority) - rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"]) rule_props["PACKET_ACTION"] = "DROP" + if 'v6' in table_name.lower(): + rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV6"]) + else: + rule_props["ETHER_TYPE"] = str(self.ethertype_map["ETHERTYPE_IPV4"]) return rule_data def convert_rules(self): diff --git a/clear/main.py b/clear/main.py index f6e1d8c477..9798b7b5a5 100755 --- a/clear/main.py +++ b/clear/main.py @@ -380,5 +380,30 @@ def line(linenum): cmd = "consutil clear " + str(linenum) run_command(cmd) +# +# 'nat' group ("clear nat ...") +# + +@cli.group(cls=AliasedGroup, default_if_no_args=False) +def nat(): + """Clear the nat info""" + pass + +# 'statistics' subcommand ("clear nat statistics") +@nat.command() +def statistics(): + """ Clear all NAT statistics """ + + cmd = "natclear -s" + run_command(cmd) + +# 'translations' subcommand ("clear nat translations") +@nat.command() +def translations(): + """ Clear all NAT translations """ + + cmd = "natclear -t" + run_command(cmd) + if __name__ == '__main__': cli() diff --git a/config/main.py b/config/main.py index 5cde600ee6..9b1045be77 100755 --- a/config/main.py +++ b/config/main.py @@ -8,6 +8,7 @@ import netaddr import re import syslog +import time import netifaces import sonic_device_util @@ -18,6 +19,7 @@ import aaa import mlnx +import nat CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?']) @@ -156,6 +158,61 @@ def interface_name_to_alias(interface_name): return None +def get_interface_table_name(interface_name): + """Get table name by interface_name prefix + """ + if interface_name.startswith("Ethernet"): + if VLAN_SUB_INTERFACE_SEPARATOR in 
interface_name: + return "VLAN_SUB_INTERFACE" + return "INTERFACE" + elif interface_name.startswith("PortChannel"): + if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: + return "VLAN_SUB_INTERFACE" + return "PORTCHANNEL_INTERFACE" + elif interface_name.startswith("Vlan"): + return "VLAN_INTERFACE" + elif interface_name.startswith("Loopback"): + return "LOOPBACK_INTERFACE" + else: + return "" + +def interface_ipaddr_dependent_on_interface(config_db, interface_name): + """Get table keys including ipaddress + """ + data = [] + table_name = get_interface_table_name(interface_name) + if table_name == "": + return data + keys = config_db.get_keys(table_name) + for key in keys: + if interface_name in key and len(key) == 2: + data.append(key) + return data + +def is_interface_bind_to_vrf(config_db, interface_name): + """Get interface if bind to vrf or not + """ + table_name = get_interface_table_name(interface_name) + if table_name == "": + return False + entry = config_db.get_entry(table_name, interface_name) + if entry and entry.get("vrf_name"): + return True + return False + +def del_interface_bind_to_vrf(config_db, vrf_name): + """del interface bind to vrf + """ + tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] + for table_name in tables: + interface_dict = config_db.get_table(table_name) + if interface_dict: + for interface_name in interface_dict.keys(): + if interface_dict[interface_name].has_key('vrf_name') and vrf_name == interface_dict[interface_name]['vrf_name']: + interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) + for interface_del in interface_dependent: + config_db.set_entry(table_name, interface_del, None) + config_db.set_entry(table_name, interface_name, None) def set_interface_naming_mode(mode): """Modify SONIC_CLI_IFACE_MODE env variable in user .bashrc @@ -348,6 +405,7 @@ def _stop_services(): 'pmon', 'bgp', 'hostcfgd', + 'nat' ] if asic_type == 'mellanox' and 'pmon' in 
services_to_stop: services_to_stop.remove('pmon') @@ -376,7 +434,8 @@ def _reset_failed_services(): 'snmp', 'swss', 'syncd', - 'teamd' + 'teamd', + 'nat' ] for service in services_to_reset: @@ -399,6 +458,7 @@ def _restart_services(): 'pmon', 'lldp', 'hostcfgd', + 'nat', 'sflow', ] if asic_type == 'mellanox' and 'pmon' in services_to_restart: @@ -431,6 +491,8 @@ def config(): exit("Root privileges are required for this operation") config.add_command(aaa.aaa) config.add_command(aaa.tacacs) +# === Add NAT Configuration ========== +config.add_command(nat.nat) @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, @@ -476,7 +538,7 @@ def reload(filename, yes, load_sysinfo): _stop_services() config_db = ConfigDBConnector() config_db.connect() - client = config_db.redis_clients[config_db.CONFIG_DB] + client = config_db.get_redis_client(config_db.CONFIG_DB) client.flushdb() if load_sysinfo: command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku) @@ -542,7 +604,7 @@ def load_minigraph(): config_db = ConfigDBConnector() config_db.connect() - client = config_db.redis_clients[config_db.CONFIG_DB] + client = config_db.get_redis_client(config_db.CONFIG_DB) client.flushdb() if os.path.isfile('/etc/sonic/init_cfg.json'): command = "{} -H -m -j /etc/sonic/init_cfg.json --write-to-db".format(SONIC_CFGGEN_PATH) @@ -948,11 +1010,9 @@ def mvrf_restart_services(): cmd="service ntp start" os.system (cmd) -def vrf_add_management_vrf(): +def vrf_add_management_vrf(config_db): """Enable management vrf in config DB""" - config_db = ConfigDBConnector() - config_db.connect() entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global") if entry and entry['mgmtVrfEnabled'] == 'true' : click.echo("ManagementVRF is already Enabled.") @@ -960,11 +1020,9 @@ def vrf_add_management_vrf(): config_db.mod_entry('MGMT_VRF_CONFIG',"vrf_global",{"mgmtVrfEnabled": "true"}) mvrf_restart_services() -def vrf_delete_management_vrf(): +def 
vrf_delete_management_vrf(config_db): """Disable management vrf in config DB""" - config_db = ConfigDBConnector() - config_db.connect() entry = config_db.get_entry('MGMT_VRF_CONFIG', "vrf_global") if not entry or entry['mgmtVrfEnabled'] == 'false' : click.echo("ManagementVRF is already Disabled.") @@ -972,35 +1030,6 @@ def vrf_delete_management_vrf(): config_db.mod_entry('MGMT_VRF_CONFIG',"vrf_global",{"mgmtVrfEnabled": "false"}) mvrf_restart_services() -# -# 'vrf' group ('config vrf ...') -# - -@config.group('vrf') -def vrf(): - """VRF-related configuration tasks""" - pass - -@vrf.command('add') -@click.argument('vrfname', metavar='. Type mgmt for management VRF', required=True) -@click.pass_context -def vrf_add (ctx, vrfname): - """Create management VRF and move eth0 into it""" - if vrfname == 'mgmt' or vrfname == 'management': - vrf_add_management_vrf() - else: - click.echo("Creation of data vrf={} is not yet supported".format(vrfname)) - -@vrf.command('del') -@click.argument('vrfname', metavar='. 
Type mgmt for management VRF', required=False) -@click.pass_context -def vrf_del (ctx, vrfname): - """Delete management VRF and move back eth0 to default VRF""" - if vrfname == 'mgmt' or vrfname == 'management': - vrf_delete_management_vrf() - else: - click.echo("Deletion of data vrf={} is not yet supported".format(vrfname)) - @config.group() @click.pass_context def snmpagentaddress(ctx): @@ -1177,6 +1206,51 @@ def shutdown(): """Shut down BGP session(s)""" pass +@config.group() +def kdump(): + """ Configure kdump """ + if os.geteuid() != 0: + exit("Root privileges are required for this operation") + pass + +@kdump.command() +def disable(): + """Disable kdump operation""" + config_db = ConfigDBConnector() + if config_db is not None: + config_db.connect() + config_db.mod_entry("KDUMP", "config", {"enabled": "false"}) + run_command("sonic-kdump-config --disable") + +@kdump.command() +def enable(): + """Enable kdump operation""" + config_db = ConfigDBConnector() + if config_db is not None: + config_db.connect() + config_db.mod_entry("KDUMP", "config", {"enabled": "true"}) + run_command("sonic-kdump-config --enable") + +@kdump.command() +@click.argument('kdump_memory', metavar='', required=True) +def memory(kdump_memory): + """Set memory allocated for kdump capture kernel""" + config_db = ConfigDBConnector() + if config_db is not None: + config_db.connect() + config_db.mod_entry("KDUMP", "config", {"memory": kdump_memory}) + run_command("sonic-kdump-config --memory %s" % kdump_memory) + +@kdump.command() +@click.argument('kdump_num_dumps', metavar='', required=True, type=int) +def num_dumps(kdump_num_dumps): + """Set max number of dump files for kdump""" + config_db = ConfigDBConnector() + if config_db is not None: + config_db.connect() + config_db.mod_entry("KDUMP", "config", {"num_dumps": kdump_num_dumps}) + run_command("sonic-kdump-config --num_dumps %d" % kdump_num_dumps) + # 'all' subcommand @shutdown.command() @click.option('-v', '--verbose', is_flag=True, 
help="Enable verbose output") @@ -1373,14 +1447,8 @@ def add(ctx, interface_name, ip_addr, gw): try: ipaddress.ip_network(unicode(ip_addr), strict=False) - if interface_name.startswith("Ethernet"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.set_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "up"}) - config_db.set_entry("VLAN_SUB_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - else: - config_db.set_entry("INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - config_db.set_entry("INTERFACE", interface_name, {"NULL": "NULL"}) - elif interface_name == 'eth0': + + if interface_name == 'eth0': # Configuring more than 1 IPv4 or more than 1 IPv6 address fails. # Allow only one IPv4 and only one IPv6 address to be configured for IPv6. @@ -1403,20 +1471,18 @@ def add(ctx, interface_name, ip_addr, gw): config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), {"gwaddr": gw}) mgmt_ip_restart_services() - elif interface_name.startswith("PortChannel"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.set_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "up"}) - config_db.set_entry("VLAN_SUB_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - else: - config_db.set_entry("PORTCHANNEL_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - config_db.set_entry("PORTCHANNEL_INTERFACE", interface_name, {"NULL": "NULL"}) - elif interface_name.startswith("Vlan"): - config_db.set_entry("VLAN_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - config_db.set_entry("VLAN_INTERFACE", interface_name, {"NULL": "NULL"}) - elif interface_name.startswith("Loopback"): - config_db.set_entry("LOOPBACK_INTERFACE", (interface_name, ip_addr), {"NULL": "NULL"}) - else: + return + + table_name = get_interface_table_name(interface_name) + if table_name == "": ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan/Loopback]") + interface_entry = config_db.get_entry(table_name, interface_name) + if len(interface_entry) == 0: + if table_name == "VLAN_SUB_INTERFACE": + config_db.set_entry(table_name, interface_name, {"admin_status": "up"}) + else: + config_db.set_entry(table_name, interface_name, {"NULL": "NULL"}) + config_db.set_entry(table_name, (interface_name, ip_addr), {"NULL": "NULL"}) except ValueError: ctx.fail("'ip_addr' is not valid.") @@ -1436,51 +1502,258 @@ def remove(ctx, interface_name, ip_addr): if interface_name is None: ctx.fail("'interface_name' is None!") - if_table = "" try: ipaddress.ip_network(unicode(ip_addr), strict=False) - if interface_name.startswith("Ethernet"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.set_entry("VLAN_SUB_INTERFACE", (interface_name, ip_addr), None) - if_table = "VLAN_SUB_INTERFACE" - else: - config_db.set_entry("INTERFACE", (interface_name, ip_addr), None) - if_table = "INTERFACE" - elif interface_name == 'eth0': + + if interface_name == 'eth0': config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None) mgmt_ip_restart_services() - elif interface_name.startswith("PortChannel"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.set_entry("VLAN_SUB_INTERFACE", (interface_name, ip_addr), None) - if_table = "VLAN_SUB_INTERFACE" - else: - config_db.set_entry("PORTCHANNEL_INTERFACE", (interface_name, ip_addr), None) - if_table = "PORTCHANNEL_INTERFACE" - elif interface_name.startswith("Vlan"): - config_db.set_entry("VLAN_INTERFACE", (interface_name, ip_addr), None) - if_table = "VLAN_INTERFACE" - elif interface_name.startswith("Loopback"): - config_db.set_entry("LOOPBACK_INTERFACE", (interface_name, ip_addr), None) - else: + return + + table_name = get_interface_table_name(interface_name) + if table_name == "": ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan/Loopback]") + config_db.set_entry(table_name, (interface_name, ip_addr), None) + interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) + if len(interface_dependent) == 0 and is_interface_bind_to_vrf(config_db, interface_name) is False: + config_db.set_entry(table_name, interface_name, None) - command = "ip neigh flush {}".format(ip_addr) + command = "ip neigh flush dev {} {}".format(interface_name, ip_addr) run_command(command) except ValueError: ctx.fail("'ip_addr' is not valid.") - exists = False - if if_table: - interfaces = config_db.get_table(if_table) - for key in interfaces.keys(): - if not isinstance(key, tuple): - continue - if interface_name in key: - exists = True - break +# +# 'vrf' subgroup ('config interface vrf ...') +# - if not exists: - config_db.set_entry(if_table, interface_name, None) + +@interface.group() +@click.pass_context +def vrf(ctx): + """Bind or unbind VRF""" + pass + +# +# 'bind' subcommand +# +@vrf.command() +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrf_name', metavar='', required=True) +@click.pass_context +def bind(ctx, interface_name, vrf_name): + """Bind the interface to VRF""" + config_db = ctx.obj["config_db"] + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan/Loopback]") + if is_interface_bind_to_vrf(config_db, interface_name) is True and \ + config_db.get_entry(table_name, interface_name).get('vrf_name') == vrf_name: + return + # Clean ip addresses if interface configured + interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) + for interface_del in interface_dependent: + config_db.set_entry(table_name, interface_del, None) + config_db.set_entry(table_name, interface_name, None) + # When config_db del entry and then add entry with same key, the DEL will lost. + state_db = SonicV2Connector(host='127.0.0.1') + state_db.connect(state_db.STATE_DB, False) + _hash = '{}{}'.format('INTERFACE_TABLE|', interface_name) + while state_db.get(state_db.STATE_DB, _hash, "state") == "ok": + time.sleep(0.01) + state_db.close(state_db.STATE_DB) + config_db.set_entry(table_name, interface_name, {"vrf_name": vrf_name}) + +# +# 'unbind' subcommand +# + +@vrf.command() +@click.argument('interface_name', metavar='', required=True) +@click.pass_context +def unbind(ctx, interface_name): + """Unbind the interface to VRF""" + config_db = ctx.obj["config_db"] + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("interface is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan/Loopback]") + if is_interface_bind_to_vrf(config_db, interface_name) is False: + return + interface_dependent = interface_ipaddr_dependent_on_interface(config_db, interface_name) + for interface_del in interface_dependent: + config_db.set_entry(table_name, interface_del, None) + config_db.set_entry(table_name, interface_name, None) + + +# +# 'vrf' group ('config vrf ...') +# + +@config.group('vrf') +@click.pass_context +def vrf(ctx): + """VRF-related configuration tasks""" + config_db = ConfigDBConnector() + config_db.connect() + ctx.obj = {} + ctx.obj['config_db'] = config_db + pass + +@vrf.command('add') +@click.argument('vrf_name', metavar='', required=True) +@click.pass_context +def add_vrf(ctx, vrf_name): + """Add vrf""" + config_db = ctx.obj['config_db'] + if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): + ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!") + if len(vrf_name) > 15: + ctx.fail("'vrf_name' is too long!") + if (vrf_name == 'mgmt' or vrf_name == 'management'): + vrf_add_management_vrf(config_db) + else: + config_db.set_entry('VRF', vrf_name, {"NULL": "NULL"}) + +@vrf.command('del') +@click.argument('vrf_name', metavar='', required=True) +@click.pass_context +def del_vrf(ctx, vrf_name): + """Del vrf""" + config_db = ctx.obj['config_db'] + if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): + ctx.fail("'vrf_name' is not start with Vrf, mgmt or management!") + if len(vrf_name) > 15: + ctx.fail("'vrf_name' is too long!") + if (vrf_name == 'mgmt' or vrf_name == 'management'): + vrf_delete_management_vrf(config_db) + else: + del_interface_bind_to_vrf(config_db, vrf_name) + config_db.set_entry('VRF', vrf_name, None) + + +# +# 'route' group ('config route ...') +# + +@config.group() +@click.pass_context +def route(ctx): + """route-related configuration tasks""" + pass + 
+@route.command('add',context_settings={"ignore_unknown_options":True}) +@click.argument('command_str', metavar='prefix [vrf ] nexthop <[vrf ] >|>', nargs=-1, type=click.Path()) +@click.pass_context +def add_route(ctx, command_str): + """Add route command""" + if len(command_str) < 4 or len(command_str) > 9: + ctx.fail("argument is not in pattern prefix [vrf ] nexthop <[vrf ] >|>!") + if "prefix" not in command_str: + ctx.fail("argument is incomplete, prefix not found!") + if "nexthop" not in command_str: + ctx.fail("argument is incomplete, nexthop not found!") + for i in range(0,len(command_str)): + if "nexthop" == command_str[i]: + prefix_str = command_str[:i] + nexthop_str = command_str[i:] + vrf_name = "" + cmd = 'sudo vtysh -c "configure terminal" -c "ip route' + if prefix_str: + if len(prefix_str) == 2: + prefix_mask = prefix_str[1] + cmd += ' {}'.format(prefix_mask) + elif len(prefix_str) == 4: + vrf_name = prefix_str[2] + prefix_mask = prefix_str[3] + cmd += ' {}'.format(prefix_mask) + else: + ctx.fail("prefix is not in pattern!") + if nexthop_str: + if len(nexthop_str) == 2: + ip = nexthop_str[1] + if vrf_name == "": + cmd += ' {}'.format(ip) + else: + cmd += ' {} vrf {}'.format(ip, vrf_name) + elif len(nexthop_str) == 3: + dev_name = nexthop_str[2] + if vrf_name == "": + cmd += ' {}'.format(dev_name) + else: + cmd += ' {} vrf {}'.format(dev_name, vrf_name) + elif len(nexthop_str) == 4: + vrf_name_dst = nexthop_str[2] + ip = nexthop_str[3] + if vrf_name == "": + cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst) + else: + cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst) + else: + ctx.fail("nexthop is not in pattern!") + cmd += '"' + run_command(cmd) + +@route.command('del',context_settings={"ignore_unknown_options":True}) +@click.argument('command_str', metavar='prefix [vrf ] nexthop <[vrf ] >|>', nargs=-1, type=click.Path()) +@click.pass_context +def del_route(ctx, command_str): + """Del route command""" + if len(command_str) < 4 or 
len(command_str) > 9: + ctx.fail("argument is not in pattern prefix [vrf ] nexthop <[vrf ] >|>!") + if "prefix" not in command_str: + ctx.fail("argument is incomplete, prefix not found!") + if "nexthop" not in command_str: + ctx.fail("argument is incomplete, nexthop not found!") + for i in range(0,len(command_str)): + if "nexthop" == command_str[i]: + prefix_str = command_str[:i] + nexthop_str = command_str[i:] + vrf_name = "" + cmd = 'sudo vtysh -c "configure terminal" -c "no ip route' + if prefix_str: + if len(prefix_str) == 2: + prefix_mask = prefix_str[1] + cmd += ' {}'.format(prefix_mask) + elif len(prefix_str) == 4: + vrf_name = prefix_str[2] + prefix_mask = prefix_str[3] + cmd += ' {}'.format(prefix_mask) + else: + ctx.fail("prefix is not in pattern!") + if nexthop_str: + if len(nexthop_str) == 2: + ip = nexthop_str[1] + if vrf_name == "": + cmd += ' {}'.format(ip) + else: + cmd += ' {} vrf {}'.format(ip, vrf_name) + elif len(nexthop_str) == 3: + dev_name = nexthop_str[2] + if vrf_name == "": + cmd += ' {}'.format(dev_name) + else: + cmd += ' {} vrf {}'.format(dev_name, vrf_name) + elif len(nexthop_str) == 4: + vrf_name_dst = nexthop_str[2] + ip = nexthop_str[3] + if vrf_name == "": + cmd += ' {} nexthop-vrf {}'.format(ip, vrf_name_dst) + else: + cmd += ' {} vrf {} nexthop-vrf {}'.format(ip, vrf_name, vrf_name_dst) + else: + ctx.fail("nexthop is not in pattern!") + cmd += '"' + run_command(cmd) # # 'acl' group ('config acl ...') @@ -1962,6 +2235,17 @@ def enable(ctx): config_db.mod_entry('SFLOW', 'global', sflow_tbl['global']) + try: + proc = subprocess.Popen("systemctl is-active sflow", shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + except SystemExit as e: + ctx.fail("Unable to check sflow status {}".format(e)) + + if out != "active": + log_info("sflow service is not enabled. 
Starting sflow docker...") + run_command("sudo systemctl enable sflow") + run_command("sudo systemctl start sflow") + # # 'sflow' command ('config sflow disable') # @@ -2201,6 +2485,23 @@ def delete(ctx): sflow_tbl['global'].pop('agent_id') config_db.set_entry('SFLOW', 'global', sflow_tbl['global']) +# +# 'feature' command ('config feature name state') +# +@config.command('feature') +@click.argument('name', metavar='', required=True) +@click.argument('state', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +def feature_status(name, state): + """ Configure status of feature""" + config_db = ConfigDBConnector() + config_db.connect() + status_data = config_db.get_entry('FEATURE', name) + + if not status_data: + click.echo(" Feature '{}' doesn't exist".format(name)) + return + + config_db.mod_entry('FEATURE', name, {'status': state}) if __name__ == '__main__': config() diff --git a/config/nat.py b/config/nat.py new file mode 100644 index 0000000000..1c30aa80d4 --- /dev/null +++ b/config/nat.py @@ -0,0 +1,1088 @@ +#!/usr/bin/env python + +import click +import socket +import netaddr +import ipaddress +from swsssdk import ConfigDBConnector +from swsssdk import SonicV2Connector + +def is_valid_ipv4_address(address): + """Check if the given ipv4 address is valid""" + invalid_list = ['0.0.0.0','255.255.255.255'] + try: + ip = ipaddress.IPv4Address(address) + if (ip.is_reserved) or (ip.is_multicast) or (ip.is_loopback) or (address in invalid_list): + return False + except ipaddress.AddressValueError: + return False + + return True + +def is_valid_port_address(address): + """Check if the given port address is valid""" + try: + port_address = int(address) + except ValueError: + return False + + if port_address not in xrange(1, 65535): + return False + + return True + +def nat_interface_name_is_valid(interface_name): + """Check if the given nat interface is valid""" + + config_db = ConfigDBConnector() + config_db.connect() + + if 
interface_name.startswith("Ethernet"): + interface_dict = config_db.get_table('PORT') + elif interface_name.startswith("PortChannel"): + interface_dict = config_db.get_table('PORTCHANNEL') + elif interface_name.startswith("Vlan"): + interface_dict = config_db.get_table('VLAN') + elif interface_name.startswith("Loopback"): + return True + else: + return False + + if interface_name is not None: + if not interface_dict: + return False + return interface_name in interface_dict + + return False + +def isIpOverlappingWithAnyStaticEntry(ipAddress, table): + """Check if the given ipAddress is overlapping with any static entry""" + + config_db = ConfigDBConnector() + config_db.connect() + + static_dict = config_db.get_table(table) + + if not static_dict: + return False + + for key,values in static_dict.items(): + global_ip = "---" + local_ip = "---" + nat_type = "dnat" + + if table == 'STATIC_NAPT': + if isinstance(key, tuple) is False: + continue + + if (len(key) == 3): + global_ip = key[0] + else: + continue + elif table == 'STATIC_NAT': + if isinstance(key, unicode) is True: + global_ip = key + else: + continue + + local_ip = values["local_ip"] + + if "nat_type" in values: + nat_type = values["nat_type"] + + if nat_type == "snat": + global_ip = local_ip + + if global_ip == ipAddress: + return True + + return False + +def isOverlappingWithAnyDynamicEntry(ipAddress): + """Check if the given ipAddress is overlapping with any dynamic pool entry""" + + config_db = ConfigDBConnector() + config_db.connect() + + ip = int(ipaddress.IPv4Address(ipAddress)) + nat_pool_dict = config_db.get_table('NAT_POOL') + + if not nat_pool_dict: + return False + + for values in nat_pool_dict.values(): + global_ip = values["nat_ip"] + ipAddr = global_ip.split('-') + if (len(ipAddr) == 1): + startIp = int(ipaddress.IPv4Address(unicode(ipAddr[0]))) + endIp = int(ipaddress.IPv4Address(unicode(ipAddr[0]))) + else: + startIp = int(ipaddress.IPv4Address(unicode(ipAddr[0]))) + endIp = 
int(ipaddress.IPv4Address(unicode(ipAddr[1]))) + + if ((ip >= startIp) and (ip <= endIp)): + return True + + return False + +def getTwiceNatIdCountWithStaticEntries(twice_nat_id, table, count): + """Get the twice nat id count with static entries""" + + config_db = ConfigDBConnector() + config_db.connect() + + static_dict = config_db.get_table(table) + twice_id_count = count + + if not static_dict: + return twice_id_count + + for key,values in static_dict.items(): + twice_id = 0 + + if "twice_nat_id" in values: + twice_id = int(values["twice_nat_id"]) + else: + continue + + if twice_id == twice_nat_id: + twice_id_count += 1 + + return twice_id_count + +def getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, dynamic_key): + """Get the twice nat id count with dynamic binding""" + + config_db = ConfigDBConnector() + config_db.connect() + + nat_binding_dict = config_db.get_table('NAT_BINDINGS') + twice_id_count = count + + if not nat_binding_dict: + return twice_id_count + + for key, values in nat_binding_dict.items(): + nat_pool_data = config_db.get_entry('NAT_POOL',values["nat_pool"]) + twice_id = 0 + + if dynamic_key is not None: + if dynamic_key == key: + continue + + if not nat_pool_data: + continue + + if "twice_nat_id" in values: + if values["twice_nat_id"] == "NULL": + continue + else: + twice_id = int(values["twice_nat_id"]) + else: + continue + + if twice_id == twice_nat_id: + twice_id_count += 1 + + return twice_id_count + +############### NAT Configuration ################## + +# +# 'nat' group ('config nat ...') +# +@click.group('nat') +def nat(): + """NAT-related configuration tasks""" + pass + +# +# 'nat add' group ('config nat add ...') +# +@nat.group('add') +def add(): + """Add NAT-related configutation tasks""" + pass + +# +# 'nat remove' group ('config nat remove ...') +# +@nat.group('remove') +def remove(): + """Remove NAT-related configutation tasks""" + pass + +# +# 'nat set' group ('config nat set ...') +# +@nat.group('set') +def set(): + 
"""Set NAT-related timeout configutation tasks""" + pass + +# +# 'nat reset' group ('config nat reset ...') +# +@nat.group('reset') +def reset(): + """Reset NAT-related timeout configutation tasks""" + pass + +# +# 'nat add static' group ('config nat add static ...') +# +@add.group('static') +def static(): + """Add Static related configutation""" + pass + +# +# 'nat add static basic' command ('config nat add static basic ') +# +@static.command('basic') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('local_ip', metavar='', required=True) +@click.option('-nat_type', metavar='', required=False, type=click.Choice(["snat", "dnat"]), help="Set nat type") +@click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") +def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): + """Add Static NAT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "STATIC_NAT" + key = global_ip + dataKey1 = 'local_ip' + dataKey2 = 'nat_type' + dataKey3 = 'twice_nat_id' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == local_ip: + click.echo("Trying to add static nat entry, which is already present.") + entryFound = True + + if nat_type == 'snat': + ipAddress = local_ip + else: + ipAddress = global_ip + + if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAPT') is True: + ctx.fail("Given entry is overlapping with existing NAPT entry !!") + + if isOverlappingWithAnyDynamicEntry(ipAddress) is True: + ctx.fail("Given entry is overlapping with existing Dynamic entry !!") + + if entryFound is False: + counters_db = SonicV2Connector(host="127.0.0.1") + counters_db.connect(counters_db.COUNTERS_DB) + snat_entries = 0 + max_entries = 0 + exists = counters_db.exists(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if exists: + counter_entry = counters_db.get_all(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if 'SNAT_ENTRIES' in counter_entry: + snat_entries = counter_entry['SNAT_ENTRIES'] + if 'MAX_NAT_ENTRIES' in counter_entry: + max_entries = counter_entry['MAX_NAT_ENTRIES'] + + if int(snat_entries) >= int(max_entries): + click.echo("Max limit is reached for NAT entries, skipping adding the entry.") + entryFound = True + + if entryFound is False: + count = 0 + if twice_nat_id is not None: + count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, table, count) + count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, None) + if count > 1: + ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") + + if nat_type is not None and twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: nat_type, dataKey3: twice_nat_id}) + elif nat_type is not None: + config_db.set_entry(table, 
key, {dataKey1: local_ip, dataKey2: nat_type}) + elif twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey3: twice_nat_id}) + else: + config_db.set_entry(table, key, {dataKey1: local_ip}) + +# +# 'nat add static tcp' command ('config nat add static tcp ') +# +@static.command('tcp') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('global_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.argument('local_ip', metavar='', required=True) +@click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.option('-nat_type', metavar='', required=False, type=click.Choice(["snat", "dnat"]), help="Set nat type") +@click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") +def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): + """Add Static TCP Protocol NAPT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "STATIC_NAPT" + key = "{}|TCP|{}".format(global_ip, global_port) + dataKey1 = 'local_ip' + dataKey2 = 'local_port' + dataKey3 = 'nat_type' + dataKey4 = 'twice_nat_id' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == local_ip and data[dataKey2] == str(local_port): + click.echo("Trying to add static napt entry, which is already present.") + entryFound = True + + if nat_type == 'snat': + ipAddress = local_ip + else: + ipAddress = global_ip + + if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAT') is True: + ctx.fail("Given entry is overlapping with existing NAT entry !!") + + if entryFound is False: + counters_db = SonicV2Connector(host="127.0.0.1") + counters_db.connect(counters_db.COUNTERS_DB) + snat_entries = 0 + max_entries = 0 + exists = counters_db.exists(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if exists: + counter_entry = counters_db.get_all(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if 'SNAT_ENTRIES' in counter_entry: + snat_entries = counter_entry['SNAT_ENTRIES'] + if 'MAX_NAT_ENTRIES' in counter_entry: + max_entries = counter_entry['MAX_NAT_ENTRIES'] + + if int(snat_entries) >= int(max_entries): + click.echo("Max limit is reached for NAT entries, skipping adding the entry.") + entryFound = True + + if entryFound is False: + count = 0 + if twice_nat_id is not None: + count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, table, count) + count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, None) + if count > 1: + ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") + + if nat_type is not None and twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + elif nat_type is not None: + config_db.set_entry(table, key, 
{dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + elif twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + else: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + +# +# 'nat add static udp' command ('config nat add static udp ') +# +@static.command('udp') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('global_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.argument('local_ip', metavar='', required=True) +@click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.option('-nat_type', metavar='', required=False, type=click.Choice(["snat", "dnat"]), help="Set nat type") +@click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") +def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_nat_id): + """Add Static UDP Protocol NAPT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "STATIC_NAPT" + key = "{}|UDP|{}".format(global_ip, global_port) + dataKey1 = 'local_ip' + dataKey2 = 'local_port' + dataKey3 = 'nat_type' + dataKey4 = 'twice_nat_id' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == local_ip and data[dataKey2] == str(local_port): + click.echo("Trying to add static napt entry, which is already present.") + entryFound = True + + if nat_type == 'snat': + ipAddress = local_ip + else: + ipAddress = global_ip + + if isIpOverlappingWithAnyStaticEntry(ipAddress, 'STATIC_NAT') is True: + ctx.fail("Given entry is overlapping with existing NAT entry !!") + + if entryFound is False: + counters_db = SonicV2Connector(host="127.0.0.1") + counters_db.connect(counters_db.COUNTERS_DB) + snat_entries = 0 + max_entries = 0 + exists = counters_db.exists(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if exists: + counter_entry = counters_db.get_all(counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if 'SNAT_ENTRIES' in counter_entry: + snat_entries = counter_entry['SNAT_ENTRIES'] + if 'MAX_NAT_ENTRIES' in counter_entry: + max_entries = counter_entry['MAX_NAT_ENTRIES'] + + if int(snat_entries) >= int(max_entries): + click.echo("Max limit is reached for NAT entries, skipping adding the entry.") + entryFound = True + + if entryFound is False: + count = 0 + if twice_nat_id is not None: + count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, table, count) + count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, None) + if count > 1: + ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") + + if nat_type is not None and twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type, dataKey4: twice_nat_id}) + elif nat_type is not None: + config_db.set_entry(table, key, 
{dataKey1: local_ip, dataKey2: local_port, dataKey3: nat_type}) + elif twice_nat_id is not None: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port, dataKey4: twice_nat_id}) + else: + config_db.set_entry(table, key, {dataKey1: local_ip, dataKey2: local_port}) + +# +# 'nat remove static' group ('config nat remove static ...') +# +@remove.group('static') +def static(): + """Remove Static related configutation""" + pass + +# +# 'nat remove static basic' command ('config nat remove static basic ') +# +@static.command('basic') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('local_ip', metavar='', required=True) +def remove_basic(ctx, global_ip, local_ip): + """Remove Static NAT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = 'STATIC_NAT' + key = global_ip + dataKey = 'local_ip' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey] == local_ip: + config_db.set_entry(table, key, None) + entryFound = True + + if entryFound is False: + click.echo("Trying to delete static nat entry, which is not present.") + + +# +# 'nat remove static tcp' command ('config nat remove static tcp ') +# +@static.command('tcp') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('global_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.argument('local_ip', metavar='', required=True) +@click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) +def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): + """Remove Static TCP Protocol NAPT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "STATIC_NAPT" + key = "{}|TCP|{}".format(global_ip, global_port) + dataKey1 = 'local_ip' + dataKey2 = 'local_port' + + data = config_db.get_entry(table, key) + if data: + if data['local_ip'] == local_ip and data['local_port'] == str(local_port): + config_db.set_entry(table, key, None) + entryFound = True + + if entryFound is False: + click.echo("Trying to delete static napt entry, which is not present.") + +# +# 'nat remove static udp' command ('config nat remove static udp ') +# +@static.command('udp') +@click.pass_context +@click.argument('global_ip', metavar='', required=True) +@click.argument('global_port', metavar='', type=click.IntRange(1, 65535), required=True) +@click.argument('local_ip', metavar='', required=True) +@click.argument('local_port', metavar='', type=click.IntRange(1, 65535), required=True) +def remove_udp(ctx, global_ip, global_port, local_ip, local_port): + """Remove Static UDP Protocol NAPT-related configutation""" + + # Verify the ip address format + if is_valid_ipv4_address(local_ip) is False: + ctx.fail("Given local ip address {} is invalid. Please enter a valid local ip address !!".format(local_ip)) + + if is_valid_ipv4_address(global_ip) is False: + ctx.fail("Given global ip address {} is invalid. 
Please enter a valid global ip address !!".format(global_ip)) + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "STATIC_NAPT" + key = "{}|UDP|{}".format(global_ip, global_port) + dataKey1 = 'local_ip' + dataKey2 = 'local_port' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == local_ip and data[dataKey2] == str(local_port): + config_db.set_entry(table, key, None) + entryFound = True + + if entryFound is False: + click.echo("Trying to delete static napt entry, which is not present.") + +# +# 'nat remove static all' command ('config nat remove static all') +# +@static.command('all') +@click.pass_context +def remove_static_all(ctx): + """Remove all Static related configutation""" + + config_db = ConfigDBConnector() + config_db.connect() + + tables = ['STATIC_NAT', 'STATIC_NAPT'] + + for table_name in tables: + table_dict = config_db.get_table(table_name) + if table_dict: + for table_key_name in table_dict.keys(): + config_db.set_entry(table_name, table_key_name, None) + +# +# 'nat add pool' command ('config nat add pool ') +# +@add.command('pool') +@click.pass_context +@click.argument('pool_name', metavar='', required=True) +@click.argument('global_ip_range', metavar='', required=True) +@click.argument('global_port_range', metavar='', required=False) +def add_pool(ctx, pool_name, global_ip_range, global_port_range): + """Add Pool for Dynamic NAT-related configutation""" + + if len(pool_name) > 32: + ctx.fail("Invalid pool name. Maximum allowed pool name is 32 characters !!") + + # Verify the ip address range and format + ip_address = global_ip_range.split("-") + if len(ip_address) > 2: + ctx.fail("Given ip address range {} is invalid. Please enter a valid ip address range !!".format(global_ip_range)) + elif len(ip_address) == 2: + if is_valid_ipv4_address(ip_address[0]) is False: + ctx.fail("Given ip address {} is not valid global address. 
Please enter a valid ip address !!".format(ip_address[0])) + + if is_valid_ipv4_address(ip_address[1]) is False: + ctx.fail("Given ip address {} is not valid global address. Please enter a valid ip address !!".format(ip_address[1])) + + ipLowLimit = int(ipaddress.IPv4Address(ip_address[0])) + ipHighLimit = int(ipaddress.IPv4Address(ip_address[1])) + if ipLowLimit >= ipHighLimit: + ctx.fail("Given ip address range {} is invalid. Please enter a valid ip address range !!".format(global_ip_range)) + else: + if is_valid_ipv4_address(ip_address[0]) is False: + ctx.fail("Given ip address {} is not valid global address. Please enter a valid ip address !!".format(ip_address[0])) + ipLowLimit = int(ipaddress.IPv4Address(ip_address[0])) + ipHighLimit = int(ipaddress.IPv4Address(ip_address[0])) + + # Verify the port address range and format + if global_port_range is not None: + port_address = global_port_range.split("-") + + if len(port_address) > 2: + ctx.fail("Given port address range {} is invalid. Please enter a valid port address range !!".format(global_port_range)) + elif len(port_address) == 2: + if is_valid_port_address(port_address[0]) is False: + ctx.fail("Given port value {} is invalid. Please enter a valid port value !!".format(port_address[0])) + + if is_valid_port_address(port_address[1]) is False: + ctx.fail("Given port value {} is invalid. Please enter a valid port value !!".format(port_address[1])) + + portLowLimit = int(port_address[0]) + portHighLimit = int(port_address[1]) + if portLowLimit >= portHighLimit: + ctx.fail("Given port address range {} is invalid. Please enter a valid port address range !!".format(global_port_range)) + else: + if is_valid_port_address(port_address[0]) is False: + ctx.fail("Given port value {} is invalid. 
Please enter a valid port value !!".format(port_address[0])) + portLowLimit = int(port_address[0]) + portHighLimit = int(port_address[0]) + else: + global_port_range = "NULL" + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + table = "NAT_POOL" + key = pool_name + dataKey1 = 'nat_ip' + dataKey2 = 'nat_port' + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == global_ip_range and data[dataKey2] == global_port_range: + click.echo("Trying to add pool, which is already present.") + entryFound = True + + pool_dict = config_db.get_table(table) + if len(pool_dict) == 16: + click.echo("Failed to add pool, as already reached maximum pool limit 16.") + entryFound = True + + # Verify the Ip address is overlapping with any Static NAT entry + if entryFound == False: + static_dict = config_db.get_table('STATIC_NAT') + if static_dict: + for staticKey,staticValues in static_dict.items(): + global_ip = "---" + local_ip = "---" + nat_type = "dnat" + + if isinstance(staticKey, unicode) is True: + global_ip = staticKey + else: + continue + + local_ip = staticValues["local_ip"] + + if "nat_type" in staticValues: + nat_type = staticValues["nat_type"] + + if nat_type == "snat": + global_ip = local_ip + + ipAddress = int(ipaddress.IPv4Address(unicode(global_ip))) + if (ipAddress >= ipLowLimit and ipAddress <= ipHighLimit): + ctx.fail("Given Ip address entry is overlapping with existing Static NAT entry !!") + + if entryFound == False: + config_db.set_entry(table, key, {dataKey1: global_ip_range, dataKey2 : global_port_range}) + +# +# 'nat add binding' command ('config nat add binding ') +# +@add.command('binding') +@click.pass_context +@click.argument('binding_name', metavar='', required=True) +@click.argument('pool_name', metavar='', required=True) +@click.argument('acl_name', metavar='', required=False) +@click.option('-nat_type', metavar='', required=False, type=click.Choice(["snat", "dnat"]), help="Set nat type") 
+@click.option('-twice_nat_id', metavar='', required=False, type=click.IntRange(1, 9999), help="Set the twice nat id") +def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id): + """Add Binding for Dynamic NAT-related configutation""" + + entryFound = False + table = 'NAT_BINDINGS' + key = binding_name + dataKey1 = 'access_list' + dataKey2 = 'nat_pool' + dataKey3 = 'nat_type' + dataKey4 = 'twice_nat_id' + + if acl_name is None: + acl_name = "" + + if len(binding_name) > 32: + ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") + + config_db = ConfigDBConnector() + config_db.connect() + + data = config_db.get_entry(table, key) + if data: + if data[dataKey1] == acl_name and data[dataKey2] == pool_name: + click.echo("Trying to add binding, which is already present.") + entryFound = True + + binding_dict = config_db.get_table(table) + if len(binding_dict) == 16: + click.echo("Failed to add binding, as already reached maximum binding limit 16.") + entryFound = True + + if nat_type is not None: + if nat_type == "dnat": + click.echo("Ignored, DNAT is not yet suported for Binding ") + entryFound = True + else: + nat_type = "snat" + + if twice_nat_id is None: + twice_nat_id = "NULL" + + if entryFound is False: + count = 0 + if twice_nat_id is not None: + count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAT', count) + count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAPT', count) + count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, key) + if count > 1: + ctx.fail("Same Twice nat id is not allowed for more than 2 entries!!") + + config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id}) + +# +# 'nat remove pool' command ('config nat remove pool ') +# +@remove.command('pool') +@click.pass_context +@click.argument('pool_name', metavar='', required=True) +def remove_pool(ctx, pool_name): + """Remove Pool for Dynamic 
NAT-related configutation""" + + entryFound = False + table = "NAT_POOL" + key = pool_name + + if len(pool_name) > 32: + ctx.fail("Invalid pool name. Maximum allowed pool name is 32 characters !!") + + config_db = ConfigDBConnector() + config_db.connect() + + data = config_db.get_entry(table, key) + if not data: + click.echo("Trying to delete pool, which is not present.") + entryFound = True + + binding_dict = config_db.get_table('NAT_BINDINGS') + if binding_dict and entryFound == False: + for binding_name, binding_values in binding_dict.items(): + if binding_values['nat_pool'] == pool_name: + click.echo("Pool is not removed, as it is mapped to Binding {}, remove the pool binding first !!".format(binding_name)) + entryFound = True + break + + if entryFound == False: + config_db.set_entry(table, key, None) + +# +# 'nat remove pools' command ('config nat remove pools') +# +@remove.command('pools') +@click.pass_context +def remove_pools(ctx): + """Remove all Pools for Dynamic configutation""" + + config_db = ConfigDBConnector() + config_db.connect() + + entryFound = False + pool_table_name = 'NAT_POOL' + binding_table_name = 'NAT_BINDINGS' + binding_dict = config_db.get_table(binding_table_name) + pool_dict = config_db.get_table(pool_table_name) + if pool_dict: + for pool_key_name in pool_dict.keys(): + entryFound = False + for binding_name, binding_values in binding_dict.items(): + if binding_values['nat_pool'] == pool_key_name: + click.echo("Pool {} is not removed, as it is mapped to Binding {}, remove the pool binding first !!".format(pool_key_name,binding_name)) + entryFound = True + break + + if entryFound == False: + config_db.set_entry(pool_table_name, pool_key_name, None) + +# +# 'nat remove binding' command ('config nat remove binding ') +# +@remove.command('binding') +@click.pass_context +@click.argument('binding_name', metavar='', required=True) +def remove_binding(ctx, binding_name): + """Remove Binding for Dynamic NAT-related configutation""" + + 
entryFound = False + table = 'NAT_BINDINGS' + key = binding_name + + if len(binding_name) > 32: + ctx.fail("Invalid binding name. Maximum allowed binding name is 32 characters !!") + + config_db = ConfigDBConnector() + config_db.connect() + + data = config_db.get_entry(table, key) + if not data: + click.echo("Trying to delete binding, which is not present.") + entryFound = True + + if entryFound == False: + config_db.set_entry(table, key, None) + +# +# 'nat remove bindings' command ('config nat remove bindings') +# +@remove.command('bindings') +@click.pass_context +def remove_bindings(ctx): + """Remove all Bindings for Dynamic configutation""" + + config_db = ConfigDBConnector() + config_db.connect() + + binding_table_name = 'NAT_BINDINGS' + binding_dict = config_db.get_table(binding_table_name) + if binding_dict: + for binding_key_name in binding_dict.keys(): + config_db.set_entry(binding_table_name, binding_key_name, None) + +# +# 'nat add interface' command ('config nat add interface -nat_zone ') +# +@add.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.option('-nat_zone', metavar='', required=True, type=click.IntRange(0, 3), help="Set nat zone") +def add_interface(ctx, interface_name, nat_zone): + """Add interface related nat configuration""" + + config_db = ConfigDBConnector() + config_db.connect() + + if nat_interface_name_is_valid(interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + + if interface_name.startswith("Ethernet"): + interface_table_type = "INTERFACE" + elif interface_name.startswith("PortChannel"): + interface_table_type = "PORTCHANNEL_INTERFACE" + elif interface_name.startswith("Vlan"): + interface_table_type = "VLAN_INTERFACE" + elif interface_name.startswith("Loopback"): + interface_table_type = "LOOPBACK_INTERFACE" + + interface_table_dict = config_db.get_table(interface_table_type) + + if not interface_table_dict or interface_name not in interface_table_dict: + ctx.fail("Interface table is not present. Please configure ip-address on {} and apply the nat zone !!".format(interface_name)) + + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": nat_zone}) + +# +# 'nat remove interface' command ('config nat remove interface ') +# +@remove.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +def remove_interface(ctx, interface_name): + """Remove interface related NAT configuration""" + config_db = ConfigDBConnector() + config_db.connect() + + if nat_interface_name_is_valid(interface_name) is False: + ctx.fail("Interface name is invalid. Please enter a valid interface name!!") + + if interface_name.startswith("Ethernet"): + interface_table_type = "INTERFACE" + elif interface_name.startswith("PortChannel"): + interface_table_type = "PORTCHANNEL_INTERFACE" + elif interface_name.startswith("Vlan"): + interface_table_type = "VLAN_INTERFACE" + elif interface_name.startswith("Loopback"): + interface_table_type = "LOOPBACK_INTERFACE" + + interface_table_dict = config_db.get_table(interface_table_type) + + if not interface_table_dict or interface_name not in interface_table_dict: + ctx.fail("Interface table is not present. 
Ignoring the nat zone configuration") + + config_db.mod_entry(interface_table_type, interface_name, {"nat_zone": "0"}) + +# +# 'nat remove interfaces' command ('config nat remove interfaces') +# +@remove.command('interfaces') +@click.pass_context +def remove_interfaces(ctx): + """Remove all interface related NAT configuration""" + config_db = ConfigDBConnector() + config_db.connect() + + tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] + nat_config = {"nat_zone": "0"} + + for table_name in tables: + table_dict = config_db.get_table(table_name) + if table_dict: + for table_key_name in table_dict.keys(): + if isinstance(table_key_name, unicode) is False: + continue + + config_db.set_entry(table_name, table_key_name, nat_config) + +# +# 'nat feature' group ('config nat feature ') +# +@nat.group('feature') +def feature(): + """Enable or Disable the NAT feature""" + pass + +# +# 'nat feature enable' command ('config nat feature enable>') +# +@feature.command('enable') +@click.pass_context +def enable(ctx): + """Enbale the NAT feature """ + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "enabled"}) + +# +# 'nat feature disable' command ('config nat feature disable>') +# +@feature.command('disable') +@click.pass_context +def disable(ctx): + """Disable the NAT feature """ + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("NAT_GLOBAL", "Values", {"admin_mode": "disabled"}) + +# +# 'nat set timeout' command ('config nat set timeout ') +# +@set.command('timeout') +@click.pass_context +@click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) +def timeout(ctx, seconds): + """Set NAT timeout configuration""" + config_db = ConfigDBConnector() + config_db.connect() + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + +# +# 'nat set tcp-timeout' command ('config nat set tcp-timeout ') +# 
+@set.command('tcp-timeout') +@click.pass_context +@click.argument('seconds', metavar='', type=click.IntRange(300, 432000), required=True) +def tcp_timeout(ctx, seconds): + """Set NAT TCP timeout configuration""" + config_db = ConfigDBConnector() + config_db.connect() + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + +# +# 'nat set udp-timeout' command ('config nat set udp-timeout ') +# +@set.command('udp-timeout') +@click.pass_context +@click.argument('seconds', metavar='', type=click.IntRange(120, 600), required=True) +def udp_timeout(ctx, seconds): + """Set NAT UDP timeout configuration""" + config_db = ConfigDBConnector() + config_db.connect() + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + +# +# 'nat reset timeout' command ('config nat reset timeout') +# +@reset.command('timeout') +@click.pass_context +def timeout(ctx): + """Reset NAT timeout configuration to default value (600 seconds)""" + config_db = ConfigDBConnector() + config_db.connect() + seconds = 600 + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_timeout": seconds}) + +# +# 'nat reset tcp-timeout' command ('config nat reset tcp-timeout') +# +@reset.command('tcp-timeout') +@click.pass_context +def tcp_timeout(ctx): + """Reset NAT TCP timeout configuration to default value (86400 seconds)""" + config_db = ConfigDBConnector() + config_db.connect() + seconds = 86400 + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_tcp_timeout": seconds}) + +# +# 'nat reset udp-timeout' command ('config nat reset udp-timeout') +# +@reset.command('udp-timeout') +@click.pass_context +def udp_timeout(ctx): + """Reset NAT UDP timeout configuration to default value (300 seconds)""" + config_db = ConfigDBConnector() + config_db.connect() + seconds = 300 + + config_db.mod_entry("NAT_GLOBAL", "Values", {"nat_udp_timeout": seconds}) + +if __name__ == "__main__": + nat() + diff --git a/data/etc/bash_completion.d/pddf_fanutil 
b/data/etc/bash_completion.d/pddf_fanutil new file mode 100644 index 0000000000..408c0f4cd4 --- /dev/null +++ b/data/etc/bash_completion.d/pddf_fanutil @@ -0,0 +1,8 @@ +_pddf_fanutil_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _PDDF_FANUTIL_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _pddf_fanutil_completion -o default pddf_fanutil; diff --git a/data/etc/bash_completion.d/pddf_ledutil b/data/etc/bash_completion.d/pddf_ledutil new file mode 100644 index 0000000000..f39ceba823 --- /dev/null +++ b/data/etc/bash_completion.d/pddf_ledutil @@ -0,0 +1,8 @@ +_pddf_ledutil_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _PDDF_LEDUTIL_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _pddf_ledutil_completion -o default pddf_ledutil; diff --git a/data/etc/bash_completion.d/pddf_psuutil b/data/etc/bash_completion.d/pddf_psuutil new file mode 100644 index 0000000000..afc6ab02d5 --- /dev/null +++ b/data/etc/bash_completion.d/pddf_psuutil @@ -0,0 +1,8 @@ +_pddf_psuutil_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _PDDF_PSUUTIL_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _pddf_psuutil_completion -o default pddf_psuutil; diff --git a/data/etc/bash_completion.d/pddf_thermalutil b/data/etc/bash_completion.d/pddf_thermalutil new file mode 100644 index 0000000000..c536307594 --- /dev/null +++ b/data/etc/bash_completion.d/pddf_thermalutil @@ -0,0 +1,8 @@ +_pddf_thermalutil_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _PDDF_THERMALUTIL_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _pddf_thermalutil_completion -o default pddf_thermalutil; diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index ccba9c6112..0244f82a03 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -57,9 +57,16 @@ * [Reloading 
Configuration](#reloading-configuration) * [Loading Management Configuration](#loading-management-configuration) * [Saving Configuration to a File for Persistence](saving-configuration-to-a-file-for-persistence) +* [Management VRF](#Management-VRF) + * [Management VRF Show commands](#management-vrf-show-commands) + * [Management VRF Config commands](#management-vrf-config-commands) * [Mirroring](#mirroring) * [Mirroring Show commands](#mirroring-show-commands) * [Mirroring Config commands](#mirroring-config-commands) +* [NAT](#nat) + * [NAT Show commands](#nat-show-commands) + * [NAT Config commands](#nat-config-commands) + * [NAT Clear commands](#nat-clear-commands) * [NTP](#ntp) * [NTP show commands](#ntp-show-commands) * [NTP config commands](#ntp-config-commands) @@ -183,16 +190,16 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#basic- The management interface (eth0) in SONiC is configured (by default) to use DHCP client to get the IP address from the DHCP server. Connect the management interface to the same network in which your DHCP server is connected and get the IP address from DHCP server. The IP address received from DHCP server can be verified using the `/sbin/ifconfig eth0` Linux command. -SONiC does not provide a CLI to configure the static IP for the management interface. There are few alternate ways by which a static IP address can be configured for the management interface. - 1. Use the `/sbin/ifconfig eth0 ...` Linux command. NOTE: This configuration **will not** be preserved across reboots. +SONiC provides a CLI to configure the static IP for the management interface. There are few ways by which a static IP address can be configured for the management interface. + 1. Use the `config interface ip add eth0` command. - Example: ``` - admin@sonic:~$ /sbin/ifconfig eth0 10.11.12.13/24 + admin@sonic:~$ sudo config interface ip add eth0 20.11.12.13/24 20.11.12.254 ``` 2. 
Use config_db.json and configure the MGMT_INTERFACE key with the appropriate values. Refer [here](https://github.com/Azure/SONiC/wiki/Configuration#Management-Interface) 3. Use minigraph.xml and configure "ManagementIPInterfaces" tag inside "DpgDesc" tag as given at the [page](https://github.com/Azure/SONiC/wiki/Configuration-with-Minigraph-(~Sep-2017)) -Once the IP address is configured, the same can be verified using "/sbin/ifconfig eth0" linux command. +Once the IP address is configured, the same can be verified using either `show management_interface address` command or the `/sbin/ifconfig eth0` linux command. Users can SSH login to this management interface IP address from their management network. - Example: @@ -247,6 +254,7 @@ This command lists all the possible configuration commands at the top level. load_mgmt_config Reconfigure hostname and mgmt interface based... load_minigraph Reconfigure based on minigraph. mirror_session + nat NAT-related configuration tasks platform Platform-related configuration tasks portchannel qos @@ -295,6 +303,7 @@ This command displays the full list of show commands available in the software; mac Show MAC (FDB) entries mirror_session Show existing everflow sessions mmu Show mmu configuration + nat Show details of the nat ndp Show IPv6 Neighbour table ntp Show NTP information pfc Show details of the priority-flow-control... 
@@ -376,6 +385,8 @@ This command displays relevant information as the SONiC and Linux kernel version docker-syncd-brcm latest 434240daff6e 362MB docker-orchagent-brcm HEAD.32-21ea29a e4f9c4631025 287MB docker-orchagent-brcm latest e4f9c4631025 287MB + docker-nat HEAD.32-21ea29a 46075edc1c69 305MB + docker-nat latest 46075edc1c69 305MB docker-lldp-sv2 HEAD.32-21ea29a 9681bbfea3ac 275MB docker-lldp-sv2 latest 9681bbfea3ac 275MB docker-dhcp-relay HEAD.32-21ea29a 2db34c7bc6f4 257MB @@ -1351,6 +1362,26 @@ This command displays the summary of all IPv4 & IPv6 bgp neighbors that are conf show ip bgp summary ``` +- Example: + ``` + admin@sonic-z9264f-9251:~# show ip bgp summary + + IPv4 Unicast Summary: + BGP router identifier 10.1.0.32, local AS number 65100 vrf-id 0 + BGP table version 6465 + RIB entries 12807, using 2001 KiB of memory + Peers 4, using 83 KiB of memory + Peer groups 2, using 128 bytes of memory + + Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName + 10.0.0.57 4 64600 3995 4001 0 0 0 00:39:32 6400 Lab-T1-01 + 10.0.0.59 4 64600 3995 3998 0 0 0 00:39:32 6400 Lab-T1-02 + 10.0.0.61 4 64600 3995 4001 0 0 0 00:39:32 6400 Lab-T1-03 + 10.0.0.63 4 64600 3995 3998 0 0 0 00:39:32 6400 NotAvailable + + Total number of neighbors 4 + ``` + - Example: ``` admin@sonic-z9264f-9251:~# show bgp summary @@ -1512,11 +1543,11 @@ This command displays the summary of all IPv6 bgp neighbors that are configured Peers 4, using 83 KiB of memory Peer groups 2, using 128 bytes of memory - Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd - fc00::72 4 64600 3995 5208 0 0 0 00:39:30 6400 - fc00::76 4 64600 3994 5208 0 0 0 00:39:30 6400 - fc00::7a 4 64600 3993 5208 0 0 0 00:39:30 6400 - fc00::7e 4 64600 3993 5208 0 0 0 00:39:30 6400 + Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName + fc00::72 4 64600 3995 5208 0 0 0 00:39:30 6400 Lab-T1-01 + fc00::76 4 64600 3994 5208 0 0 0 00:39:30 6400 Lab-T1-02 + fc00::7a 4 
64600 3993 5208 0 0 0 00:39:30 6400 Lab-T1-03 + fc00::7e 4 64600 3993 5208 0 0 0 00:39:30 6400 Lab-T1-04 Total number of neighbors 4 ``` @@ -2048,13 +2079,13 @@ Subsequent pages explain each of these commands in detail. **show interfaces counters** -This show command displays packet counters for all interfaces since the last time the counters were cleared. There is no facility to display counters for one specific interface. Optional argument "-a" does not have any significance in this command. -Optional argument "-c" can be used to clear the counters for all interfaces. +This show command displays packet counters for all interfaces since the last time the counters were cleared. To display l3 counters "rif" subcommand can be used. There is no facility to display counters for one specific l2 interface. For l3 interfaces a single interface output mode is present. Optional argument "-a" provides two additional columns - RX-PPS and TX_PPS. Optional argument "-p" specify a period (in seconds) with which to gather counters over. - Usage: ``` show interfaces counters [-a|--printall] [-p|--period ] + show interfaces counters rif [-p|--period ] ``` - Example: @@ -2071,10 +2102,49 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte Ethernet24 U 33,543,533,441 36.59 MB/s 0.71% 0 1,613 0 43,066,076,370 49.92 MB/s 0.97% 0 0 0 ``` +The "rif" subcommand is used to display l3 interface counters. Layer 3 interfaces include router interfaces, portchannels and vlan interfaces. 
+ +- Example: + +``` + admin@sonic:~$ show interfaces counters rif + IFACE RX_OK RX_BPS RX_PPS RX_ERR TX_OK TX_BPS TX_PPS TX_ERR +--------------- ------- ---------- -------- -------- ------- -------- -------- -------- +PortChannel0001 62,668 107.81 B/s 1.34/s 3 6 0.02 B/s 0.00/s 0 +PortChannel0002 62,645 107.77 B/s 1.34/s 3 2 0.01 B/s 0.00/s 0 +PortChannel0003 62,481 107.56 B/s 1.34/s 3 3 0.01 B/s 0.00/s 0 +PortChannel0004 62,732 107.88 B/s 1.34/s 2 3 0.01 B/s 0.00/s 0 + Vlan1000 0 0.00 B/s 0.00/s 0 0 0.00 B/s 0.00/s 0 +``` + + +Optionally, you can specify a layer 3 interface name to display the counters in single interface mode. + +- Example: + +``` + admin@sonic:~$ show interfaces counters rif PortChannel0001 + PortChannel0001 + --------------- + + RX: + 3269 packets + 778494 bytes + 3 error packets + 292 error bytes + TX: + 0 packets + 0 bytes + 0 error packets + 0 error bytes +``` + + Optionally, you can specify a period (in seconds) with which to gather counters over. Note that this function will take `` seconds to execute.
- Example: - ``` + +``` admin@sonic:~$ show interfaces counters -p 5 IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL TX_ERR TX_DRP TX_OVR ----------- ------- ------- ----------- --------- -------- -------- -------- ------- ----------- --------- -------- -------- -------- @@ -2085,13 +2155,20 @@ Optionally, you can specify a period (in seconds) with which to gather counters Ethernet16 U 377 32.64 KB/s 0.00% 0 0 0 214 18.01 KB/s 0.00% 0 0 0 Ethernet20 U 284 36.81 KB/s 0.00% 0 0 0 138 8758.25 B/s 0.00% 0 0 0 Ethernet24 U 173 16.09 KB/s 0.00% 0 0 0 169 11.39 KB/s 0.00% 0 0 0 - ``` +``` - NOTE: Interface counters can be cleared by the user with the following command: + ``` root@sonic:~# sonic-clear counters ``` +- NOTE: Layer 3 interface counters can be cleared by the user with the following command: + + ``` + root@sonic:~# sonic-clear rifcounters + ``` + **show interfaces description** This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and Description. @@ -2224,12 +2301,13 @@ The syntax for all such interface_subcommands are given below under each command NOTE: In older versions of SONiC until 201811 release, the command syntax was `config interface interface_subcommand` -**config interface ip add (Versions >= 201904)** +**config interface ip add [default_gw] (Versions >= 201904)** **config interface ip add (Versions <= 201811)** This command is used for adding the IP address for an interface. -IP address for either physical interface or for portchannel or for VLAN interface can be configured using this command. +IP address for either physical interface or for portchannel or for VLAN interface can be configured using this command. +While configuring the IP address for the management interface "eth0", users can provide the default gateway IP address as an optional parameter from release 201911. 
- Usage: @@ -2248,6 +2326,7 @@ IP address for either physical interface or for portchannel or for VLAN interfac *Versions >= 201904* ``` admin@sonic:~$ sudo config interface ip add Ethernet63 10.11.12.13/24 + admin@sonic:~$ sudo config interface ip add eth0 20.11.12.13/24 20.11.12.254 ``` *Versions <= 201811* ``` @@ -2288,6 +2367,7 @@ VLAN interface names take the form of `vlan`. E.g., VLAN 100 will be na *Versions >= 201904* ``` admin@sonic:~$ sudo config interface ip remove Ethernet63 10.11.12.13/24 + admin@sonic:~$ sudo config interface ip remove eth0 20.11.12.13/24 ``` *Versions <= 201811* ``` @@ -2981,6 +3061,213 @@ Saved file can be transferred to remote machines for debugging. If users wants t Go Back To [Beginning of the document](#) or [Beginning of this section](#loading-reloading-and-saving-configuration) +## Management VRF + +### Management VRF Show commands + +**show mgmt-vrf** + +This command displays whether the management VRF is enabled or disabled. It also displays the details about the links (eth0, mgmt, lo-m) that are related to management VRF. + +- Usage: + ``` + show mgmt-vrf + ``` + +- Example: + ``` + root@sonic:/etc/init.d# show mgmt-vrf + + ManagementVRF : Enabled + + Management VRF interfaces in Linux: + 348: mgmt: mtu 65536 qdisc noqueue state UP mode DEFAULT group default qlen 1000 + link/ether f2:2a:d9:bc:e8:f0 brd ff:ff:ff:ff:ff:ff + 2: eth0: mtu 1500 qdisc mq master mgmt state UP mode DEFAULT group default qlen 1000 + link/ether 4c:76:25:f4:f9:f3 brd ff:ff:ff:ff:ff:ff + 350: lo-m: mtu 1500 qdisc noqueue master mgmt state UNKNOWN mode DEFAULT group default qlen 1000 + link/ether b2:4c:c6:f3:e9:92 brd ff:ff:ff:ff:ff:ff + + NOTE: The management interface "eth0" shows the "master" as "mgmt" since it is part of management VRF. + ``` + +**show mgmt-vrf routes** + +This command displays the routes that are present in the routing table 5000 that is meant for management VRF.
+ +- Usage: + ``` + show mgmt-vrf routes + ``` + +- Example: + ``` + root@sonic:/etc/init.d# show mgmt-vrf routes + + Routes in Management VRF Routing Table: + default via 10.16.210.254 dev eth0 metric 201 + broadcast 10.16.210.0 dev eth0 proto kernel scope link src 10.16.210.75 + 10.16.210.0/24 dev eth0 proto kernel scope link src 10.16.210.75 + local 10.16.210.75 dev eth0 proto kernel scope host src 10.16.210.75 + broadcast 10.16.210.255 dev eth0 proto kernel scope link src 10.16.210.75 + broadcast 127.0.0.0 dev lo-m proto kernel scope link src 127.0.0.1 + 127.0.0.0/8 dev lo-m proto kernel scope link src 127.0.0.1 + local 127.0.0.1 dev lo-m proto kernel scope host src 127.0.0.1 + broadcast 127.255.255.255 dev lo-m proto kernel scope link src 127.0.0.1 + ``` + +**show management_interface address** + +This command displays the IP address(es) configured for the management interface "eth0" and the management network default gateway. + +- Usage: + ``` + show management_interface address + ``` + +- Example: + ``` + root@sonic:/etc/init.d# show management_interface address + Management IP address = 10.16.210.75/24 + Management NetWork Default Gateway = 10.16.210.254 + Management IP address = FC00:2::32/64 + Management Network Default Gateway = fc00:2::1 + ``` + +**show snmpagentaddress** + +This command displays the configured SNMP agent IP addresses. + +- Usage: + ``` + show snmpagentaddress + ``` + +- Example: + ``` + root@sonic-s6100-07:~# show snmpagentaddress + ListenIP ListenPort ListenVrf + ---------- ------------ ----------- + 1.2.3.4 787 mgmt + ``` + +**show snmptrap** + +This command displays the configured SNMP Trap server IP addresses. 
+ +- Usage: + ``` + show snmptrap + ``` + +- Example: + ``` + root@sonic-s6100-07:~# show snmptrap + Version TrapReceiverIP Port VRF Community + --------- ---------------- ------ ----- ----------- + 2 31.31.31.31 456 mgmt public + ``` + +### Management VRF Config commands + +**config vrf add mgmt** + +This command enables the management VRF in the system. This command restarts the "interfaces-config" service which in turn regenerates the /etc/network/interfaces file and restarts the "networking" service. This creates a new interface and l3mdev CGROUP with the name as "mgmt" and enslaves the management interface "eth0" into this master interface "mgmt". Note that the VRFName "mgmt" (or "management") is reserved for management VRF. i.e. Data VRFs should not use these reserved VRF names. + +- Usage: + ``` + config vrf add mgmt + ``` + +- Example: + ``` + root@sonic-s6100-07:~# config vrf add mgmt + ``` + +**config vrf del mgmt** + +This command disables the management VRF in the system. This command restarts the "interfaces-config" service which in turn regenerates the /etc/network/interfaces file and restarts the "networking" service. This deletes the interface "mgmt" and deletes the l3mdev CGROUP named "mgmt" and puts back the management interface "eth0" into the default VRF. Note that the VRFName "mgmt" (or "management") is reserved for management VRF. i.e. Data VRFs should not use these reserved VRF names. + +- Usage: + ``` + config vrf del mgmt + ``` + +- Example: + ``` + root@sonic-s6100-07:~# config vrf del mgmt + ``` + +**config snmpagentaddress add** + +This command adds the SNMP agent IP address on which the SNMP agent is expected to listen. When SNMP agent is expected to work as part of management VRF, users should specify the optional vrf_name parameter as "mgmt". This configuration goes into snmpd.conf that is used by SNMP agent. SNMP service is restarted to make this configuration effective in SNMP agent. 
+ +- Usage: + ``` + config snmpagentaddress add [-p ] [-v ] agentip + ``` + +- Example: + ``` + root@sonic-s6100-07:~#config snmpagentaddress add -v mgmt -p 123 21.22.13.14 + + For this example, configuration goes into /etc/snmp/snmpd.conf inside snmp docker as follows. When "-v" parameter is not used, the additional "%" in the following line will not be present. + + agentAddress 21.22.13.14:123%mgmt + ``` + +**config snmpagentaddress del** + +This command deletes the SNMP agent IP address on which the SNMP agent is expected to listen. When users had added the agent IP as part of "mgmt" VRF, users should specify the optional vrf_name parameter as "mgmt" while deleting as well. This configuration is removed from snmpd.conf that is used by SNMP agent. SNMP service is restarted to make this configuration effective in SNMP agent. + +- Usage: + ``` + config snmpagentaddress del [-p ] [-v ] agentip + ``` + +- Example: + ``` + root@sonic-s6100-07:~#config snmpagentaddress del -v mgmt -p 123 21.22.13.14 + + ``` + +**config snmptrap modify** + +This command modifies the SNMP trap server IP address to which the SNMP agent is expected to send the traps. Users can configure one server IP address for each SNMP version to send the traps. When SNMP agent is expected to send traps as part of management VRF, users should specify the optional vrf_name parameter as "mgmt". This configuration goes into snmpd.conf that is used by SNMP agent. SNMP service is restarted to make this configuration effective in SNMP agent. + +- Usage: + ``` + config snmptrap modify [-p ] [-v ] [-c ] trapserverip + ``` + +- Example: + ``` + root@sonic-s6100-07:~#config snmptrap modify 2 -p 456 -v mgmt 21.21.21.21 + + For this example, configuration goes into /etc/snmp/snmpd.conf inside snmp docker as follows. When "-v" parameter is not used, the additional "%" in the following line will not be present.
In case of SNMPv1, "trapsink" will be updated, in case of v2, "trap2sink" will be updated and in case of v3, "informsink" will be updated. + + trap2sink 31.31.31.31:456%mgmt public + + ``` + +**config snmptrap del** + +This command deletes the SNMP Trap server IP address to which SNMP agent is expected to send TRAPs. When users had added the trap server IP as part of "mgmt" VRF, users should specify the optional vrf_name parameter as "mgmt" while deleting as well. This configuration is removed from snmpd.conf that is used by SNMP agent. SNMP service is restarted to make this configuration effective in SNMP agent. + +- Usage: + ``` + config snmptrap del [-p ] [-v ] [-c ] trapserverip + ``` + +- Example: + ``` + root@sonic-s6100-07:~#config snmptrap del -v mgmt -p 123 21.22.13.14 + + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#management-vrf) + + ## Mirroring ### Mirroring Show commands @@ -3033,6 +3320,349 @@ While adding a new session, users need to configure the following fields that ar Go Back To [Beginning of the document](#) or [Beginning of this section](#mirroring) +## NAT + +### NAT Show commands + +**show nat config** + +This command displays the NAT configuration. + +- Usage: + ``` + show nat config [static | pool | bindings | globalvalues | zones] + ``` + +With no optional arguments, the whole NAT configuration is displayed. 
+ +- Example: + ``` + root@sonic:/# show nat config static + + Nat Type IP Protocol Global IP Global L4 Port Local IP Local L4 Port Twice-Nat Id + -------- ----------- ------------ -------------- ------------- ------------- ------------ + dnat all 65.55.45.5 --- 10.0.0.1 --- --- + dnat all 65.55.45.6 --- 10.0.0.2 --- --- + dnat tcp 65.55.45.7 2000 20.0.0.1 4500 1 + snat tcp 20.0.0.2 4000 65.55.45.8 1030 1 + + root@sonic:/# show nat config pool + + Pool Name Global IP Range Global L4 Port Range + ------------ ------------------------- -------------------- + Pool1 65.55.45.5 1024-65535 + Pool2 65.55.45.6-65.55.45.8 --- + Pool3 65.55.45.10-65.55.45.15 500-1000 + + root@sonic:/# show nat config bindings + + Binding Name Pool Name Access-List Nat Type Twice-Nat Id + ------------ ------------ ------------ -------- ------------ + Bind1 Pool1 --- snat --- + Bind2 Pool2 1 snat 1 + Bind3 Pool3 2 snat -- + + root@sonic:/# show nat config globalvalues + + Admin Mode : enabled + Global Timeout : 600 secs + TCP Timeout : 86400 secs + UDP Timeout : 300 secs + + root@sonic:/# show nat config zones + + Port Zone + ---- ---- + Ethernet2 0 + Vlan100 1 + ``` + +**show nat statistics** + +This command displays the NAT translation statistics for each entry. + +- Usage: + ``` + show nat statistics + ``` + +- Example: + ``` + root@sonic:/# show nat statistics + + Protocol Source Destination Packets Bytes + -------- --------- -------------- ------------- ------------- + all 10.0.0.1 --- 802 1009280 + all 10.0.0.2 --- 23 5590 + tcp 20.0.0.1:4500 --- 110 12460 + udp 20.0.0.1:4000 --- 1156 789028 + tcp 20.0.0.1:6000 --- 30 34800 + tcp 20.0.0.1:5000 65.55.42.1:2000 128 110204 + tcp 20.0.0.1:5500 65.55.42.1:2000 8 3806 + ``` + +**show nat translations** + +This command displays the NAT translation entries. + +- Usage: + ``` + show nat translations [count] + ``` +Giving the optional count argument displays only the details about the number of translation entries. 
+- Example: + ``` + root@sonic:/# show nat translations + + Static NAT Entries ................. 4 + Static NAPT Entries ................. 2 + Dynamic NAT Entries ................. 0 + Dynamic NAPT Entries ................. 4 + Static Twice NAT Entries ................. 0 + Static Twice NAPT Entries ................. 4 + Dynamic Twice NAT Entries ................ 0 + Dynamic Twice NAPT Entries ................ 0 + Total SNAT/SNAPT Entries ................ 9 + Total DNAT/DNAPT Entries ................ 9 + Total Entries ................ 14 + + Protocol Source Destination Translated Source Translated Destination + -------- --------- -------------- ----------------- ---------------------- + all 10.0.0.1 --- 65.55.42.2 --- + all --- 65.55.42.2 --- 10.0.0.1 + all 10.0.0.2 --- 65.55.42.3 --- + all --- 65.55.42.3 --- 10.0.0.2 + tcp 20.0.0.1:4500 --- 65.55.42.1:2000 --- + tcp --- 65.55.42.1:2000 --- 20.0.0.1:4500 + udp 20.0.0.1:4000 --- 65.55.42.1:1030 --- + udp --- 65.55.42.1:1030 --- 20.0.0.1:4000 + tcp 20.0.0.1:6000 --- 65.55.42.1:1024 --- + tcp --- 65.55.42.1:1024 --- 20.0.0.1:6000 + tcp 20.0.0.1:5000 65.55.42.1:2000 65.55.42.1:1025 20.0.0.1:4500 + tcp 20.0.0.1:4500 65.55.42.1:1025 65.55.42.1:2000 20.0.0.1:5000 + tcp 20.0.0.1:5500 65.55.42.1:2000 65.55.42.1:1026 20.0.0.1:4500 + tcp 20.0.0.1:4500 65.55.42.1:1026 65.55.42.1:2000 20.0.0.1:5500 + + root@sonic:/# show nat translations count + + Static NAT Entries ................. 4 + Static NAPT Entries ................. 2 + Dynamic NAT Entries ................. 0 + Dynamic NAPT Entries ................. 4 + Static Twice NAT Entries ................. 0 + Static Twice NAPT Entries ................. 4 + Dynamic Twice NAT Entries ................ 0 + Dynamic Twice NAPT Entries ................ 0 + Total SNAT/SNAPT Entries ................ 9 + Total DNAT/DNAPT Entries ................ 9 + Total Entries ................ 
14 + ``` + +### NAT Config commands + +**config nat add static** + +This command is used to add a static NAT or NAPT entry. +When configuring the Static NAT entry, user has to specify the following fields with 'basic' keyword. + +1. Global IP address, +2. Local IP address, +3. NAT type (snat / dnat) to be applied on the Global IP address. Default value is dnat. This is optional argument. +4. Twice NAT Id. This is optional argument used in case of twice nat configuration. + +When configuring the Static NAPT entry, user has to specify the following fields. + +1. IP protocol type (tcp / udp) +2. Global IP address + Port +3. Local IP address + Port +4. NAT type (snat / dnat) to be applied on the Global IP address + Port. Default value is dnat. This is optional argument. +5. Twice NAT Id. This is optional argument used in case of twice nat configuration. + +- Usage: + ``` + config nat add static {{basic (global-ip) (local-ip)} | {{tcp | udp} (global-ip) (global-port) (local-ip) (local-port)}} [-nat_type {snat | dnat}] [-twice_nat_id (value)] + ``` + +To delete a static NAT or NAPT entry, use the command below. Giving the all argument deletes all the configured static NAT and NAPT entries. +``` +config nat remove static {{basic (global-ip) (local-ip)} | {{tcp | udp} (global-ip) (global-port) (local-ip) (local-port)} | all} +``` +- Example: + ``` + root@sonic:/# config nat add static basic 65.55.45.1 12.12.12.14 -nat_type dnat + root@sonic:/# config nat add static tcp 65.55.45.2 100 12.12.12.15 200 -nat_type dnat + + root@sonic:/# show nat translations + + Static NAT Entries ................. 2 + Static NAPT Entries ................. 2 + Dynamic NAT Entries ................. 0 + Dynamic NAPT Entries ................. 0 + Static Twice NAT Entries ................. 0 + Static Twice NAPT Entries ................. 0 + Dynamic Twice NAT Entries ................ 0 + Dynamic Twice NAPT Entries ................ 0 + Total SNAT/SNAPT Entries ................
2 + Total DNAT/DNAPT Entries ................ 2 + Total Entries ................ 4 + + Protocol Source Destination Translated Source Translated Destination + -------- --------- -------------- ----------------- ---------------------- + all 12.12.12.14 --- 65.55.42.1 --- + all --- 65.55.42.1 --- 12.12.12.14 + tcp 12.12.12.15:200 --- 65.55.42.2:100 --- + tcp --- 65.55.42.2:100 --- 12.12.12.15:200 + ``` + +**config nat add pool** + +This command is used to create a NAT pool used for dynamic Source NAT or NAPT translations. +Pool can be configured in one of the following combinations. + +1. Global IP address range (or) +2. Global IP address + L4 port range (or) +3. Global IP address range + L4 port range. + +- Usage: + ``` + config nat add pool (pool-name) (global-ip-range) (global-port-range) + ``` +To delete a NAT pool, use the command. Pool cannot be removed if it is referenced by a NAT binding. Giving the pools argument removes all the configured pools. +``` +config nat remove {pool (pool-name) | pools} +``` +- Example: + ``` + root@sonic:/# config nat add pool pool1 65.55.45.2-65.55.45.10 + root@sonic:/# config nat add pool pool2 65.55.45.3 100-1024 + + root@sonic:/# show nat config pool + + Pool Name Global IP Range Global Port Range + ----------- ---------------------- ------------------- + pool1 65.55.45.2-65.55.45.10 --- + pool2 65.55.45.3 100-1024 + ``` + +**config nat add binding** + +This command is used to create a NAT binding between a pool and an ACL. The following fields are needed for configuring the binding. + + 1. ACL is an optional argument. If ACL argument is not given, the NAT binding is applicable to match all traffic. + 2. NAT type is an optional argument. Only DNAT type is supoprted for binding. + 3. Twice NAT Id is an optional argument. This Id is used to form a twice nat grouping with the static NAT/NAPT entry configured with the same Id. 
+ +- Usage: + ``` + config nat add binding (binding-name) [(pool-name)] [(acl-name)] [-nat_type {snat | dnat}] [-twice_nat_id (value)] + ``` +To delete a NAT binding, use the command below. Giving the bindings argument removes all the configured bindings. +``` +config nat remove {binding (binding-name) | bindings} +``` +- Example: + ``` + root@sonic:/# config nat add binding bind1 pool1 acl1 + root@sonic:/# config nat add binding bind2 pool2 + + root@sonic:/# show nat config bindings + + Binding Name Pool Name Access-List Nat Type Twice-NAT Id + -------------- ----------- ------------- ---------- -------------- + bind1 pool1 acl1 snat --- + bind2 pool2 snat --- + ``` + +**config nat add interface** + +This command is used to configure NAT zone on an L3 interface. Default value of NAT zone on an L3 interface is 0. Valid range of zone values is 0-3. + +- Usage: + ``` + config nat add interface (interface-name) -nat_zone (value) + ``` +To reset the NAT zone on an interface, use the command below. Giving the interfaces argument resets the NAT zone on all the L3 interfaces to 0. +``` +config nat remove {interface (interface-name) | interfaces} +``` +- Example: + ``` + root@sonic:/# config nat add interface Ethernet28 -nat_zone 1 + + root@sonic:/# show nat config zones + + Port Zone + ---------- ------ + Ethernet0 0 + Ethernet28 1 + Ethernet22 0 + Vlan2091 0 + ``` + +**config nat set** + +This command is used to set the NAT timeout values. Different timeout values can be configured for the NAT entry timeout, NAPT TCP entry timeout, NAPT UDP entry timeout. +Range for Global NAT entry timeout is 300 sec to 432000 sec, default value is 600 sec. +Range for TCP NAT/NAPT entry timeout is 300 sec to 432000 sec, default value is 86400 sec. +Range for UDP NAT/NAPT entry timeout is 120 sec to 600 sec, default value is 300 sec. 
+ +- Usage: + ``` + config nat set {tcp-timeout (value) | timeout (value) | udp-timeout (value)} + ``` +To reset the timeout values to the default values, use the command +``` +config nat reset {tcp-timeout | timeout | udp-timeout} +``` +- Example: + ``` + root@sonic:/# config nat add set tcp-timeout 3600 + + root@sonic:/# show nat config globalvalues + + Admin Mode : enabled + Global Timeout : 600 secs + TCP Timeout : 600 secs + UDP Timeout : 300 secs + ``` + +**config nat feature** + +This command is used to enable or disable the NAT feature. + +- Usage: + ``` + config nat feature {enable | disable} + ``` + +- Example: + ``` + root@sonic:/# config nat feature enable + root@sonic:/# config nat feature disable + ``` + +### NAT Clear commands + +**sonic-clear nat translations** + +This command is used to clear the dynamic NAT and NAPT translation entries. + +- Usage: + ``` + sonic-clear nat translations + ``` + +**sonic-clear nat statistics** + +This command is used to clear the statistics of all the NAT and NAPT entries. + +- Usage: + ``` + sonic-clear nat statistics + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#nat) + ## NTP @@ -3898,6 +4528,14 @@ This command displays the state of all the SONiC processes running inside a dock root 1 0 0 05:26 ? 00:00:12 /usr/bin/python /usr/bin/supervi root 24 1 0 05:26 ? 00:00:00 /usr/sbin/rsyslogd -n + nat docker + --------------------------- + USER PID PPID C STIME TTY TIME CMD + root 1 0 0 05:26 ? 00:00:12 /usr/bin/python /usr/bin/supervisord + root 18 1 0 05:26 ? 00:00:00 /usr/sbin/rsyslogd -n + root 23 1 0 05:26 ? 00:00:01 /usr/bin/natmgrd + root 34 1 0 05:26 ? 00:00:00 /usr/bin/natsyncd + snmp docker --------------------------- UID PID PPID C STIME TTY TIME CMD @@ -4351,6 +4989,7 @@ This command displays the warm_restart state. 
neighsyncd 0 teamsyncd 1 syncd 0 + natsyncd 0 ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#warm-restart) diff --git a/pddf_fanutil/__init__.py b/pddf_fanutil/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pddf_fanutil/main.py b/pddf_fanutil/main.py new file mode 100644 index 0000000000..dee001603b --- /dev/null +++ b/pddf_fanutil/main.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# +# main.py +# +# Command-line utility for interacting with FAN Controller in PDDF mode in SONiC +# + +try: + import sys + import os + import subprocess + import click + import imp + import syslog + import types + import traceback + from tabulate import tabulate + from utilities_common import util_base + from utilities_common.util_base import UtilLogger + from utilities_common.util_base import UtilHelper +except ImportError as e: + raise ImportError("%s - required module not found" % str(e)) + +VERSION = '1.0' + +SYSLOG_IDENTIFIER = "fanutil" +PLATFORM_SPECIFIC_MODULE_NAME = "fanutil" +PLATFORM_SPECIFIC_CLASS_NAME = "FanUtil" + +# Global platform-specific fanutil class instance +platform_fanutil = None + +#logger = UtilLogger(SYSLOG_IDENTIFIER) + +# This is our main entrypoint - the main 'fanutil' command +@click.group() +def cli(): + """pddf_fanutil - Command line utility for providing FAN information""" + + if os.geteuid() != 0: + click.echo("Root privileges are required for this operation") + sys.exit(1) + + # Load the helper class + helper = UtilHelper() + + if not helper.check_pddf_mode(): + click.echo("PDDF mode should be supported and enabled for this platform for this operation") + sys.exit(1) + + # Load platform-specific fanutil class + global platform_fanutil + try: + platform_fanutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME) + except Exception as e: + click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e))) + sys.exit(2) + +# 'version' subcommand 
+@cli.command() +def version(): + """Display version info""" + click.echo("PDDF fanutil version {0}".format(VERSION)) + +# 'numfans' subcommand +@cli.command() +def numfans(): + """Display number of FANs installed on device""" + click.echo(str(platform_fanutil.get_num_fans())) + +# 'status' subcommand +@cli.command() +@click.option('-i', '--index', default=-1, type=int, help="the index of FAN") +def status(index): + """Display FAN status""" + supported_fan = range(1, platform_fanutil.get_num_fans() + 1) + fan_ids = [] + if (index < 0): + fan_ids = supported_fan + else: + fan_ids = [index] + + header = ['FAN', 'Status'] + status_table = [] + + for fan in fan_ids: + msg = "" + fan_name = "FAN {}".format(fan) + if fan not in supported_fan: + click.echo("Error! The {} is not available on the platform.\n" \ + "Number of supported FAN - {}.".format(fan_name, platform_fanutil.get_num_fans())) + continue + presence = platform_fanutil.get_presence(fan) + if presence: + oper_status = platform_fanutil.get_status(fan) + msg = 'OK' if oper_status else "NOT OK" + else: + msg = 'NOT PRESENT' + status_table.append([fan_name, msg]) + + if status_table: + click.echo(tabulate(status_table, header, tablefmt="simple")) + +# 'direction' subcommand +@cli.command() +@click.option('-i', '--index', default=-1, type=int, help="the index of FAN") +def direction(index): + """Display FAN airflow direction""" + supported_fan = range(1, platform_fanutil.get_num_fans() + 1) + fan_ids = [] + if (index < 0): + fan_ids = supported_fan + else: + fan_ids = [index] + + header = ['FAN', 'Direction'] + status_table = [] + + for fan in fan_ids: + msg = "" + fan_name = "FAN {}".format(fan) + if fan not in supported_fan: + click.echo("Error! 
The {} is not available on the platform.\n" \ + "Number of supported FAN - {}.".format(fan_name, platform_fanutil.get_num_fans())) + continue + direction = platform_fanutil.get_direction(fan) + status_table.append([fan_name, direction]) + + if status_table: + click.echo(tabulate(status_table, header, tablefmt="simple")) + +# 'speed' subcommand +@cli.command() +@click.option('-i', '--index', default=-1, type=int, help="the index of FAN") +def getspeed(index): + """Display FAN speed in RPM""" + supported_fan = range(1, platform_fanutil.get_num_fans() + 1) + fan_ids = [] + if (index < 0): + fan_ids = supported_fan + else: + fan_ids = [index] + + header = ['FAN', 'Front Fan RPM', 'Rear Fan RPM'] + status_table = [] + + for fan in fan_ids: + msg = "" + fan_name = "FAN {}".format(fan) + if fan not in supported_fan: + click.echo("Error! The {} is not available on the platform.\n" \ + "Number of supported FAN - {}.".format(fan_name, platform_fanutil.get_num_fans())) + continue + front = platform_fanutil.get_speed(fan) + rear = platform_fanutil.get_speed_rear(fan) + status_table.append([fan_name, front, rear]) + + if status_table: + click.echo(tabulate(status_table, header, tablefmt="simple")) + +# 'setspeed' subcommand +@cli.command() +@click.argument('speed', type=int) +def setspeed(speed): + """Set FAN speed in percentage""" + if speed is None: + click.echo("speed value is required") + raise click.Abort() + + status = platform_fanutil.set_speed(speed) + if status: + click.echo("Successful") + else: + click.echo("Failed") + +@cli.group() +def debug(): + """pddf_fanutil debug commands""" + pass + +@debug.command() +def dump_sysfs(): + """Dump all Fan related SysFS paths""" + status = platform_fanutil.dump_sysfs() + + if status: + for i in status: + click.echo(i) + + + +if __name__ == '__main__': + cli() diff --git a/pddf_ledutil/__init__.py b/pddf_ledutil/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pddf_ledutil/main.py b/pddf_ledutil/main.py 
#!/usr/bin/env python
#
# main.py
#
# Command-line utility for interacting with the LED Controller in PDDF mode in SONiC
# (header previously said "FAN Controller" -- copy/paste from pddf_fanutil)
#

try:
    import sys
    import os
    import subprocess
    import click
    import imp
    import syslog
    import types
    import traceback
    from tabulate import tabulate
    from utilities_common import util_base
    from utilities_common.util_base import UtilLogger
    from utilities_common.util_base import UtilHelper
except ImportError as e:
    raise ImportError("%s - required module not found" % str(e))

VERSION = '1.0'

SYSLOG_IDENTIFIER = "ledutil"
PLATFORM_SPECIFIC_MODULE_NAME = "ledutil"
PLATFORM_SPECIFIC_CLASS_NAME = "LedUtil"

# Global platform-specific ledutil class instance
platform_ledutil = None

#logger = UtilLogger(SYSLOG_IDENTIFIER)

# ==================== CLI commands and groups ====================


# This is our main entrypoint - the main 'ledutil' command
@click.group()
def cli():
    """pddf_ledutil - Command line utility for providing LED information"""

    if os.geteuid() != 0:
        click.echo("Root privileges are required for this operation")
        sys.exit(1)

    # Load the helper class
    helper = UtilHelper()

    if not helper.check_pddf_mode():
        click.echo("PDDF mode should be supported and enabled for this platform for this operation")
        sys.exit(1)

    # Load platform-specific ledutil class
    global platform_ledutil
    try:
        platform_ledutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
    except Exception as e:
        click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
        sys.exit(2)

# 'version' subcommand
@cli.command()
def version():
    """Display version info"""
    click.echo("PDDF ledutil version {0}".format(VERSION))

# 'getstatusled' subcommand
@cli.command()
@click.argument('device_name', type=click.STRING)
@click.argument('index', type=click.STRING)
def getstatusled(device_name, index):
    """Display the status LED color of the given device."""
    if device_name is None:
        click.echo("device_name is required")
        raise click.Abort()

    outputs = platform_ledutil.get_status_led(device_name, index)
    click.echo(outputs)


# 'setstatusled' subcommand
@cli.command()
@click.argument('device_name', type=click.STRING)
@click.argument('index', type=click.STRING)
@click.argument('color', type=click.STRING)
@click.argument('color_state', type=click.STRING)
def setstatusled(device_name, index, color, color_state):
    """Set the status LED color/state of the given device."""
    if device_name is None:
        click.echo("device_name is required")
        raise click.Abort()

    outputs = platform_ledutil.set_status_led(device_name, index, color, color_state)
    click.echo(outputs)

if __name__ == '__main__':
    cli()
@click.group()
def cli():
    """psuutil - Command line utility for providing PSU status"""

    if os.geteuid() != 0:
        click.echo("Root privileges are required for this operation")
        sys.exit(1)

    # Load the helper class
    helper = UtilHelper()

    if not helper.check_pddf_mode():
        click.echo("PDDF mode should be supported and enabled for this platform for this operation")
        sys.exit(1)

    # Load platform-specific psuutil class
    # (comment previously said "fanutil" -- copy/paste from pddf_fanutil)
    global platform_psuutil
    try:
        platform_psuutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
    except Exception as e:
        click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
        sys.exit(2)

# 'version' subcommand
@cli.command()
def version():
    """Display version info"""
    click.echo("psuutil version {0}".format(VERSION))

# 'numpsus' subcommand
@cli.command()
def numpsus():
    """Display number of supported PSUs on device"""
    click.echo(str(platform_psuutil.get_num_psus()))

# 'status' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def status(index):
    """Display PSU status (all PSUs, or one PSU selected with -i)."""
    supported_psu = range(1, platform_psuutil.get_num_psus() + 1)
    if index < 0:
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    header = ['PSU', 'Status']
    status_table = []

    for psu in psu_ids:
        psu_name = "PSU {}".format(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, platform_psuutil.get_num_psus()))
            continue
        presence = platform_psuutil.get_psu_presence(psu)
        if presence:
            oper_status = platform_psuutil.get_psu_status(psu)
            msg = 'OK' if oper_status else "NOT OK"
        else:
            msg = 'NOT PRESENT'
        status_table.append([psu_name, msg])

    if status_table:
        click.echo(tabulate(status_table, header, tablefmt="simple"))

# 'mfrinfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def mfrinfo(index):
    """Display PSU manufacturer info (model, serial, fan airflow direction)."""
    supported_psu = range(1, platform_psuutil.get_num_psus() + 1)
    if index < 0:
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    for psu in psu_ids:
        psu_name = "PSU {}".format(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, platform_psuutil.get_num_psus()))
            continue
        status = platform_psuutil.get_psu_status(psu)
        if not status:
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        model_name = platform_psuutil.get_model(psu)
        mfr_id = platform_psuutil.get_mfr_id(psu)
        serial_num = platform_psuutil.get_serial(psu)
        airflow_dir = platform_psuutil.get_direction(psu)

        click.echo("{} is OK\nManufacture Id: {}\n" \
                   "Model: {}\nSerial Number: {}\n" \
                   "Fan Direction: {}\n".format(psu_name, mfr_id, model_name, serial_num, airflow_dir))


# 'seninfo' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of PSU")
def seninfo(index):
    """Display PSU sensor info (output voltage/current/power and fan speed)."""
    supported_psu = range(1, platform_psuutil.get_num_psus() + 1)
    if index < 0:
        psu_ids = supported_psu
    else:
        psu_ids = [index]

    for psu in psu_ids:
        psu_name = "PSU {}".format(psu)
        if psu not in supported_psu:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported PSU - {}.".format(psu_name, platform_psuutil.get_num_psus()))
            continue
        oper_status = platform_psuutil.get_psu_status(psu)

        if not oper_status:
            click.echo("{} is Not OK\n".format(psu_name))
            continue

        v_out = platform_psuutil.get_output_voltage(psu)
        i_out = platform_psuutil.get_output_current(psu)
        p_out = platform_psuutil.get_output_power(psu)
        # p_out would be in micro watts, convert it into milli watts
        p_out = p_out/1000

        fan1_rpm = platform_psuutil.get_fan_speed(psu, 1)
        click.echo("{} is OK\nOutput Voltage: {} mv\n" \
                   "Output Current: {} ma\nOutput Power: {} mw\n" \
                   "Fan1 Speed: {} rpm\n".format(psu_name, v_out, i_out, p_out, fan1_rpm))

@cli.group()
def debug():
    """pddf_psuutil debug commands"""
    pass

@debug.command()
def dump_sysfs():
    """Dump all PSU related SysFS paths"""
    status = platform_psuutil.dump_sysfs()

    if status:
        for i in status:
            click.echo(i)


if __name__ == '__main__':
    cli()
PLATFORM_SPECIFIC_CLASS_NAME = "ThermalUtil"

# Global platform-specific thermalutil class instance
platform_thermalutil = None

#logger = UtilLogger(SYSLOG_IDENTIFIER)

# ==================== CLI commands and groups ====================


# This is our main entrypoint - the main 'thermalutil' command
@click.group()
def cli():
    """pddf_thermalutil - Command line utility for providing Temp Sensors information"""

    if os.geteuid() != 0:
        click.echo("Root privileges are required for this operation")
        sys.exit(1)

    # Load the helper class
    helper = UtilHelper()

    if not helper.check_pddf_mode():
        click.echo("PDDF mode should be supported and enabled for this platform for this operation")
        sys.exit(1)

    # Load platform-specific thermalutil class
    # (comment previously said "fanutil" -- copy/paste from pddf_fanutil)
    global platform_thermalutil
    try:
        platform_thermalutil = helper.load_platform_util(PLATFORM_SPECIFIC_MODULE_NAME, PLATFORM_SPECIFIC_CLASS_NAME)
    except Exception as e:
        click.echo("Failed to load {}: {}".format(PLATFORM_SPECIFIC_MODULE_NAME, str(e)))
        sys.exit(2)


# 'version' subcommand
@cli.command()
def version():
    """Display version info"""
    click.echo("PDDF thermalutil version {0}".format(VERSION))

# 'numthermals' subcommand
@cli.command()
def numthermals():
    """Display number of Thermal Sensors installed """
    click.echo(str(platform_thermalutil.get_num_thermals()))

# 'gettemp' subcommand
@cli.command()
@click.option('-i', '--index', default=-1, type=int, help="the index of Temp Sensor")
def gettemp(index):
    """Display Temperature values of thermal sensors"""
    supported_thermal = range(1, platform_thermalutil.get_num_thermals() + 1)
    if index < 0:
        thermal_ids = supported_thermal
    else:
        thermal_ids = [index]

    header = ['Temp Sensor', 'Label', 'Value']
    status_table = []

    for thermal in thermal_ids:
        thermal_name = "TEMP{}".format(thermal)
        if thermal not in supported_thermal:
            click.echo("Error! The {} is not available on the platform.\n" \
                       "Number of supported Temp - {}.".format(thermal_name, platform_thermalutil.get_num_thermals()))
            # Skip unsupported sensor indices. The original left this
            # 'continue' commented out and still queried the platform plugin
            # for the non-existent sensor; fanutil/psuutil both skip here.
            continue
        label, value = platform_thermalutil.show_thermal_temp_values(thermal)
        status_table.append([thermal_name, label, value])

    if status_table:
        click.echo(tabulate(status_table, header, tablefmt="simple"))

@cli.group()
def debug():
    """pddf_thermalutil debug commands"""
    pass

@debug.command()
def dump_sysfs():
    """Dump all Temp Sensor related SysFS paths"""
    status = platform_thermalutil.dump_sysfs()

    if status:
        for i in status:
            click.echo(i)


if __name__ == '__main__':
    cli()
#!/usr/bin/python

"""
Description: dump_nat_entries.py -- dump conntrack nat entries from kernel into a file
             so as to restore them during warm reboot
"""

import sys
import subprocess

def main():
    """Dump the kernel conntrack table to the warmboot area.

    Best effort: on failure only a message is printed, since warm reboot
    must proceed regardless.
    """
    # shell=True is required for the '>' redirection; the command string is
    # fixed, not built from any external input.
    ctdumpcmd = 'conntrack -L -j > /host/warmboot/nat/nat_entries.dump'
    p = subprocess.Popen(ctdumpcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.communicate()
    # communicate() already waits for process exit; the original called an
    # additional redundant wait(). returncode is valid here.
    if p.returncode != 0:
        print("Dumping conntrack entries failed")
        return

if __name__ == '__main__':
    main()
+""" +from __future__ import print_function + +import argparse + +from tabulate import tabulate +from swsssdk import SonicV2Connector + + +header = ['FAN', 'Speed', 'Direction', 'Presence', 'Status', 'Timestamp'] + +FAN_TABLE_NAME = 'FAN_INFO' +SPEED_FIELD_NAME = 'speed' +DIRECTION_FIELD_NAME = 'direction' +PRESENCE_FIELD_NAME = 'presence' +STATUS_FIELD_NAME = 'status' +TIMESTAMP_FIELD_NAME = 'timestamp' + + +class FanShow(object): + def __init__(self): + self.db = SonicV2Connector(host="127.0.0.1") + self.db.connect(self.db.STATE_DB) + + def show(self): + keys = self.db.keys(self.db.STATE_DB, FAN_TABLE_NAME + '*') + if not keys: + print('Fan Not detected\n') + return + + table = [] + for key in keys: + key_list = key.split('|') + if len(key_list) != 2: # error data in DB, log it and ignore + print('Warn: Invalid key in table FAN_INFO: {}'.format(key)) + continue + + name = key_list[1] + data_dict = self.db.get_all(self.db.STATE_DB, key) + try: + speed = float(data_dict[SPEED_FIELD_NAME]) + if speed > 100: + speed = '{}RPM'.format(int(speed)) + else: + speed = '{}%'.format(data_dict[SPEED_FIELD_NAME]) + except ValueError as e: + print('Warn: cannot convert speed value from {}'.format(data_dict[SPEED_FIELD_NAME])) + speed = data_dict[SPEED_FIELD_NAME] + + presence = data_dict[PRESENCE_FIELD_NAME].lower() + presence = 'Present' if presence == 'true' else 'Not Present' + status = data_dict[STATUS_FIELD_NAME].lower() + status = 'OK' if status == 'true' else 'Not OK' + + table.append((name, speed, data_dict[DIRECTION_FIELD_NAME], presence, status, data_dict[TIMESTAMP_FIELD_NAME])) + + if table: + table.sort() + print(tabulate(table, header, tablefmt='simple', stralign='right')) + else: + print('No fan status data available\n') + + +if __name__ == "__main__": + fanShow = FanShow() + fanShow.show() diff --git a/scripts/fast-reboot b/scripts/fast-reboot index e59f3dcbb5..ac33bf85da 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -13,6 +13,9 @@ STRICT=no 
REBOOT_METHOD="/sbin/kexec -e" ASSISTANT_IP_LIST="" ASSISTANT_SCRIPT="/usr/bin/neighbor_advertiser" +DEVPATH="/usr/share/sonic/device" +PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) +PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin" # Require 100M available on the hard drive for warm reboot temp files, # Size is in 1K blocks: @@ -141,7 +144,6 @@ function request_pre_shutdown() debug "Requesting pre-shutdown ..." /usr/bin/docker exec -i syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null || { error "Failed to request pre-shutdown" - exit "${EXIT_SYNCD_SHUTDOWN}" } } @@ -177,9 +179,9 @@ function wait_for_pre_shutdown_complete_or_fail() if [[ x"${STATE}" != x"pre-shutdown-succeeded" ]]; then debug "Syncd pre-shutdown failed: ${STATE} ..." - exit "${EXIT_SYNCD_SHUTDOWN}" + else + debug "Pre-shutdown succeeded ..." fi - debug "Pre-shutdown succeeded ..." } function backup_database() @@ -399,6 +401,17 @@ if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; t fi fi +# We are fully committed to reboot from this point on becasue critical +# service will go down and we cannot recover from it. +set +e + +# Kill nat docker after saving the conntrack table +debug "Stopping nat ..." +/usr/bin/dump_nat_entries.py +docker kill nat > /dev/null || true +systemctl stop nat +debug "Stopped nat ..." + # Kill radv before stopping BGP service to prevent annoucing our departure. debug "Stopping radv ..." docker kill radv &>/dev/null || [ $? == 1 ] @@ -471,7 +484,7 @@ if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; t fi debug "Stopping syncd ..." -systemctl stop syncd +systemctl stop syncd || debug "Ignore stopping syncd service error $?" debug "Stopped syncd ..." # Kill other containers to make the reboot faster @@ -482,7 +495,7 @@ debug "Stopping all remaining containers ..." 
for CONTAINER_NAME in $(docker ps --format '{{.Names}}'); do CONTAINER_STOP_RC=0 docker kill $CONTAINER_NAME &> /dev/null || CONTAINER_STOP_RC=$? - systemctl stop $CONTAINER_NAME + systemctl stop $CONTAINER_NAME || debug "Ignore stopping $CONTAINER_NAME error $?" if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container $CONTAINER_NAME RC $CONTAINER_STOP_RC ." fi @@ -490,12 +503,12 @@ done debug "Stopped all remaining containers ..." # Stop the docker container engine. Otherwise we will have a broken docker storage -systemctl stop docker.service +systemctl stop docker.service || debug "Ignore stopping docker service error $?" # Stop kernel modules for Nephos platform if [[ "$sonic_asic_type" = 'nephos' ]]; then - systemctl stop nps-modules-`uname -r`.service + systemctl stop nps-modules-`uname -r`.service || debug "Ignore stopping nps service error $?" fi # Update the reboot cause file to reflect that user issued this script @@ -513,6 +526,11 @@ if [ -x /sbin/hwclock ]; then /sbin/hwclock -w || /bin/true fi +if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_PLUGIN} ]; then + debug "Running ${PLATFORM} specific plugin..." + ${DEVPATH}/${PLATFORM}/${PLATFORM_PLUGIN} +fi + # Reboot: explicity call Linux native reboot under sbin debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..." 
exec ${REBOOT_METHOD} diff --git a/scripts/fdbshow b/scripts/fdbshow index 13d3630868..1c06d5a27d 100755 --- a/scripts/fdbshow +++ b/scripts/fdbshow @@ -76,11 +76,18 @@ class FdbShow(object): if br_port_id not in self.if_br_oid_map: continue port_id = self.if_br_oid_map[br_port_id] - if_name = self.if_oid_map[port_id] + if port_id in self.if_oid_map: + if_name = self.if_oid_map[port_id] + else: + if_name = port_id if 'vlan' in fdb: vlan_id = fdb["vlan"] elif 'bvid' in fdb: - vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) + try: + vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) + except: + vlan_id = fdb["bvid"] + print "Failed to get Vlan id for bvid {}\n".format(fdb["bvid"]) self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,)) self.bridge_mac_list.sort(key = lambda x: x[0]) diff --git a/scripts/generate_dump b/scripts/generate_dump index b6261fd64f..678630949f 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -142,6 +142,26 @@ save_bgp_neighbor() { done } +############################################################################### +# Dump the nat config, iptables rules and conntrack nat entries +# Globals: +# None +# Arguments: +# None +# Returns: +# None +############################################################################### +save_nat_info() { + save_cmd "iptables -t nat -nv -L" "nat.iptables" + save_cmd "conntrack -j -L" "nat.conntrack" + save_cmd "conntrack -j -L | wc" "nat.conntrackcount" + save_cmd "conntrack -L" "nat.conntrackall" + save_cmd "conntrack -L | wc" "nat.conntrackallcount" + save_cmd "show nat config" "nat.config" +} + +############################################################################### + ############################################################################### # Given list of proc files, saves proc files to tar. 
#!/usr/bin/python
"""
    Script to clear nat dynamic entries from Hardware and also to clear the nat statistics

    usage: natclear [-t | -s]
    arguments:
        -t, --translations
        -s, --statistics

"""

import argparse
import json
import sys
import subprocess

from natsort import natsorted
from swsssdk import SonicV2Connector
from tabulate import tabulate

class NatClear(object):
    """Publishes NAT flush requests to the APPL_DB notification channel."""

    def __init__(self):
        super(NatClear, self).__init__()
        self.db = SonicV2Connector(host="127.0.0.1")
        self.db.connect(self.db.APPL_DB)

    def send_notification(self, op, data):
        """Publish [op, data] as compact JSON on the FLUSHNATREQUEST channel."""
        opdata = [op, data]
        msg = json.dumps(opdata, separators=(',', ':'))
        self.db.publish('APPL_DB', 'FLUSHNATREQUEST', msg)

def main():
    parser = argparse.ArgumentParser(description='Clear the nat information',
                                     formatter_class=argparse.RawTextHelpFormatter,
                                     epilog="""
    Examples:
        natclear -t
        natclear -s
    """)

    parser.add_argument('-t', '--translations', action='store_true', help='Clear the nat translations')
    parser.add_argument('-s', '--statistics', action='store_true', help='Clear the nat statistics')

    args = parser.parse_args()

    clear_translations = args.translations
    clear_statistics = args.statistics

    try:
        nat = NatClear()
        if clear_translations:
            nat.send_notification("ENTRIES", "ALL")
            print("")
            print("Dynamic NAT entries are cleared.")
        elif clear_statistics:
            nat.send_notification("STATISTICS", "ALL")
            print("")
            print("NAT statistics are cleared.")
    except Exception as e:
        # str(e) rather than the Python-2-only (and deprecated) e.message.
        # Parenthesised single-argument prints produce identical output on
        # Python 2 and are valid on Python 3.
        print(str(e))
        sys.exit(1)

if __name__ == "__main__":
    main()
65.55.45.10-65.55.45.15 500-1000 + + root@sonic:/home/admin# sudo natconfig -b + + Binding Name Pool Name Access-List Nat Type Twice-Nat Id + ------------ ------------ ------------ -------- ------------ + Bind1 Pool1 --- snat --- + Bind2 Pool2 1 snat 1 + Bind3 Pool3 1,2 snat -- + + root@sonic:/home/admin# sudo natconfig -g + + Admin Mode : disabled + Global Timeout : 600 + TCP Timeout : 86400 + UDP Timeout : 300 + +""" + +import argparse +import json +import sys + +from natsort import natsorted +from tabulate import tabulate +from swsssdk import ConfigDBConnector + +class NatConfig(object): + + def __init__(self): + super(NatConfig,self).__init__() + self.config_db = ConfigDBConnector() + self.config_db.connect() + return + + def fetch_static_nat(self): + """ + Fetch Static NAT config from CONFIG DB. + """ + self.static_nat_data = [] + + static_nat_dict = self.config_db.get_table('STATIC_NAT') + + if not static_nat_dict: + return + + for key,values in static_nat_dict.items(): + ip_protocol = "all" + global_ip = "---" + global_port = "---" + local_ip = "---" + local_port = "---" + nat_type = "dnat" + twice_nat_id = "---" + + if isinstance(key, unicode) is True: + global_ip = key + else: + continue + + local_ip = values["local_ip"] + + if "local_port" in values: + local_port = values["local_port"] + + if "nat_type" in values: + nat_type = values["nat_type"] + + if "twice_nat_id" in values: + twice_nat_id = values["twice_nat_id"] + + self.static_nat_data.append((nat_type,) + (ip_protocol,) + (global_ip,) + (global_port,) + (local_ip,) + (local_port,) + (twice_nat_id,)) + + self.static_nat_data.sort(key = lambda x: x[0]) + + def fetch_static_napt(self): + """ + Fetch Static NAPT config from CONFIG DB. 
+ """ + self.static_napt_data = [] + + static_napt_dict = self.config_db.get_table('STATIC_NAPT') + + if not static_napt_dict: + return + + for key,values in static_napt_dict.items(): + ip_protocol = "all" + global_ip = "---" + global_port = "---" + local_ip = "---" + local_port = "---" + nat_type = "dnat" + twice_nat_id = "---" + + if isinstance(key, tuple) is False: + continue + + if (len(key) == 3): + global_ip = key[0] + global_port = key[2] + ip_protocol = key[1] + else: + continue + + local_ip = values["local_ip"] + + if "local_port" in values: + local_port = values["local_port"] + + if "nat_type" in values: + nat_type = values["nat_type"] + + if "twice_nat_id" in values: + twice_nat_id = values["twice_nat_id"] + + self.static_napt_data.append((nat_type,) + (ip_protocol,) + (global_ip,) + (global_port,) + (local_ip,) + (local_port,) + (twice_nat_id,)) + + self.static_napt_data.sort(key = lambda x: x[0]) + + def fetch_pool(self): + """ + Fetch NAT Pool config from CONFIG DB. + """ + self.nat_pool_data = [] + + nat_pool_dict = self.config_db.get_table('NAT_POOL') + + if not nat_pool_dict: + return + + for key,values in nat_pool_dict.items(): + pool_name = "---" + global_ip = "---" + global_port = "---" + + if isinstance(key, unicode) is True: + pool_name = key + else: + continue + + global_ip = values["nat_ip"] + + if "nat_port" in values: + if values["nat_port"] != "NULL": + global_port = values["nat_port"] + + self.nat_pool_data.append((pool_name,) + (global_ip,) + (global_port,)) + + self.nat_pool_data.sort(key = lambda x: x[0]) + + def fetch_binding(self): + """ + Fetch NAT Binding config from CONFIG DB. 
+ """ + self.nat_binding_data = [] + + nat_binding_dict = self.config_db.get_table('NAT_BINDINGS') + + if not nat_binding_dict: + return + + for key,values in nat_binding_dict.items(): + binding_name = "---" + pool_name = "---" + access_list = "---" + nat_type = "snat" + twice_nat_id = "---" + + if isinstance(key, unicode) is True: + binding_name = key + else: + continue + + pool_name = values["nat_pool"] + + if "access_list" in values: + access_list = values["access_list"] + + if "nat_type" in values: + nat_type = values["nat_type"] + + if "twice_nat_id" in values: + if values["twice_nat_id"] != "NULL": + twice_nat_id = values["twice_nat_id"] + + self.nat_binding_data.append((binding_name,) + (pool_name,) + (access_list,) + (nat_type,) + (twice_nat_id,)) + + self.nat_binding_data.sort(key = lambda x: x[0]) + + def fetch_nat_zone(self): + """ + Fetch NAT zone config from CONFIG DB. + """ + interfaces = ['INTERFACE', 'VLAN_INTERFACE', 'PORTCHANNEL_INTERFACE', 'LOOPBACK_INTERFACE'] + + self.nat_zone_data = [] + + for i in interfaces: + interface_zone_dict = self.config_db.get_table(i) + + if not interface_zone_dict: + continue + + for key,values in interface_zone_dict.items(): + zone = "0" + + if isinstance(key, unicode) is False: + continue + + if "nat_zone" in values: + zone = values["nat_zone"] + + self.nat_zone_data.append((key,) + (zone,)) + + self.nat_zone_data.sort(key = lambda x: x[0]) + + def display_static(self): + """ + Display the static nat and napt + """ + + HEADER = ['Nat Type', 'IP Protocol', 'Global IP', 'Global Port', 'Local IP', 'Local Port', ' Twice-NAT Id'] + output = [] + + for nat in self.static_nat_data: + output.append([nat[0], nat[1], nat[2], nat[3], nat[4], nat[5], nat[6]]) + + for napt in self.static_napt_data: + output.append([napt[0], napt[1], napt[2], napt[3], napt[4], napt[5], napt[6]]) + + print "" + print tabulate(output, HEADER) + print "" + + def display_pool(self): + """ + Display the nat pool + """ + + HEADER = ['Pool Name', 
'Global IP Range', 'Global Port Range'] + output = [] + + for nat in self.nat_pool_data: + output.append([nat[0], nat[1], nat[2]]) + + print "" + print tabulate(output, HEADER) + print "" + + def display_binding(self): + """ + Display the nat binding + """ + + HEADER = ['Binding Name', 'Pool Name', 'Access-List', 'Nat Type', 'Twice-NAT Id'] + output = [] + + for nat in self.nat_binding_data: + output.append([nat[0], nat[1], nat[2], nat[3], nat[4]]) + + print "" + print tabulate(output, HEADER) + print "" + + def display_global(self): + """ + Fetch NAT Global config from CONFIG DB and Display it. + """ + self.nat_global_data = [] + + global_data = self.config_db.get_entry('NAT_GLOBAL', 'Values') + if global_data: + print "" + if 'admin_mode' in global_data: + print "Admin Mode :", global_data['admin_mode'] + else: + print "Admin Mode : disabled" + if 'nat_timeout' in global_data: + print "Global Timeout :", global_data['nat_timeout'], "secs" + else: + print "Global Timeout : 600 secs" + if 'nat_tcp_timeout' in global_data: + print "TCP Timeout :", global_data['nat_tcp_timeout'], "secs" + else: + print "TCP Timeout : 86400 secs" + if 'nat_udp_timeout' in global_data: + print "UDP Timeout :", global_data['nat_udp_timeout'], "secs" + else: + print "UDP Timeout : 300 secs" + print "" + else: + print "" + print "Admin Mode : disabled" + print "Global Timeout : 600 secs" + print "TCP Timeout : 86400 secs" + print "UDP Timeout : 300 secs" + print "" + return + + def display_nat_zone(self): + """ + Display the nat zone + """ + + HEADER = ['Port', 'Zone'] + output = [] + + for nat in self.nat_zone_data: + output.append([nat[0], nat[1]]) + + print "" + print tabulate(output, HEADER) + print "" + +def main(): + parser = argparse.ArgumentParser(description='Display the nat configuration information', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" + Examples: + natconfig -s + natconfig -p + natconfig -b + natconfig -g + natconfig -z + """) + + 
parser.add_argument('-s', '--static', action='store_true', help='Show the nat static configuration') + parser.add_argument('-p', '--pool', action='store_true', help='Show the nat pool configuration') + parser.add_argument('-b', '--binding', action='store_true', help='Show the nat binding configuration') + parser.add_argument('-g', '--globalvalues', action='store_true', help='Show the nat global configuration') + parser.add_argument('-z', '--zones', action='store_true', help='Show the nat zone configuration') + + args = parser.parse_args() + + show_static = args.static + show_pool = args.pool + show_binding = args.binding + show_global = args.globalvalues + show_zone = args.zones + + try: + nat = NatConfig() + if show_static: + nat.fetch_static_nat() + nat.fetch_static_napt() + nat.display_static() + elif show_pool: + nat.fetch_pool() + nat.display_pool() + elif show_binding: + nat.fetch_binding() + nat.display_binding() + elif show_global: + nat.display_global() + elif show_zone: + nat.fetch_nat_zone() + nat.display_nat_zone() + except Exception as e: + print e.message + sys.exit(1) + +if __name__ == "__main__": + main() + diff --git a/scripts/natshow b/scripts/natshow new file mode 100644 index 0000000000..d0dc753702 --- /dev/null +++ b/scripts/natshow @@ -0,0 +1,418 @@ +#!/usr/bin/python + +""" + Script to show nat entries and nat statistics in a summary view + + Example of the output: + root@sonic:/home/admin# sudo natshow -c + + Static NAT Entries ..................... 2 + Static NAPT Entries ..................... 3 + Dynamic NAT Entries ..................... 0 + Dynamic NAPT Entries ..................... 0 + Static Twice NAT Entries ..................... 0 + Static Twice NAPT Entries ..................... 2 + Dynamic Twice NAT Entries ..................... 0 + Dynamic Twice NAPT Entries ..................... 0 + Total SNAT/SNAPT Entries ..................... 7 + Total DNAT/DNAPT Entries ..................... 0 + Total Entries ..................... 
7 + + root@sonic:/home/admin# sudo natshow -t + + Static NAT Entries ..................... 2 + Static NAPT Entries ..................... 3 + Dynamic NAT Entries ..................... 0 + Dynamic NAPT Entries ..................... 0 + Static Twice NAT Entries ..................... 0 + Static Twice NAPT Entries ..................... 2 + Dynamic Twice NAT Entries ..................... 0 + Dynamic Twice NAPT Entries ..................... 0 + Total SNAT/SNAPT Entries ..................... 7 + Total DNAT/DNAPT Entries ..................... 0 + Total Entries ..................... 7 + + Protocol Source Destination Translated Source Translated Destination + -------- ----------------- ------------------ ------------------ ---------------------- + all 10.0.0.1 --- 65.55.45.5 --- + all 10.0.0.2 --- 65.55.45.6 --- + tcp 20.0.0.1:4500 --- 65.55.45.7:2000 --- + udp 20.0.0.1:4000 --- 65.55.45.7:1030 --- + tcp 20.0.0.1:6000 --- 65.55.45.7:1024 --- + udp 20.0.0.1:7000 65.55.45.8:1200 65.55.45.7:1100 20.0.0.2:8000 + tcp 20.0.0.1:6000 65.55.45.8:1500 65.55.45.7:1300 20.0.0.3:9000 + + root@sonic:/home/admin# sudo natshow -s + + Protocol Source Destination Packets Bytes + -------- ----------------- ------------------ -------- --------- + all 10.0.0.1 --- 802 1009280 + all 10.0.0.2 --- 23 5590 + tcp 20.0.0.1:4500 --- 110 12460 + udp 20.0.0.1:4000 --- 1156 789028 + tcp 20.0.0.1:6000 --- 30 34800 + udp 20.0.0.1:7000 65.55.45.8:1200 128 110204 + tcp 20.0.0.1:6000 65.55.45.8:1500 8 3806 + +""" + +import argparse +import json +import sys +import re + +from natsort import natsorted +from swsssdk import SonicV2Connector +from tabulate import tabulate + +class NatShow(object): + + def __init__(self): + super(NatShow,self).__init__() + self.asic_db = SonicV2Connector(host="127.0.0.1") + self.appl_db = SonicV2Connector(host="127.0.0.1") + self.counters_db = SonicV2Connector(host="127.0.0.1") + return + + def fetch_count(self): + """ + Fetch NAT entries count from COUNTERS DB. 
+ """ + self.counters_db.connect(self.counters_db.COUNTERS_DB) + self.static_nat_entries = 0 + self.dynamic_nat_entries = 0 + self.static_napt_entries = 0 + self.dynamic_napt_entries = 0 + self.static_twice_nat_entries = 0 + self.dynamic_twice_nat_entries = 0 + self.static_twice_napt_entries = 0 + self.dynamic_twice_napt_entries = 0 + self.snat_entries = 0 + self.dnat_entries = 0 + + + exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if exists: + counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_GLOBAL_NAT:Values') + if 'STATIC_NAT_ENTRIES' in counter_entry: + self.static_nat_entries = counter_entry['STATIC_NAT_ENTRIES'] + if 'DYNAMIC_NAT_ENTRIES' in counter_entry: + self.dynamic_nat_entries = counter_entry['DYNAMIC_NAT_ENTRIES'] + if 'STATIC_NAPT_ENTRIES' in counter_entry: + self.static_napt_entries = counter_entry['STATIC_NAPT_ENTRIES'] + if 'DYNAMIC_NAPT_ENTRIES' in counter_entry: + self.dynamic_napt_entries = counter_entry['DYNAMIC_NAPT_ENTRIES'] + if 'STATIC_TWICE_NAT_ENTRIES' in counter_entry: + self.static_twice_nat_entries = counter_entry['STATIC_TWICE_NAT_ENTRIES'] + if 'DYNAMIC_TWICE_NAT_ENTRIES' in counter_entry: + self.dynamic_twice_nat_entries = counter_entry['DYNAMIC_TWICE_NAT_ENTRIES'] + if 'STATIC_TWICE_NAPT_ENTRIES' in counter_entry: + self.static_twice_napt_entries = counter_entry['STATIC_TWICE_NAPT_ENTRIES'] + if 'DYNAMIC_TWICE_NAPT_ENTRIES' in counter_entry: + self.dynamic_twice_napt_entries = counter_entry['DYNAMIC_TWICE_NAPT_ENTRIES'] + if 'SNAT_ENTRIES' in counter_entry: + self.snat_entries = counter_entry['SNAT_ENTRIES'] + if 'DNAT_ENTRIES' in counter_entry: + self.dnat_entries = counter_entry['DNAT_ENTRIES'] + + def fetch_translations(self): + """ + Fetch NAT entries from ASIC DB. 
+ """ + self.asic_db.connect(self.asic_db.ASIC_DB) + self.nat_entries_list = [] + + nat_str = self.asic_db.keys('ASIC_DB', "ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY:*") + if not nat_str: + return + + for s in nat_str: + nat_entry = s.decode() + nat = json.loads(nat_entry .split(":", 2)[-1]) + if not nat: + continue + + ip_protocol = "all" + source = "---" + destination = "---" + translated_dst = "---" + translated_src = "---" + + ent = self.asic_db.get_all('ASIC_DB', s, blocking=True) + + nat_type = ent[b"SAI_NAT_ENTRY_ATTR_NAT_TYPE"] + + if nat_type == "SAI_NAT_TYPE_DESTINATION_NAT": + translated_dst_ip = ent[b"SAI_NAT_ENTRY_ATTR_DST_IP"] + if "SAI_NAT_ENTRY_ATTR_L4_DST_PORT" in ent: + translated_dst_port = ent[b"SAI_NAT_ENTRY_ATTR_L4_DST_PORT"] + translated_dst = translated_dst_ip + ":" + translated_dst_port + else: + translated_dst = translated_dst_ip + elif nat_type == "SAI_NAT_TYPE_SOURCE_NAT": + translated_src_ip = ent[b"SAI_NAT_ENTRY_ATTR_SRC_IP"] + if "SAI_NAT_ENTRY_ATTR_L4_SRC_PORT" in ent: + translated_src_port = ent[b"SAI_NAT_ENTRY_ATTR_L4_SRC_PORT"] + translated_src = translated_src_ip + ":" + translated_src_port + else: + translated_src = translated_src_ip + elif nat_type == "SAI_NAT_TYPE_DOUBLE_NAT": + translated_dst_ip = ent[b"SAI_NAT_ENTRY_ATTR_DST_IP"] + if "SAI_NAT_ENTRY_ATTR_L4_DST_PORT" in ent: + translated_dst_port = ent[b"SAI_NAT_ENTRY_ATTR_L4_DST_PORT"] + translated_dst = translated_dst_ip + ":" + translated_dst_port + else: + translated_dst = translated_dst_ip + + translated_src_ip = ent[b"SAI_NAT_ENTRY_ATTR_SRC_IP"] + if "SAI_NAT_ENTRY_ATTR_L4_SRC_PORT" in ent: + translated_src_port = ent[b"SAI_NAT_ENTRY_ATTR_L4_SRC_PORT"] + translated_src = translated_src_ip + ":" + translated_src_port + else: + translated_src = translated_src_ip + + source_ip = nat['nat_data']['key']["src_ip"] + destination_ip = nat['nat_data']['key']["dst_ip"] + source_port = nat['nat_data']['key']["l4_src_port"] + destination_port = nat['nat_data']['key']["l4_dst_port"] + 
protocol = nat['nat_data']['key']["proto"] + + if (source_ip == "0.0.0.0"): + source_ip = "---" + + if (destination_ip == "0.0.0.0"): + destination_ip = "---" + + if (source_port != "0"): + source = source_ip + ":" + source_port + else: + source = source_ip + + if (destination_port != "0"): + destination = destination_ip + ":" + destination_port + else: + destination = destination_ip + + if (protocol == "6"): + ip_protocol = "tcp" + elif (protocol == "17"): + ip_protocol = "udp" + + self.nat_entries_list.append((ip_protocol,) + (source,) + (destination,) + (translated_src,) + (translated_dst,)) + + self.nat_entries_list.sort(key = lambda x: x[0]) + return + + def fetch_statistics(self): + """ + Fetch NAT statistics from Counters DB. + """ + self.appl_db.connect(self.appl_db.APPL_DB) + self.counters_db.connect(self.counters_db.COUNTERS_DB) + self.nat_statistics_list = [] + + nat_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, "NAT_TABLE:*") + if nat_table_keys: + for i in nat_table_keys: + nat_entry = re.split(':', i, maxsplit=1)[-1].strip() + if nat_entry: + exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_NAT:{}'.format(nat_entry)) + + if not exists: + continue + + nat_keys = re.split(':', nat_entry) + nat_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TABLE:{}'.format(nat_entry)) + + ip_protocol = "all" + source = "---" + destination = "---" + + if nat_values['nat_type'] == "snat": + source = nat_keys[0] + else: + destination = nat_keys[0] + + counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_NAT:{}'.format(nat_entry)) + packets = counter_entry['NAT_TRANSLATIONS_PKTS'] + byte = counter_entry['NAT_TRANSLATIONS_BYTES'] + + self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,)) + + napt_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, "NAPT_TABLE:*") + if napt_table_keys: + for i in napt_table_keys: + napt_entry = re.split(':', i, 
maxsplit=1)[-1].strip() + if napt_entry: + exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_NAPT:{}'.format(napt_entry)) + + if not exists: + continue + + napt_keys = re.split(':', napt_entry) + napt_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TABLE:{}'.format(napt_entry)) + + ip_protocol = napt_keys[0] + source = "---" + destination = "---" + + if napt_values['nat_type'] == "snat": + source = napt_keys[1] + ':' + napt_keys[2] + else: + destination = napt_keys[1] + ':' + napt_keys[2] + + counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_NAPT:{}'.format(napt_entry)) + packets = counter_entry['NAT_TRANSLATIONS_PKTS'] + byte = counter_entry['NAT_TRANSLATIONS_BYTES'] + + self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,)) + + nat_twice_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, "NAT_TWICE_TABLE:*") + if nat_twice_table_keys: + for i in nat_twice_table_keys: + nat_twice_entry = re.split(':', i, maxsplit=1)[-1].strip() + if nat_twice_entry: + exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAT:{}'.format(nat_twice_entry)) + + if not exists: + continue + + nat_twice_keys = re.split(':', nat_twice_entry) + nat_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TWICE_TABLE:{}'.format(nat_twice_entry)) + + ip_protocol = "all" + source = "---" + destination = "---" + + source = nat_twice_keys[0] + destination = nat_twice_keys[1] + + counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAT:{}'.format(nat_twice_entry)) + packets = counter_entry['NAT_TRANSLATIONS_PKTS'] + byte = counter_entry['NAT_TRANSLATIONS_BYTES'] + + self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,)) + + napt_twice_table_keys = self.appl_db.keys(self.appl_db.APPL_DB, "NAPT_TWICE_TABLE:*") + if napt_twice_table_keys: + for i in napt_twice_table_keys: + 
napt_twice_entry = re.split(':', i, maxsplit=1)[-1].strip() + if napt_twice_entry: + exists = self.counters_db.exists(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAPT:{}'.format(napt_twice_entry)) + + if not exists: + continue + + napt_twice_keys = re.split(':', napt_twice_entry) + napt_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TWICE_TABLE:{}'.format(napt_twice_entry)) + + ip_protocol = napt_twice_keys[0] + source = "---" + destination = "---" + + source = napt_twice_keys[1] + ':' + napt_twice_keys[2] + destination = napt_twice_keys[3] + ':' + napt_twice_keys[4] + + counter_entry = self.counters_db.get_all(self.counters_db.COUNTERS_DB, 'COUNTERS_TWICE_NAPT:{}'.format(napt_twice_entry)) + packets = counter_entry['NAT_TRANSLATIONS_PKTS'] + byte = counter_entry['NAT_TRANSLATIONS_BYTES'] + + self.nat_statistics_list.append((ip_protocol,) + (source,) + (destination,) + (packets,) + (byte,)) + + self.nat_statistics_list.sort(key = lambda x: x[0]) + return + + def display_count(self): + """ + Display the nat entries count + """ + + totalEntries = int(self.static_nat_entries) + int(self.dynamic_nat_entries) + int(self.static_napt_entries) + int(self.dynamic_napt_entries) + totalEntries += (int(self.static_twice_nat_entries) + int(self.dynamic_twice_nat_entries) + int(self.static_twice_napt_entries) + int(self.dynamic_twice_napt_entries)) + + print "" + print "Static NAT Entries ..................... {}".format(self.static_nat_entries) + print "Static NAPT Entries ..................... {}".format(self.static_napt_entries) + print "Dynamic NAT Entries ..................... {}".format(self.dynamic_nat_entries) + print "Dynamic NAPT Entries ..................... {}".format(self.dynamic_napt_entries) + print "Static Twice NAT Entries ..................... {}".format(self.static_twice_nat_entries) + print "Static Twice NAPT Entries ..................... {}".format(self.static_twice_napt_entries) + print "Dynamic Twice NAT Entries ..................... 
{}".format(self.dynamic_twice_nat_entries) + print "Dynamic Twice NAPT Entries ..................... {}".format(self.dynamic_twice_napt_entries) + print "Total SNAT/SNAPT Entries ..................... {}".format(self.snat_entries) + print "Total DNAT/DNAPT Entries ..................... {}".format(self.dnat_entries) + print "Total Entries ..................... {}".format(totalEntries) + print "" + + def display_translations(self): + """ + Display the nat transactions + """ + + HEADER = ['Protocol', 'Source', 'Destination', 'Translated Source', 'Translated Destination'] + output = [] + + for nat in self.nat_entries_list: + output.append([nat[0], nat[1], nat[2], nat[3], nat[4]]) + + print tabulate(output, HEADER) + print "" + + def display_statistics(self): + """ + Display the nat statistics + """ + + HEADER = ['Protocol', 'Source', 'Destination', 'Packets', 'Bytes'] + output = [] + + for nat in self.nat_statistics_list: + output.append([nat[0], nat[1], nat[2], nat[3], nat[4]]) + + print "" + print tabulate(output, HEADER) + print "" + +def main(): + parser = argparse.ArgumentParser(description='Display the nat information', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" + Examples: + natshow -t + natshow -s + natshow -c + """) + + parser.add_argument('-t', '--translations', action='store_true', help='Show the nat translations') + parser.add_argument('-s', '--statistics', action='store_true', help='Show the nat statistics') + parser.add_argument('-c', '--count', action='store_true', help='Show the nat translations count') + + args = parser.parse_args() + + show_translations = args.translations + show_statistics = args.statistics + show_count = args.count + + try: + if show_translations: + nat = NatShow() + nat.fetch_count() + nat.fetch_translations() + nat.display_count() + nat.display_translations() + elif show_statistics: + nat = NatShow() + nat.fetch_statistics() + nat.display_statistics() + elif show_count: + nat = NatShow() + nat.fetch_count() + 
nat.display_count() + + except Exception as e: + print e.message + sys.exit(1) + +if __name__ == "__main__": + main() + diff --git a/scripts/nbrshow b/scripts/nbrshow index 18a49f8eac..7438933fc3 100644 --- a/scripts/nbrshow +++ b/scripts/nbrshow @@ -84,11 +84,18 @@ class NbrBase(object): if br_port_id not in self.if_br_oid_map: continue port_id = self.if_br_oid_map[br_port_id] - if_name = self.if_oid_map[port_id] + if port_id in self.if_oid_map: + if_name = self.if_oid_map[port_id] + else: + if_name = port_id if 'vlan' in fdb: vlan_id = fdb["vlan"] elif 'bvid' in fdb: - vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) + try: + vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) + except: + vlan_id = fdb["bvid"] + print "Failed to get Vlan id for bvid {}\n".format(fdb["bvid"]) self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,)) return diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser index 6ebd8808de..66213239a3 100644 --- a/scripts/neighbor_advertiser +++ b/scripts/neighbor_advertiser @@ -21,7 +21,6 @@ import sonic_device_util from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector from netaddr import IPAddress, IPNetwork -from requests.exceptions import ConnectTimeout # @@ -340,51 +339,41 @@ def construct_neighbor_advertiser_slice(): return slice_obj -def wrapped_ferret_request(request_slice, https_endpoint, http_endpoint): - """ - Attempts to reach ferret by first trying HTTPS, failing over to HTTP in - case of failure (e.g. timeout, endpoint not found, etc.). - """ + +def wrapped_ferret_request(request_slice, https_endpoint): response = None # NOTE: While we transition to HTTPS we're disabling the verify field. We # need to add a way to fetch certificates in this script ASAP. 
- try: - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - response = requests.post(https_endpoint, - json=request_slice, - timeout=DEFAULT_REQUEST_TIMEOUT, - verify=False) - except ConnectTimeout: - log_info("HTTPS Ferret endpoint not found, trying HTTP...") - response = requests.post(http_endpoint, + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + response = requests.post(https_endpoint, json=request_slice, - timeout=DEFAULT_REQUEST_TIMEOUT) + timeout=DEFAULT_REQUEST_TIMEOUT, + verify=False) + + if not response: + raise RuntimeError("No response obtained from HTTPS endpoint") + + # If the request is unsuccessful (e.g. has a non 2xx response code), + # we'll consider it failed + response.raise_for_status() return response + def post_neighbor_advertiser_slice(ferret_service_vip): request_slice = construct_neighbor_advertiser_slice() save_as_json(request_slice, NEIGHBOR_ADVERTISER_REQUEST_SLICE_PATH) - https_endpoint = 'https://{}:448{}{}'.format(ferret_service_vip, FERRET_NEIGHBOR_ADVERTISER_API_PREFIX, get_switch_name()) - http_endpoint = 'http://{}:85{}{}'.format(ferret_service_vip, FERRET_NEIGHBOR_ADVERTISER_API_PREFIX, get_switch_name()) + https_endpoint = "https://{}:448{}{}".format(ferret_service_vip, FERRET_NEIGHBOR_ADVERTISER_API_PREFIX, get_switch_name()) response = None for retry in range(DEFAULT_FERRET_QUERY_RETRIES): try: - response = wrapped_ferret_request(request_slice, https_endpoint, http_endpoint) + response = wrapped_ferret_request(request_slice, https_endpoint) except Exception as e: - log_error('The request failed, vip: {}, error: {}'.format(ferret_service_vip, e)) - return None - - # Handle response errors - if not response: - log_error('Failed to set up neighbor advertiser slice, vip: {}, no response obtained'.format(ferret_service_vip)) - return None - if response and not response.ok: - log_error('Failed to set up neighbor advertiser slice, vip: {}, error_code: {}, error_content: 
{}'.format(ferret_service_vip, response.status_code, response.content)) + log_error("The request failed, vip: {}, error: {}".format(ferret_service_vip, e)) return None neighbor_advertiser_configuration = json.loads(response.content) @@ -392,15 +381,15 @@ def post_neighbor_advertiser_slice(ferret_service_vip): # Retry the request if the provided DIP is in the device VLAN if is_dip_in_device_vlan(ferret_server_ipv4_addr): - log_info('Failed to set up neighbor advertiser slice, vip: {}, dip {} is in device VLAN (attempt {}/{})'.format(ferret_service_vip, ferret_server_ipv4_addr, retry + 1, DEFAULT_FERRET_QUERY_RETRIES)) + log_info("Failed to set up neighbor advertiser slice, vip: {}, dip {} is in device VLAN (attempt {}/{})".format(ferret_service_vip, ferret_server_ipv4_addr, retry + 1, DEFAULT_FERRET_QUERY_RETRIES)) continue # If all the proceeding checks pass, return the provided DIP save_as_json(neighbor_advertiser_configuration, NEIGHBOR_ADVERTISER_RESPONSE_CONFIG_PATH) - log_info('Successfully set up neighbor advertiser slice, vip: {}, dip: {}'.format(ferret_service_vip, ferret_server_ipv4_addr)) + log_info("Successfully set up neighbor advertiser slice, vip: {}, dip: {}".format(ferret_service_vip, ferret_server_ipv4_addr)) return ferret_server_ipv4_addr - log_error('Failed to set up neighbor advertiser slice, vip: {}, returned dips were in device VLAN'.format(ferret_service_vip)) + log_error("Failed to set up neighbor advertiser slice, vip: {}, returned dips were in device VLAN".format(ferret_service_vip)) return None diff --git a/scripts/portstat b/scripts/portstat index 889ce72086..b628f564d8 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -155,9 +155,9 @@ class Portstat(object): if use_json: - table_as_json(table, header_all if print_all else header) + print table_as_json(table, header_all if print_all else header) else: - print tabulate(table, header_all, tablefmt='simple', stralign='right') # if print_all else header + print tabulate(table, 
header_all if print_all else header, tablefmt='simple', stralign='right') def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, use_json, print_all): """ @@ -208,7 +208,7 @@ class Portstat(object): STATUS_NA, cntr.tx_err, cntr.tx_drop, - cntr.tx_err)) + cntr.tx_ovr)) else: if old_cntr is not None: table.append((key, self.get_port_state(key), @@ -237,7 +237,7 @@ class Portstat(object): STATUS_NA, cntr.tx_err, cntr.tx_drop, - cntr.tx_err)) + cntr.tx_ovr)) if use_json: print table_as_json(table, header) diff --git a/scripts/reboot b/scripts/reboot index c495639f8a..131aba24ec 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -1,5 +1,12 @@ #!/bin/bash +# Reboot immediately if we run the kdump capture kernel +VMCORE_FILE=/proc/vmcore +if [ -e $VMCORE_FILE -a -s $VMCORE_FILE ]; then + debug "We have a /proc/vmcore, then we just kdump'ed" + /sbin/reboot +fi + REBOOT_USER=$(logname) REBOOT_TIME=$(date) PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config new file mode 100755 index 0000000000..b7dee8a265 --- /dev/null +++ b/scripts/sonic-kdump-config @@ -0,0 +1,632 @@ +#!/usr/bin/python +''' +Copyright 2019 Broadcom. The term "Broadcom" refers to Broadcom Inc. +and/or its subsidiaries. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+''' + +import sys +import argparse +import shlex +from argparse import RawTextHelpFormatter +import os +import subprocess +import errno +from swsssdk import ConfigDBConnector + +grub_cfg = "/host/grub/grub.cfg" +kdump_cfg = "/etc/default/kdump-tools" + +## Same as print(), but output to stderr instead of stdout +def print_err(*args): + sys.stderr.write(' '.join(map(str,args)) + '\n') + +## Run an external command, either from the shell or not +# The function capture the output of stdout and stderr, +# and return then a tupple with exit code, stdout, stderr +# +# @param cmd Command to execute (full path needed ig not using the shell) +def run_command(cmd, use_shell=False): + '''! + Execute a given command + + @param cmd (str) Command to execute. Since we execute the command directly, and not within the + context of the shell, the full path needs to be provided ($PATH is not used). + Command parameters are simply separated by a space. + Should be either string or a list + + @param use_shell (bool) Execute subprocess with shell access + ''' + + pid = None + try: + if isinstance(cmd, list): + if use_shell is False: + shcmd = cmd + else: + shcmd = '' + for c in cmd: + shcmd += c + ' ' + else: + if use_shell is False: + shcmd = shlex.split(cmd) + else: + shcmd = cmd + proc = subprocess.Popen(shcmd, shell=use_shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, close_fds=True) + output_stdout, output_stderr = proc.communicate() + list_stdout = [] + for l in output_stdout.splitlines(): + list_stdout.append(str(l.decode())) + list_stderr = [] + for l in output_stderr.splitlines(): + list_stderr.append(str(l.decode())) + return (proc.returncode, list_stdout, list_stderr) + except (OSError, ValueError) as e: + print("!Exception [%s] encountered while processing the command : %s" % (str(e), str(cmd))) + return (1, None, None) + +## Search which SONiC image is the Current image +def get_current_image(): + (rc, img, err_str) = run_command("sonic_installer list | 
grep 'Current: ' | cut -d '-' -f 3-", use_shell=True); + if type(img) == list and len(img) == 1: + return img[0] + print_err("Unable to locate current SONiC image") + sys.exit(1) + +## Search which SONiC image is the Next image +def get_next_image(): + (rc, img, err_str) = run_command("sonic_installer list | grep 'Next: ' | cut -d '-' -f 3-", use_shell=True); + if type(img) == list and len(img) == 1: + return img[0] + print_err("Unable to locate current SONiC image") + sys.exit(1) + +## Search for Current/Next SONiC image in grub configuration +# +# @param lines Lines read from grub.cfg file +# @param img String we are looking for ("loop=image...") +# @return Index in lines array wehere we found the string +def locate_image(lines, img): + for num in range(len(lines)): + try: + lines[num].index(img) + return num + except Exception as exception: + pass + return -1 + +## Rewrite grub configuration file +# +# @param lines Lines read from grub.cfg file +# @param fname Grub configuration file +def rewrite_grub_cfg(lines, fname): + fd = open(fname, "w") + for x in lines: + fd.writelines(x+'\n') + fd.close() + +## Search for "crashkernel=X" in string +# +# @param where String should be in the form "crashkernel=X", X being a string +# @return The value X as a string +def search_for_crash_kernel(where): + expected_str = ' crashkernel=' + p = where.find(expected_str) + if p == -1: + return None + next_space = where.find(" ", p+1) + if next_space == -1: + return where[p+len(expected_str):] + else: + return where[p+len(expected_str):next_space] + +## Search for "crashkernel=X" in /proc/cmdline +# +# @return Return the X from "crashkernel=X" in /proc/cmdline +# None in case "crashkernel=" is not found +def search_for_crash_kernel_in_cmdline(): + try: + cmdline = [line.rstrip('\n') for line in open("/proc/cmdline")] + except Exception as exception: + print_err(exception) + sys.exit(1) + return search_for_crash_kernel(cmdline[0]) + +## Query current configuration to check if kdump 
is enabled or disabled +# +# @return True if kdump is enable, False if kdump is not enabled +# We read the running configuration to check if kdump is enabled or not +def get_kdump_administrative_mode(): + kdump_is_enabled = False + config_db = ConfigDBConnector() + if config_db is not None: + config_db.connect() + table_data = config_db.get_table('KDUMP') + if table_data is not None: + config_data = table_data.get('config') + if config_data is not None: + is_enabled = config_data.get('enabled') + if is_enabled and is_enabled.lower() == 'true': + kdump_is_enabled = True + if kdump_is_enabled: + return True + else: + return False + +## Query current configuration for kdump memory +# +# @return The current memory string used for kdump (read from running configuration) +def get_kdump_memory(): + (rc, lines, err_str) = run_command("/usr/bin/show kdump memory", use_shell=False) + try: + if rc == 0 and len(lines) == 1: + p = lines[0].find(': ') + if p != -1: + #print('XXX') + #print(lines[0][p+2:]) + #print('XXX') + return lines[0][p+2:] + except: + pass + return "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" + +## Query current configuration for kdump num_dumps +# +# @return The maximum number of kernel dump files stored locally +# (read from running configuration) +def get_kdump_num_dumps(): + (rc, lines, err_str) = run_command("/usr/bin/show kdump num_dumps", use_shell=False) + try: + if rc == 0 and len(lines) == 1: + p = lines[0].find(': ') + if p != -1: + return int(lines[0][p+2:]) + except: + pass + return 3 + +## Read current value for USE_KDUMP in kdump config file +# +# @return The integer value X from USE_KDUMP=X in /etc/default/kdump-tools +def read_use_kdump(): + (rc, lines, err_str) = run_command("grep 'USE_KDUMP=.*' %s | cut -d = -f 2" % kdump_cfg, use_shell=True); + if rc == 0 and type(lines) == list and len(lines) >= 1: + try: + return int(lines[0]) + except Exception as e: + print('Error! 
Exception[%s] occured while reading from %s' %(str(e), kdump_cfg)) + sys.exit(1) + else: + print_err("Unable to read USE_KDUMP from %s" % kdump_cfg) + sys.exit(1) + +## Rewrite value for USE_KDUMP in kdump config file /etc/default/kdump-tools +# +# @param use_kdump 0 or 1 +def write_use_kdump(use_kdump): + (rc, lines, err_str) = run_command("/bin/sed -i -e 's/USE_KDUMP=.*/USE_KDUMP=%s/' %s" % (use_kdump, kdump_cfg), use_shell=False); + if rc == 0 and type(lines) == list and len(lines) == 0: + use_kdump_in_cfg = read_use_kdump() + if use_kdump_in_cfg != use_kdump: + print_err("Unable to write USE_KDUMP into %s" % kdump_cfg) + sys.exit(1) + else: + print_err("Error while writing USE_KDUMP into %s" % kdump_cfg) + sys.exit(1) + +## Read current value for KDUMP_NUM_DUMPS in kdump config file +# +# @return The integer value X from KDUMP_NUM_DUMPS=X in /etc/default/kdump-tools +def read_num_dumps(): + (rc, lines, err_str) = run_command("grep '#*KDUMP_NUM_DUMPS=.*' %s | cut -d = -f 2" % kdump_cfg, use_shell=True); + if rc == 0 and type(lines) == list and len(lines) >= 1: + try: + return int(lines[0]) + except Exception as e: + print_err('Error! 
Exception[%s] occured while reading from %s' %(str(e), kdump_cfg)) + sys.exit(1) + else: + print_err("Unable to read KDUMP_NUM_DUMPS from %s" % kdump_cfg) + sys.exit(1) + +## Change the value for KDUMP_NUM_DUMPS in kdump config file /etc/default/kdump-tools +# +# #param num_dumps Integer value for new value +def write_num_dumps(num_dumps): + (rc, lines, err_str) = run_command("/bin/sed -i -e 's/#*KDUMP_NUM_DUMPS=.*/KDUMP_NUM_DUMPS=%d/' %s" % (num_dumps, kdump_cfg), use_shell=False); + if rc == 0 and type(lines) == list and len(lines) == 0: + num_dumps_in_cfg = read_num_dumps() + if num_dumps_in_cfg != num_dumps: + print_err("Unable to write KDUMP_NUM_DUMPS into %s" % kdump_cfg) + sys.exit(1) + else: + print_err("Error while writing KDUMP_NUM_DUMPS into %s" % kdump_cfg) + sys.exit(1) + +## Command: Enable kdump - Grub mode +# +# @param verbose If True, the function will display a few additinal information +# @return True is the grub configuration has changed, and False if it has not +def kdump_enable_grub(verbose, kdump_enabled, memory, num_dumps): + + current_img = get_current_image(); + if verbose: + print("Current image=[%s]" % current_img) + try: + lines = [line.rstrip('\n') for line in open(grub_cfg)] + except Exception as exception: + print_err(exception) + sys.exit(1) + current_img_index = locate_image(lines, "loop=image-"+current_img) + if verbose: + print("Image index in grub.cfg=%d" % current_img_index) + + changed = False + crash_kernel_in_cmdline = search_for_crash_kernel_in_cmdline() + if verbose: + print("crash_kernel_in_cmdline=[%s]" % crash_kernel_in_cmdline) + curr_crash_kernel_mem = search_for_crash_kernel(lines[current_img_index]) + if verbose: + print("curr_crash_kernel_mem=[%s]" % curr_crash_kernel_mem) + if curr_crash_kernel_mem == None: + lines[current_img_index] += " crashkernel=%s" % memory + changed = True + if verbose: + print("Added to grub.cfg: [ crashkernel=%s ]" % memory) + else: + if curr_crash_kernel_mem == memory: + if 
curr_crash_kernel_mem == crash_kernel_in_cmdline: + print("kdump is already enabled") + else: + changed = True + else: + lines[current_img_index] = lines[current_img_index].replace(curr_crash_kernel_mem, memory) + changed = True + if verbose: + print("Replace [%s] with [%s] in grub.cfg" % (curr_crash_kernel_mem, memory)) + + if changed: + rewrite_grub_cfg(lines, grub_cfg) + + write_use_kdump(1) + + return changed + +## Command: Enable kdump +# +# @param verbose If True, the function will display a few additinal information +# @return True is the grub configuration has changed, and False if it has not +def cmd_kdump_enable(verbose): + + kdump_enabled = get_kdump_administrative_mode() + memory = get_kdump_memory() + num_dumps = get_kdump_num_dumps() + if verbose: + print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) + + if os.path.exists(grub_cfg): + return kdump_enable_grub(verbose, kdump_enabled, memory, num_dumps) + else: + print("Feature not supported on this platform") + run_command("config kdump disable", use_shell=False); + return False + +## Command: Enable kdump on Next image only - Grub mode +# +# @param verbose If True, the function will display a few additional information +# @return True is the grub configuration has changed, and False if it has not +def kdump_config_next_grub(verbose, kdump_enabled, memory, num_dumps): + next_img = get_next_image(); + if verbose: + print("Next image=[%s]" % next_img) + try: + lines = [line.rstrip('\n') for line in open(grub_cfg)] + except Exception as exception: + print_err(exception) + sys.exit(1) + next_img_index = locate_image(lines, "loop=image-"+next_img) + if verbose: + print("Image index in grub.cfg=%d" % next_img_index) + + changed = False + crash_kernel_in_cmdline = search_for_crash_kernel_in_cmdline() + if verbose: + print("crash_kernel_in_cmdline=[%s]" % crash_kernel_in_cmdline) + curr_crash_kernel_mem = search_for_crash_kernel(lines[next_img_index]) + if verbose: 
+ print("curr_crash_kernel_mem=[%s]" % curr_crash_kernel_mem) + if curr_crash_kernel_mem == None: + lines[next_img_index] += " crashkernel=%s" % memory + changed = True + if verbose: + print("Added to grub.cfg: [ crashkernel=%s ]" % memory) + else: + if curr_crash_kernel_mem == memory: + if curr_crash_kernel_mem == crash_kernel_in_cmdline: + print("kdump is already enabled") + else: + changed = True + else: + lines[next_img_index] = lines[next_img_index].replace(curr_crash_kernel_mem, memory) + changed = True + if verbose: + print("Replace [%s] with [%s] in grub.cfg" % (curr_crash_kernel_mem, memory)) + + if changed: + rewrite_grub_cfg(lines, grub_cfg) + + write_use_kdump(1) + + return changed + +## Command: Enable kdump on Next image only +# +# @param verbose If True, the function will display a few additional information +# @return True is the grub configuration has changed, and False if it has not +def cmd_kdump_config_next(verbose): + + kdump_enabled = get_kdump_administrative_mode() + memory = get_kdump_memory() + num_dumps = get_kdump_num_dumps() + if verbose: + print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) + + if os.path.exists(grub_cfg): + return kdump_config_next_grub(verbose, kdump_enabled, memory, num_dumps) + else: + return False + +## Command: Disable kdump - Grub mode +# +# @param verbose If True, the function will display a few additional information +def kdump_disable_grub(verbose, kdump_enabled, memory, num_dumps): + write_use_kdump(0) + + current_img = get_current_image(); + if verbose: + print("Current image=[%s]\n" % current_img) + lines = [line.rstrip('\n') for line in open(grub_cfg)] + current_img_index = locate_image(lines, "loop=image-"+current_img) + + changed = False + curr_crash_kernel_mem = search_for_crash_kernel(lines[current_img_index]) + if curr_crash_kernel_mem == None: + print("kdump is already disabled") + else: + lines[current_img_index] = 
lines[current_img_index].replace("crashkernel="+curr_crash_kernel_mem, "") + changed = True + if verbose: + print("Removed [%s] in grub.cfg" % ("crashkernel="+curr_crash_kernel_mem)) + + if changed: + rewrite_grub_cfg(lines, grub_cfg) + + +## Command: Disable kdump +# +# @param verbose If True, the function will display a few additional information +def cmd_kdump_disable(verbose): + + kdump_enabled = get_kdump_administrative_mode() + memory = get_kdump_memory() + num_dumps = get_kdump_num_dumps() + if verbose: + print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) + + if os.path.exists(grub_cfg): + return kdump_disable_grub(verbose, kdump_enabled, memory, num_dumps) + else: + return False + +## Command: Set / Get memory +# +# @param verbose If True, the function will display a few additional information +# @param memory If not None, new value to set. +# If None, display current value read from running configuration +def cmd_kdump_memory(verbose, memory): + if memory == None: + (rc, lines, err_str) = run_command("/usr/bin/show kdump memory", use_shell=False); + print('\n'.join(lines)) + else: + use_kdump_in_cfg = read_use_kdump() + if use_kdump_in_cfg: + crash_kernel_in_cmdline = search_for_crash_kernel_in_cmdline() + if memory != crash_kernel_in_cmdline: + cmd_kdump_enable(verbose) + print("kdump updated memory will be only operational after the system reboots") + +## Command: Set / Get num_dumps +# +# @param verbose If True, the function will display a few additional information +# @param memory If not None, new value to set. 
+# If None, display current value read from running configuration +def cmd_kdump_num_dumps(verbose, num_dumps): + if num_dumps == None: + (rc, lines, err_str) = run_command("/usr/bin/show kdump num_dumps", use_shell=False); + print('\n'.join(lines)) + else: + write_num_dumps(num_dumps) + +## Command: Display kdump status +def cmd_kdump_status(): + print 'Kdump Administrative Mode: ', + kdump_enabled = get_kdump_administrative_mode() + if kdump_enabled: + print('Enabled') + else: + print('Disabled') + + print 'Kdump Operational State: ', + (rc, lines, err_str) = run_command("/usr/sbin/kdump-config status", use_shell=False); + if len(lines) >= 1 and ": ready to kdump" in lines[0]: + use_kdump_in_cfg = read_use_kdump() + if use_kdump_in_cfg: + print('Ready') + else: + print('Not Ready') + elif not kdump_enabled: + print('Disabled') + else: + print('Ready after Reboot') + +## Get the current number of kernel dump files stored +# +# @param The number of kdump files stored in /var/crash +def get_nb_dumps_in_var_crash(): + (rc, lines, err_str) = run_command("find /var/crash/ -name 'kdump.*'", use_shell=False); + if rc == 0: + return len(lines) + return 0 + +## Command: Display kdump files +def cmd_kdump_files(): + nb_dumps = get_nb_dumps_in_var_crash() + if nb_dumps == 0: + print("No kernel core dump files") + else: + (rc1, lines1, err_str) = run_command("find /var/crash/ -name 'dmesg.*'", use_shell=False); + lines1.sort(reverse=True) + (rc2, lines2, err_str) = run_command("find /var/crash/ -name 'kdump.*'", use_shell=False); + lines2.sort(reverse=True) + print("Record Key Filename") + print("-------------------------------------------------------------") + for n in range(len(lines1)): + print("%6d %s %s\n %s" % (n+1, lines1[n][11:23], lines1[n], lines2[n])) + +## Command: Display kdump file (kernel log) +# +# @param num_lines Number of last lines displayed +# @param filename Name or index of the kernel log file (dmesg) +def cmd_kdump_file(num_lines, filename): + fname = 
None + nb_dumps = get_nb_dumps_in_var_crash() + if nb_dumps == 0: + print("There is no kernel core file stored") + else: + (rc, lines, err_str) = run_command("find /var/crash/ -name 'dmesg.*'", use_shell=False); + if rc == 0 and nb_dumps == len(lines): + if filename.isdigit() and len(filename) <= 2: + num = int(filename) + if num < 1 or num > nb_dumps: + if nb_dumps == 1: + print("Invalid record number - Should be 1") + else: + print("Invalid record number - Should be between 1 and %d" % nb_dumps) + sys.exit(1) + fname = lines[num-1] + else: + lines.sort(reverse=True) + for x in lines: + if x.find(filename) != -1: + fname = x + break + if fname == None: + print("Invalid key") + sys.exit(1) + (rc, lines, err_str) = run_command("/usr/bin/tail -n %d %s" % (num_lines, fname), use_shell=False); + if rc == 0: + print('File: %s' % fname) + print('\n'.join(lines)) + +def main(): + + # Only privileged users can execute this command + if os.geteuid() != 0: + sys.exit("Root privileges required for this operation") + + # Add allowed arguments + parser = argparse.ArgumentParser(description="kdump configuration and status tool", + formatter_class=RawTextHelpFormatter) + + # Enable kdump on Current image + parser.add_argument('--enable', action='store_true', + help='Enable kdump (Current image)') + + # Enable kdump on the Next image only + parser.add_argument('--config-next', action='store_true', + help='Enable kdump (Next image)') + + # Disable kdump on Current Image + parser.add_argument('--disable', action='store_true', + help='Disable kdump') + + # kdump status on Current Image + parser.add_argument('--status', action='store_true', + help='Show kdump status') + + # Maximum number of kernel core dumps + parser.add_argument('--num_dumps', nargs='?', type=int, action='store', default=False, + help='Maximum number of kernel dump files stored') + + # Memory allocated for capture kernel on Current Image + parser.add_argument('--memory', nargs='?', type=str, action='store', 
default=False, + help='Amount of memory reserved for the capture kernel') + + # Capture kernel files + parser.add_argument('--files', action='store_true', + help='Show stored capture kernel files') + + # Capture kernel file + parser.add_argument('--file', nargs=1, type=str, + help='Show stored capture kernel file') + + # Show more information (used for sonic-kdump-config status) + parser.add_argument("-v", "--verbose", action='store_true', + help='displays detailed kdump status information. Used with status command.') + + # How many lines should we display from the kernel log + parser.add_argument("-l", "--lines", default=75, type=int, + help="Number of lines displayed from the kernel log") + + # Validate input + if len(sys.argv[1:]) == 0: + parser.print_help() + sys.exit(1) + + # Parse command arguments + options = parser.parse_args() + + # Execute the command + changed = False + try: + if options.enable: + changed = cmd_kdump_enable(options.verbose) + elif options.config_next: + changed = cmd_kdump_config_next(options.verbose) + elif options.disable: + changed = cmd_kdump_disable(options.verbose) + elif options.memory != False: + cmd_kdump_memory(options.verbose, options.memory) + elif options.num_dumps != False: + cmd_kdump_num_dumps(options.verbose, options.num_dumps) + elif options.status: + cmd_kdump_status() + elif options.files != False: + cmd_kdump_files() + elif options.file: + cmd_kdump_file(options.lines, options.file[0]) + else: + parser.print_help() + sys.exit(1) + except Exception as e: + print_err('Error! Exception[%s] occured while processing the command sonic-kdump-config %s.' 
%(str(e), sys.argv[1])) + sys.exit(1) + + if changed: + print("Kdump configuration changes will be applied after the system reboots") + + sys.exit(0) + +if __name__== "__main__": + main() diff --git a/scripts/syseeprom-to-json b/scripts/syseeprom-to-json new file mode 100755 index 0000000000..89b7256f50 --- /dev/null +++ b/scripts/syseeprom-to-json @@ -0,0 +1,31 @@ +#!/usr/bin/awk -f + +BEGIN { print "{"; n = 0 } + +function sep() +{ + if (n > 0) print ", "; + ++n; +} + +/Product Name/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Part Number/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Serial Number/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Base MAC Address/ { sep(); print "\"" $1 " " $2 " " $3 "\": \"" $6 "\""; } +/Manufacture Date/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Device Version/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Label Revision/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Platform Name/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/ONIE Version/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/MAC Addresses/ { sep(); print "\"" $1 " " $2 "\": " $5; } +/Manfacturer/ { sep(); print "\"" $1 "\": \"" $4 "\""; } +/Manfacture Country/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Vendor Name/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Diag Version/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Service Tag/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Hardware Version/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Software Version/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Manfacture Date/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } +/Model Name/ { sep(); print "\"" $1 " " $2 "\": \"" $5 "\""; } + +END { print "}" } diff --git a/scripts/tempershow b/scripts/tempershow new file mode 100644 index 0000000000..d8ba6a645b --- /dev/null +++ b/scripts/tempershow @@ -0,0 +1,64 @@ +#!/usr/bin/python +""" + Script to show fan status. 
+""" +from __future__ import print_function + +import argparse + +from tabulate import tabulate +from swsssdk import SonicV2Connector + + +header = ['NAME', 'Temperature', 'High Threshold', 'Low Threshold', 'Critical High Threshold', 'Critical Low Threshold', 'Warning Status', 'Timestamp'] + +TEMPER_TABLE_NAME = 'TEMPERATURE_INFO' +TEMPER_FIELD_NAME = 'temperature' +TIMESTAMP_FIELD_NAME = 'timestamp' +HIGH_THRESH_FIELD_NAME = 'high_threshold' +LOW_THRESH_FIELD_NAME = 'low_threshold' +CRIT_HIGH_THRESH_FIELD_NAME = 'critical_high_threshold' +CRIT_LOW_THRESH_FIELD_NAME = 'critical_low_threshold' +WARNING_STATUS_FIELD_NAME = 'warning_status' + + +class TemperShow(object): + def __init__(self): + self.db = SonicV2Connector(host="127.0.0.1") + self.db.connect(self.db.STATE_DB) + + def show(self): + keys = self.db.keys(self.db.STATE_DB, TEMPER_TABLE_NAME + '*') + if not keys: + print('Thermal Not detected\n') + return + + table = [] + for key in keys: + key_list = key.split('|') + if len(key_list) != 2: # error data in DB, log it and ignore + print('Warn: Invalid key in table {}: {}'.format(TEMPER_TABLE_NAME, key)) + continue + + name = key_list[1] + data_dict = self.db.get_all(self.db.STATE_DB, key) + table.append((name, + data_dict[TEMPER_FIELD_NAME], + data_dict[HIGH_THRESH_FIELD_NAME], + data_dict[LOW_THRESH_FIELD_NAME], + data_dict[CRIT_HIGH_THRESH_FIELD_NAME], + data_dict[CRIT_LOW_THRESH_FIELD_NAME], + data_dict[WARNING_STATUS_FIELD_NAME], + data_dict[TIMESTAMP_FIELD_NAME] + )) + + if table: + table.sort() + print(tabulate(table, header, tablefmt='simple', stralign='right')) + else: + print('No temperature data available\n') + + +if __name__ == "__main__": + temperShow = TemperShow() + temperShow.show() diff --git a/scripts/update_json.py b/scripts/update_json.py new file mode 100755 index 0000000000..a42603e8fe --- /dev/null +++ b/scripts/update_json.py @@ -0,0 +1,55 @@ +#! 
/usr/bin/env python + +import os +import sys +import json +import argparse + +TMP_SUFFIX = ".tmp" +BAK_SUFFIX = ".bak" + +def dict_update(dst, patch): + for k in patch.keys(): + if type(patch[k]) == dict: + dst[k] = dict_update(dst.get(k, {}), patch[k]) + else: + dst[k] = patch[k] + return dst + +def do_update(rcf, patchf): + dst = {} + patch = {} + + tmpf = rcf + TMP_SUFFIX + bakf = rcf + BAK_SUFFIX + + with open(rcf, "r") as f: + dst = json.load(f) + + with open(patchf, "r") as f: + patch = json.load(f) + + dst = dict_update(dst, patch) + + with open(tmpf, "w") as f: + json.dump(dst, f, indent = 4) + + os.rename(rcf, bakf) + os.rename(tmpf, rcf) + + +def main(): + parser = argparse.ArgumentParser(description="Update JSON based file") + parser.add_argument("-u", "--update", help="JSON file to be updated") + parser.add_argument("-p", "--patch", help="JSON file holding patch") + args = parser.parse_args() + + if not args.update or not args.patch: + raise Exception("check usage") + + do_update(args.update, args.patch) + +if __name__ == '__main__': + main() + + diff --git a/setup.py b/setup.py index b2abcc1208..766707e451 100644 --- a/setup.py +++ b/setup.py @@ -41,6 +41,10 @@ 'ssdutil', 'pfc', 'psuutil', + 'pddf_fanutil', + 'pddf_psuutil', + 'pddf_thermalutil', + 'pddf_ledutil', 'show', 'sonic_installer', 'sonic-utilities-tests', @@ -62,7 +66,9 @@ 'scripts/dropcheck', 'scripts/dropconfig', 'scripts/dropstat', + 'scripts/dump_nat_entries.py', 'scripts/ecnconfig', + 'scripts/fanshow', 'scripts/fast-reboot', 'scripts/fast-reboot-dump.py', 'scripts/fdbclear', @@ -72,6 +78,9 @@ 'scripts/intfstat', 'scripts/lldpshow', 'scripts/mmuconfig', + 'scripts/natclear', + 'scripts/natconfig', + 'scripts/natshow', 'scripts/nbrshow', 'scripts/neighbor_advertiser', 'scripts/pcmping', @@ -85,10 +94,14 @@ 'scripts/route_check.py', 'scripts/route_check_test.sh', 'scripts/sfpshow', + 'scripts/syseeprom-to-json', 'scripts/teamshow', + 'scripts/tempershow', + 'scripts/update_json.py', 
'scripts/warm-reboot', 'scripts/watermarkstat', - 'scripts/watermarkcfg' + 'scripts/watermarkcfg', + 'scripts/sonic-kdump-config' ], data_files=[ ('/etc/bash_completion.d', glob.glob('data/etc/bash_completion.d/*')), @@ -107,6 +120,10 @@ 'ssdutil = ssdutil.main:ssdutil', 'pfc = pfc.main:cli', 'psuutil = psuutil.main:cli', + 'pddf_fanutil = pddf_fanutil.main:cli', + 'pddf_psuutil = pddf_psuutil.main:cli', + 'pddf_thermalutil = pddf_thermalutil.main:cli', + 'pddf_ledutil = pddf_ledutil.main:cli', 'show = show.main:cli', 'sonic-clear = clear.main:cli', 'sonic_installer = sonic_installer.main:cli', diff --git a/show/bgp_frr_v6.py b/show/bgp_frr_v6.py index 47c66aaed0..9ff2ded3b6 100644 --- a/show/bgp_frr_v6.py +++ b/show/bgp_frr_v6.py @@ -19,7 +19,11 @@ def bgp(): @bgp.command() def summary(): """Show summarized information of IPv6 BGP state""" - run_command('sudo vtysh -c "show bgp ipv6 summary"') + try: + device_output = run_command('sudo vtysh -c "show bgp ipv6 summary"', return_cmd=True) + get_bgp_summary_extended(device_output) + except: + run_command('sudo vtysh -c "show bgp ipv6 summary"') # 'neighbors' subcommand ("show ipv6 bgp neighbors") diff --git a/show/bgp_quagga_v4.py b/show/bgp_quagga_v4.py index d543b6a1b9..4883880682 100644 --- a/show/bgp_quagga_v4.py +++ b/show/bgp_quagga_v4.py @@ -19,7 +19,11 @@ def bgp(): @bgp.command() def summary(): """Show summarized information of IPv4 BGP state""" - run_command('sudo vtysh -c "show ip bgp summary"') + try: + device_output = run_command('sudo vtysh -c "show ip bgp summary"', return_cmd=True) + get_bgp_summary_extended(device_output) + except: + run_command('sudo vtysh -c "show ip bgp summary"') # 'neighbors' subcommand ("show ip bgp neighbors") diff --git a/show/bgp_quagga_v6.py b/show/bgp_quagga_v6.py index 06ef6a4df8..e2afe0f13e 100644 --- a/show/bgp_quagga_v6.py +++ b/show/bgp_quagga_v6.py @@ -19,7 +19,11 @@ def bgp(): @bgp.command() def summary(): """Show summarized information of IPv6 BGP state""" - 
run_command('sudo vtysh -c "show ipv6 bgp summary"') + try: + device_output = run_command('sudo vtysh -c "show ipv6 bgp summary"', return_cmd=True) + get_bgp_summary_extended(device_output) + except: + run_command('sudo vtysh -c "show ipv6 bgp summary"') # 'neighbors' subcommand ("show ipv6 bgp neighbors") diff --git a/show/main.py b/show/main.py index 7c784a6bb2..6cc838662e 100755 --- a/show/main.py +++ b/show/main.py @@ -8,6 +8,7 @@ import re import subprocess import sys +import ipaddress import click from click_default_group import DefaultGroup @@ -188,7 +189,7 @@ def get_routing_stack(): routing_stack = get_routing_stack() -def run_command(command, display_cmd=False): +def run_command(command, display_cmd=False, return_cmd=False): if display_cmd: click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) @@ -201,6 +202,9 @@ def run_command(command, display_cmd=False): proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) while True: + if return_cmd: + output = proc.communicate()[0].decode("utf-8") + return output output = proc.stdout.readline() if output == "" and proc.poll() is not None: break @@ -393,6 +397,146 @@ def run_command_in_alias_mode(command): sys.exit(rc) +def get_bgp_summary_extended(command_output): + """ + Adds Neighbor name to the show ip[v6] bgp summary command + :param command: command to get bgp summary + """ + static_neighbors, dynamic_neighbors = get_bgp_neighbors_dict() + modified_output = [] + my_list = iter(command_output.splitlines()) + for element in my_list: + if element.startswith("Neighbor"): + element = "{}\tNeighborName".format(element) + modified_output.append(element) + elif not element or element.startswith("Total number "): + modified_output.append(element) + elif re.match(r"(\*?([0-9A-Fa-f]{1,4}:|\d+.\d+.\d+.\d+))", element.split()[0]): + first_element = element.split()[0] + ip = first_element[1:] if first_element.startswith("*") else first_element + name = 
get_bgp_neighbor_ip_to_name(ip, static_neighbors, dynamic_neighbors) + if len(element.split()) == 1: + modified_output.append(element) + element = next(my_list) + element = "{}\t{}".format(element, name) + modified_output.append(element) + else: + modified_output.append(element) + click.echo("\n".join(modified_output)) + + +def connect_config_db(): + """ + Connects to config_db + """ + config_db = ConfigDBConnector() + config_db.connect() + return config_db + + +def get_neighbor_dict_from_table(db,table_name): + """ + returns a dict with bgp neighbor ip as key and neighbor name as value + :param table_name: config db table name + :param db: config_db + """ + neighbor_dict = {} + neighbor_data = db.get_table(table_name) + try: + for entry in neighbor_data.keys(): + neighbor_dict[entry] = neighbor_data[entry].get( + 'name') if 'name' in neighbor_data[entry].keys() else 'NotAvailable' + return neighbor_dict + except: + return neighbor_dict + + +def is_ipv4_address(ip_address): + """ + Checks if given ip is ipv4 + :param ip_address: unicode ipv4 + :return: bool + """ + try: + ipaddress.IPv4Address(ip_address) + return True + except ipaddress.AddressValueError as err: + return False + + +def is_ipv6_address(ip_address): + """ + Checks if given ip is ipv6 + :param ip_address: unicode ipv6 + :return: bool + """ + try: + ipaddress.IPv6Address(ip_address) + return True + except ipaddress.AddressValueError as err: + return False + + +def get_dynamic_neighbor_subnet(db): + """ + Returns dict of description and subnet info from bgp_peer_range table + :param db: config_db + """ + dynamic_neighbor = {} + v4_subnet = {} + v6_subnet = {} + neighbor_data = db.get_table('BGP_PEER_RANGE') + try: + for entry in neighbor_data.keys(): + new_key = neighbor_data[entry]['ip_range'][0] + new_value = neighbor_data[entry]['name'] + if is_ipv4_address(unicode(neighbor_data[entry]['src_address'])): + v4_subnet[new_key] = new_value + elif is_ipv6_address(unicode(neighbor_data[entry]['src_address'])): 
+ v6_subnet[new_key] = new_value + dynamic_neighbor["v4"] = v4_subnet + dynamic_neighbor["v6"] = v6_subnet + return dynamic_neighbor + except: + return neighbor_data + + +def get_bgp_neighbors_dict(): + """ + Uses config_db to get the bgp neighbors and names in dictionary format + :return: + """ + dynamic_neighbors = {} + config_db = connect_config_db() + static_neighbors = get_neighbor_dict_from_table(config_db, 'BGP_NEIGHBOR') + bgp_monitors = get_neighbor_dict_from_table(config_db, 'BGP_MONITORS') + static_neighbors.update(bgp_monitors) + dynamic_neighbors = get_dynamic_neighbor_subnet(config_db) + return static_neighbors, dynamic_neighbors + + +def get_bgp_neighbor_ip_to_name(ip, static_neighbors, dynamic_neighbors): + """ + return neighbor name for the ip provided + :param ip: ip address unicode + :param static_neighbors: statically defined bgp neighbors dict + :param dynamic_neighbors: subnet of dynamically defined neighbors dict + :return: name of neighbor + """ + if ip in static_neighbors.keys(): + return static_neighbors[ip] + elif is_ipv4_address(unicode(ip)): + for subnet in dynamic_neighbors["v4"].keys(): + if ipaddress.IPv4Address(unicode(ip)) in ipaddress.IPv4Network(unicode(subnet)): + return dynamic_neighbors["v4"][subnet] + elif is_ipv6_address(unicode(ip)): + for subnet in dynamic_neighbors["v6"].keys(): + if ipaddress.IPv6Address(unicode(ip)) in ipaddress.IPv6Network(unicode(subnet)): + return dynamic_neighbors["v6"][subnet] + else: + return "NotAvailable" + + CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?']) # @@ -406,6 +550,47 @@ def cli(): """SONiC command line - 'show' command""" pass +# +# 'vrf' command ("show vrf") +# + +def get_interface_bind_to_vrf(config_db, vrf_name): + """Get interfaces belong to vrf + """ + tables = ['INTERFACE', 'PORTCHANNEL_INTERFACE', 'VLAN_INTERFACE', 'LOOPBACK_INTERFACE'] + data = [] + for table_name in tables: + interface_dict = config_db.get_table(table_name) + if interface_dict: + for interface 
in interface_dict.keys(): + if interface_dict[interface].has_key('vrf_name') and vrf_name == interface_dict[interface]['vrf_name']: + data.append(interface) + return data + +@cli.command() +@click.argument('vrf_name', required=False) +def vrf(vrf_name): + """Show vrf config""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['VRF', 'Interfaces'] + body = [] + vrf_dict = config_db.get_table('VRF') + if vrf_dict: + vrfs = [] + if vrf_name is None: + vrfs = vrf_dict.keys() + elif vrf_name in vrf_dict.keys(): + vrfs = [vrf_name] + for vrf in vrfs: + intfs = get_interface_bind_to_vrf(config_db, vrf) + if len(intfs) == 0: + body.append([vrf, ""]) + else: + body.append([vrf, intfs[0]]) + for intf in intfs[1:]: + body.append(["", intf]) + click.echo(tabulate(body, header)) # # 'arp' command ("show arp") @@ -1117,17 +1302,32 @@ def get_if_oper_state(iface): return "down" +# +# get_if_master +# +# Given an interface name, return its master reported by the kernel. +# +def get_if_master(iface): + oper_file = "/sys/class/net/{0}/master" + + if os.path.exists(oper_file.format(iface)): + real_path = os.path.realpath(oper_file.format(iface)) + return os.path.basename(real_path) + else: + return "" + + # # 'show ip interfaces' command # -# Display all interfaces with an IPv4 address, admin/oper states, their BGP neighbor name and peer ip. +# Display all interfaces with master, an IPv4 address, admin/oper states, their BGP neighbor name and peer ip. # Addresses from all scopes are included. Interfaces with no addresses are # excluded. 
# @ip.command() def interfaces(): """Show interfaces IPv4 address""" - header = ['Interface', 'IPv4 address/mask', 'Admin/Oper', 'BGP Neighbor', 'Neighbor IP'] + header = ['Interface', 'Master', 'IPv4 address/mask', 'Admin/Oper', 'BGP Neighbor', 'Neighbor IP'] data = [] bgp_peer = get_bgp_peer() @@ -1156,14 +1356,14 @@ def interfaces(): oper = get_if_oper_state(iface) else: oper = "down" - + master = get_if_master(iface) if get_interface_mode() == "alias": iface = iface_alias_converter.name_to_alias(iface) - data.append([iface, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) + data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) for ifaddr in ifaddresses[1:]: - data.append(["", ifaddr[1], ""]) + data.append(["", "", ifaddr[1], ""]) print tabulate(data, header, tablefmt="simple", stralign='left', missingval="") @@ -1192,14 +1392,14 @@ def get_bgp_peer(): # @ip.command() -@click.argument('ipaddress', required=False) +@click.argument('args', metavar='[IPADDRESS] [vrf ] [...]', nargs=-1, required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def route(ipaddress, verbose): +def route(args, verbose): """Show IP (IPv4) routing table""" cmd = 'sudo vtysh -c "show ip route' - if ipaddress is not None: - cmd += ' {}'.format(ipaddress) + for arg in args: + cmd += " " + str(arg) cmd += '"' @@ -1260,14 +1460,14 @@ def prefix_list(prefix_list_name, verbose): # # 'show ipv6 interfaces' command # -# Display all interfaces with an IPv6 address, admin/oper states, their BGP neighbor name and peer ip. +# Display all interfaces with master, an IPv6 address, admin/oper states, their BGP neighbor name and peer ip. # Addresses from all scopes are included. Interfaces with no addresses are # excluded. 
# @ipv6.command() def interfaces(): """Show interfaces IPv6 address""" - header = ['Interface', 'IPv6 address/mask', 'Admin/Oper', 'BGP Neighbor', 'Neighbor IP'] + header = ['Interface', 'Master', 'IPv6 address/mask', 'Admin/Oper', 'BGP Neighbor', 'Neighbor IP'] data = [] bgp_peer = get_bgp_peer() @@ -1296,11 +1496,12 @@ def interfaces(): oper = get_if_oper_state(iface) else: oper = "down" + master = get_if_master(iface) if get_interface_mode() == "alias": iface = iface_alias_converter.name_to_alias(iface) - data.append([iface, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) + data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) for ifaddr in ifaddresses[1:]: - data.append(["", ifaddr[1], ""]) + data.append(["", "", ifaddr[1], ""]) print tabulate(data, header, tablefmt="simple", stralign='left', missingval="") @@ -1310,14 +1511,14 @@ def interfaces(): # @ipv6.command() -@click.argument('ipaddress', required=False) +@click.argument('args', metavar='[IPADDRESS] [vrf ] [...]', nargs=-1, required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def route(ipaddress, verbose): +def route(args, verbose): """Show IPv6 routing table""" cmd = 'sudo vtysh -c "show ipv6 route' - if ipaddress is not None: - cmd += ' {}'.format(ipaddress) + for arg in args: + cmd += " " + str(arg) cmd += '"' @@ -1469,6 +1670,20 @@ def ssdhealth(device, verbose, vendor): options += " -e" if vendor else "" run_command(cmd + options, display_cmd=verbose) +# 'fan' subcommand ("show platform fan") +@platform.command() +def fan(): + """Show fan status information""" + cmd = 'fanshow' + run_command(cmd) + +# 'temperature' subcommand ("show platform temperature") +@platform.command() +def temperature(): + """Show device temperature information""" + cmd = 'tempershow' + run_command(cmd) + # # 'logging' command ("show logging") # @@ -1792,6 +2007,87 @@ def vlan(): """Show VLAN information""" pass +# +# 'kdump 
#
# 'kdump' command ("show kdump ...")
#
@cli.group(cls=AliasedGroup, default_if_no_args=True, )
def kdump():
    """Show kdump configuration, status and information """
    pass

def _get_kdump_config_value(field, default=None):
    """Return field `field` of the KDUMP|config entry in CONFIG_DB.

    Tolerates a missing DB connection, table, entry or field and returns
    `default` instead, so the 'show kdump ...' commands can fall back to
    their built-in defaults.
    """
    config_db = ConfigDBConnector()
    if config_db is not None:
        config_db.connect()
        table_data = config_db.get_table('KDUMP')
        if table_data is not None:
            config_data = table_data.get('config')
            if config_data is not None:
                value = config_data.get(field)
                if value is not None:
                    return value
    return default

@kdump.command('enabled')
def enabled():
    """Show if kdump is enabled or disabled"""
    # Default to '' so a missing 'enabled' field reports "disabled" instead
    # of raising AttributeError on None.lower().
    if _get_kdump_config_value('enabled', '').lower() == 'true':
        click.echo("kdump is enabled")
    else:
        click.echo("kdump is disabled")

@kdump.command('status', default=True)
def status():
    """Show kdump status"""
    run_command("sonic-kdump-config --status")
    run_command("sonic-kdump-config --memory")
    run_command("sonic-kdump-config --num_dumps")
    run_command("sonic-kdump-config --files")

@kdump.command('memory')
def memory():
    """Show kdump memory information"""
    # Fall back to the built-in reservation scheme when CONFIG_DB has none.
    kdump_memory = _get_kdump_config_value(
        'memory', "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M")
    click.echo("Memory Reserved: %s" % kdump_memory)

@kdump.command('num_dumps')
def num_dumps():
    """Show kdump max number of dump files"""
    kdump_num_dumps = _get_kdump_config_value('num_dumps', "3")
    click.echo("Maximum number of Kernel Core files Stored: %s" % kdump_num_dumps)

@kdump.command('files')
def files():
    """Show kdump kernel core dump files"""
    run_command("sonic-kdump-config --files")

@kdump.command()
@click.argument('record', required=True)
@click.argument('lines', metavar='', required=False)
def log(record, lines):
    """Show kdump kernel core dump file kernel log"""
    if lines is None:
        run_command("sonic-kdump-config --file %s" % record)
    else:
        run_command("sonic-kdump-config --file %s --lines %s" % (record, lines))

#
# 'nat' group ("show nat ...")
#

@cli.group(cls=AliasedGroup, default_if_no_args=False)
def nat():
    """Show details of the nat """
    pass

# 'statistics' subcommand ("show nat statistics")
@nat.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def statistics(verbose):
    """ Show NAT statistics """

    cmd = "sudo natshow -s"
    run_command(cmd, display_cmd=verbose)

# 'translations' subcommand ("show nat translations")
@nat.group(invoke_without_command=True)
@click.pass_context
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def translations(ctx, verbose):
    """ Show NAT translations """
    # Group doubles as a command: plain "show nat translations" dumps the
    # table, while a subcommand (e.g. "count") takes over when invoked.
    if ctx.invoked_subcommand is None:
        cmd = "sudo natshow -t"
        run_command(cmd, display_cmd=verbose)

# 'count' subcommand ("show nat translations count")
@translations.command()
def count():
    """ Show NAT translations count """

    cmd = "sudo natshow -c"
    run_command(cmd)

# 'config' subcommand ("show nat config")
@nat.group(invoke_without_command=True)
@click.pass_context
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def config(ctx, verbose):
    """Show NAT config related information"""
    # With no subcommand, dump every NAT configuration section in turn.
    if ctx.invoked_subcommand is None:
        click.echo("\nGlobal Values")
        cmd = "sudo natconfig -g"
        run_command(cmd, display_cmd=verbose)
        click.echo("Static Entries")
        cmd = "sudo natconfig -s"
        run_command(cmd, display_cmd=verbose)
        click.echo("Pool Entries")
        cmd = "sudo natconfig -p"
        run_command(cmd, display_cmd=verbose)
        click.echo("NAT Bindings")
        cmd = "sudo natconfig -b"
        run_command(cmd, display_cmd=verbose)
        click.echo("NAT Zones")
        cmd = "sudo natconfig -z"
        run_command(cmd, display_cmd=verbose)

# 'static' subcommand ("show nat config static")
@config.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def static(verbose):
    """Show static NAT configuration"""

    cmd = "sudo natconfig -s"
    run_command(cmd, display_cmd=verbose)

# 'pool' subcommand ("show nat config pool")
@config.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def pool(verbose):
    """Show NAT Pool configuration"""

    cmd = "sudo natconfig -p"
    run_command(cmd, display_cmd=verbose)


# 'bindings' subcommand ("show nat config bindings")
@config.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def bindings(verbose):
    """Show NAT binding configuration"""

    cmd = "sudo natconfig -b"
    run_command(cmd, display_cmd=verbose)

# 'globalvalues' subcommand ("show nat config globalvalues")
@config.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def globalvalues(verbose):
    """Show NAT Global configuration"""

    cmd = "sudo natconfig -g"
    run_command(cmd, display_cmd=verbose)

# 'zones' subcommand ("show nat config zones")
@config.command()
@click.option('--verbose', is_flag=True, help="Enable verbose output")
def zones(verbose):
    """Show NAT Zone configuration"""

    cmd = "sudo natconfig -z"
    run_command(cmd, display_cmd=verbose)
("show ztp status") # diff --git a/sonic-utilities-tests/mock_tables/state_db.json b/sonic-utilities-tests/mock_tables/state_db.json index f3fdf3ec24..14f60801a7 100644 --- a/sonic-utilities-tests/mock_tables/state_db.json +++ b/sonic-utilities-tests/mock_tables/state_db.json @@ -67,11 +67,11 @@ "ACL_ACTION|PACKET_ACTION": "FORWARD" }, "DEBUG_COUNTER_CAPABILITIES|PORT_INGRESS_DROPS": { - "reasons": "[SAI_IN_DROP_REASON_IP_HEADER_ERROR,SAI_IN_DROP_REASON_NO_L3_HEADER]", + "reasons": "[IP_HEADER_ERROR,NO_L3_HEADER]", "count": "4" }, "DEBUG_COUNTER_CAPABILITIES|SWITCH_EGRESS_DROPS": { - "reasons": "[SAI_IN_DROP_REASON_ACL_ANY,SAI_IN_DROP_REASON_L2_ANY,SAI_IN_DROP_REASON_L3_ANY]", + "reasons": "[ACL_ANY,L2_ANY,L3_ANY]", "count": "2" } } diff --git a/sonic_installer/main.py b/sonic_installer/main.py index b47703c736..8696c65122 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -13,6 +13,7 @@ from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector import collections +import platform HOST_PATH = '/host' IMAGE_PREFIX = 'SONiC-OS-' @@ -22,6 +23,10 @@ IMAGE_TYPE_ABOOT = 'aboot' IMAGE_TYPE_ONIE = 'onie' ABOOT_BOOT_CONFIG = '/boot-config' +BOOTLOADER_TYPE_GRUB = 'grub' +BOOTLOADER_TYPE_UBOOT = 'uboot' +ARCH = platform.machine() +BOOTLOADER = BOOTLOADER_TYPE_UBOOT if "arm" in ARCH else BOOTLOADER_TYPE_GRUB # # Helper functions @@ -106,9 +111,15 @@ def set_default_image(image): if get_running_image_type() == IMAGE_TYPE_ABOOT: image_path = aboot_image_path(image) aboot_boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) - else: + elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) run_command(command) + elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + return True def 
aboot_read_boot_config(path): @@ -156,7 +167,7 @@ def get_installed_images(): for filename in os.listdir(HOST_PATH): if filename.startswith(IMAGE_DIR_PREFIX): images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) - else: + elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: config = open(HOST_PATH + '/grub/grub.cfg', 'r') for line in config: if line.startswith('menuentry'): @@ -164,6 +175,17 @@ def get_installed_images(): if IMAGE_PREFIX in image: images.append(image) config.close() + elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) return images # Returns name of current image @@ -179,7 +201,7 @@ def get_next_image(): config = open(HOST_PATH + ABOOT_BOOT_CONFIG, 'r') next_image = re.search("SWI=flash:(\S+)/", config.read()).group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) config.close() - else: + elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: images = get_installed_images() grubenv = subprocess.check_output(["/usr/bin/grub-editenv", HOST_PATH + "/grub/grubenv", "list"]) m = re.search("next_entry=(\d+)", grubenv) @@ -192,6 +214,16 @@ def get_next_image(): else: next_image_index = 0 next_image = images[next_image_index] + elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: + images = get_installed_images() + proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, stdout=subprocess.PIPE) + (out, err) = proc.communicate() + image = out.rstrip() + if "sonic_image_2" in image: + next_image_index = 1 + else: + next_image_index = 0 + next_image = images[next_image_index] return next_image def remove_image(image): @@ -207,7 +239,7 @@ def remove_image(image): 
click.echo('Removing image root filesystem...') subprocess.call(['rm','-rf', os.path.join(HOST_PATH, image_dir)]) click.echo('Image removed') - else: + elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: click.echo('Updating GRUB...') config = open(HOST_PATH + '/grub/grub.cfg', 'r') old_config = config.read() @@ -226,6 +258,19 @@ def remove_image(image): run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') click.echo('Image removed') + elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: + click.echo('Updating next boot ...') + images = get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) + click.echo('Done') # TODO: Embed tag name info into docker image meta data at build time, # and extract tag name from docker image file. @@ -312,8 +357,10 @@ def cli(): expose_value=False, prompt='New image will be installed, continue?') @click.option('-f', '--force', is_flag=True, help="Force installation of an image of a type which differs from that of the current running image") +@click.option('--skip_migration', is_flag=True, + help="Do not migrate current configuration to the newly installed image") @click.argument('url') -def install(url, force): +def install(url, force, skip_migration=False): """ Install image from local binary or URL""" cleanup_image = False if get_running_image_type() == IMAGE_TYPE_ABOOT: @@ -360,10 +407,13 @@ def install(url, force): run_command("swipath=%s target_path=/host sonic_upgrade=1 . 
/tmp/boot0" % image_path) else: run_command("bash " + image_path) - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') - run_command("rm -rf /host/old_config") - # copy directories and preserve original file structure, attributes and associated metadata - run_command("cp -ar /etc/sonic /host/old_config") + if BOOTLOADER == BOOTLOADER_TYPE_GRUB: + run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + # Take a backup of current configuration + if skip_migration: + click.echo("Skipping configuration migration as requested in the command option.") + else: + run_command('config-setup backup') # Finally, sync filesystem run_command("sync;sync;sync") @@ -406,9 +456,14 @@ def set_next_boot(image): if get_running_image_type() == IMAGE_TYPE_ABOOT: image_path = aboot_image_path(image) aboot_boot_config_set(SWI=image_path) - else: + elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) run_command(command) + elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') # Uninstall image diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py new file mode 100644 index 0000000000..8ac7286391 --- /dev/null +++ b/utilities_common/util_base.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python2 + +try: + import imp + import signal + import subprocess + import os + import sys + import syslog +except ImportError, e: + raise ImportError (str(e) + " - required module not found") + +# +# Constants ==================================================================== +# +# Platform root directory inside docker +PLATFORM_ROOT_DOCKER = '/usr/share/sonic/platform' +SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' +HWSKU_KEY = 'DEVICE_METADATA.localhost.hwsku' +PLATFORM_KEY = 'DEVICE_METADATA.localhost.platform' 
PDDF_FILE_PATH = '/usr/share/sonic/platform/pddf_support'

# Port config information
PORT_CONFIG = 'port_config.ini'
PORTMAP = 'portmap.ini'


EEPROM_MODULE_NAME = 'eeprom'
EEPROM_CLASS_NAME = 'board'

class UtilLogger(object):
    """Thin facade over the syslog module.

    Opens a syslog connection with the given identifier on construction
    and closes it again when the instance is garbage-collected.  Each
    log_* method can optionally echo the message to the console.
    """

    def __init__(self, syslog_identifier):
        # Hold a reference to the module so __del__ can still reach it
        # during interpreter shutdown.
        self.syslog = syslog
        self.syslog.openlog(ident=syslog_identifier,
                            logoption=self.syslog.LOG_NDELAY,
                            facility=self.syslog.LOG_DAEMON)

    def __del__(self):
        self.syslog.closelog()

    def log_error(self, msg, print_to_console=False):
        """Log msg at ERROR priority; optionally echo to the console."""
        self.syslog.syslog(self.syslog.LOG_ERR, msg)

        if print_to_console:
            print(msg)

    def log_warning(self, msg, print_to_console=False):
        """Log msg at WARNING priority; optionally echo to the console."""
        self.syslog.syslog(self.syslog.LOG_WARNING, msg)

        if print_to_console:
            print(msg)

    def log_notice(self, msg, print_to_console=False):
        """Log msg at NOTICE priority; optionally echo to the console."""
        self.syslog.syslog(self.syslog.LOG_NOTICE, msg)

        if print_to_console:
            print(msg)

    def log_info(self, msg, print_to_console=False):
        """Log msg at INFO priority; optionally echo to the console."""
        self.syslog.syslog(self.syslog.LOG_INFO, msg)

        if print_to_console:
            print(msg)

    def log_debug(self, msg, print_to_console=False):
        """Log msg at DEBUG priority; optionally echo to the console."""
        self.syslog.syslog(self.syslog.LOG_DEBUG, msg)

        if print_to_console:
            print(msg)


class UtilHelper(object):
    """Helpers for locating platform/hwsku resources and loading plugins."""

    def __init__(self):
        pass

    # Returns platform and hwsku
    def get_platform_and_hwsku(self):
        """Return (platform, hwsku) as reported by sonic-cfggen.

        Raises:
            OSError: if sonic-cfggen cannot be executed.
        """
        try:
            proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-H', '-v', PLATFORM_KEY],
                                    stdout=subprocess.PIPE,
                                    shell=False,
                                    stderr=subprocess.STDOUT)
            # communicate() waits for the process, so no extra wait() is needed.
            stdout = proc.communicate()[0]
            platform = stdout.rstrip('\n')

            proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-d', '-v', HWSKU_KEY],
                                    stdout=subprocess.PIPE,
                                    shell=False,
                                    stderr=subprocess.STDOUT)
            stdout = proc.communicate()[0]
            hwsku = stdout.rstrip('\n')
        except OSError as e:
            raise OSError("Failed to detect platform: %s" % (str(e)))

        return (platform, hwsku)

    # Returns path to platform and hwsku
    def get_path_to_platform_and_hwsku(self):
        """Return (platform_path, hwsku_path) inside the docker filesystem."""
        (platform, hwsku) = self.get_platform_and_hwsku()

        platform_path = PLATFORM_ROOT_DOCKER
        hwsku_path = "/".join([platform_path, hwsku])

        return (platform_path, hwsku_path)

    # Returns path to port config file
    def get_path_to_port_config_file(self):
        """Return the port config file path for the detected hwsku.

        Prefers the new 'port_config.ini'; falls back to the legacy
        'portmap.ini'.

        Raises:
            IOError: if neither file exists.
        """
        (platform_path, hwsku_path) = self.get_path_to_platform_and_hwsku()

        port_config_file_path = "/".join([hwsku_path, PORT_CONFIG])
        if not os.path.isfile(port_config_file_path):
            # port_config.ini doesn't exist. Try loading the legacy 'portmap.ini' file
            port_config_file_path = "/".join([hwsku_path, PORTMAP])
            if not os.path.isfile(port_config_file_path):
                raise IOError("Failed to detect port config file: %s" % (port_config_file_path))

        return port_config_file_path

    # Loads platform specific psuutil module from source
    def load_platform_util(self, module_name, class_name):
        """Load class `class_name` from platform plugin `module_name`
        and return an instance of it.

        Raises:
            IOError: if the plugin source file cannot be loaded.
            AttributeError: if the class cannot be found/instantiated.
        """
        (platform_path, hwsku_path) = self.get_path_to_platform_and_hwsku()

        try:
            module_file = "/".join([platform_path, "plugins", module_name + ".py"])
            module = imp.load_source(module_name, module_file)
        except IOError as e:
            raise IOError("Failed to load platform module '%s': %s" % (module_name, str(e)))

        try:
            platform_util_class = getattr(module, class_name)
            # board class of eeprom requires 4 parameters, need special treatment here.
            if module_name == EEPROM_MODULE_NAME and class_name == EEPROM_CLASS_NAME:
                platform_util = platform_util_class('', '', '', '')
            else:
                platform_util = platform_util_class()
        except AttributeError as e:
            raise AttributeError("Failed to instantiate '%s' class: %s" % (class_name, str(e)))

        return platform_util

    def check_pddf_mode(self):
        """Return True when the platform runs in PDDF mode (marker file exists)."""
        return os.path.exists(PDDF_FILE_PATH)