Skip to content

Commit

Permalink
SDK-291: Add root disk volume for cluster info v1.3 and v2 (#259)
Browse files Browse the repository at this point in the history
  • Loading branch information
akaranjkar-qu authored and msumit committed Jan 24, 2019
1 parent c527090 commit fbfdc11
Show file tree
Hide file tree
Showing 5 changed files with 103 additions and 2 deletions.
1 change: 1 addition & 0 deletions bin/qds.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,6 +295,7 @@ def _create_cluster_info(arguments, api_version):
ebs_volume_count=arguments.ebs_volume_count,
ebs_volume_type=arguments.ebs_volume_type,
ebs_volume_size=arguments.ebs_volume_size,
root_volume_size=arguments.root_volume_size,
fairscheduler_config_xml=fairscheduler_config_xml,
default_pool=arguments.default_pool,
encrypted_ephemerals=arguments.encrypted_ephemerals,
Expand Down
13 changes: 11 additions & 2 deletions qds_sdk/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,10 @@ def _parse_create_update(cls, args, action, api_version):
dest="slave_request_type",
choices=["ondemand", "spot", "hybrid", "spotblock"],
help="purchasing option for slave instaces",)
node_config_group.add_argument("--root-volume-size",
dest="root_volume_size",
type=int,
help="size of root volume in GB")
hadoop_group.add_argument("--custom-config",
dest="custom_config_file",
help="location of file containg custom" +
Expand Down Expand Up @@ -987,6 +991,7 @@ def set_cluster_info(self, aws_access_key_id=None,
ebs_volume_count=None,
ebs_volume_type=None,
ebs_volume_size=None,
root_volume_size=None,
fairscheduler_config_xml=None,
default_pool=None,
encrypted_ephemerals=None,
Expand Down Expand Up @@ -1096,6 +1101,8 @@ def set_cluster_info(self, aws_access_key_id=None,
`ebs_volume_size`: Size of each EBS volume, in GB.
`root_volume_size`: Size of root volume, in GB.
`fairscheduler_config_xml`: XML string with custom configuration
parameters for the fair scheduler.
Expand Down Expand Up @@ -1129,7 +1136,7 @@ def set_cluster_info(self, aws_access_key_id=None,
self.node_bootstrap_file = node_bootstrap_file
self.set_node_configuration(master_instance_type, slave_instance_type, initial_nodes, max_nodes,
slave_request_type, fallback_to_ondemand, custom_ec2_tags,
node_base_cooldown_period, node_spot_cooldown_period)
node_base_cooldown_period, node_spot_cooldown_period, root_volume_size)
self.set_ec2_settings(aws_access_key_id, aws_secret_access_key, aws_region, aws_availability_zone, vpc_id, subnet_id,
master_elastic_ip, bastion_node_public_dns, role_instance_profile)
self.set_hadoop_settings(custom_config, use_hbase, use_hadoop2, use_spark, use_qubole_placement_policy, is_ha)
Expand Down Expand Up @@ -1170,7 +1177,8 @@ def set_node_configuration(self, master_instance_type=None,
fallback_to_ondemand=None,
custom_ec2_tags=None,
node_base_cooldown_period=None,
node_spot_cooldown_period=None):
node_spot_cooldown_period=None,
root_volume_size=None):
self.node_configuration['master_instance_type'] = master_instance_type
self.node_configuration['slave_instance_type'] = slave_instance_type
self.node_configuration['initial_nodes'] = initial_nodes
Expand All @@ -1179,6 +1187,7 @@ def set_node_configuration(self, master_instance_type=None,
self.node_configuration['fallback_to_ondemand'] = fallback_to_ondemand
self.node_configuration['node_base_cooldown_period'] = node_base_cooldown_period
self.node_configuration['node_spot_cooldown_period'] = node_spot_cooldown_period
self.node_configuration['root_volume_size'] = root_volume_size

if custom_ec2_tags and custom_ec2_tags.strip():
try:
Expand Down
11 changes: 11 additions & 0 deletions qds_sdk/clusterv2.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ def get_cluster_create_clone_update(arguments, action):
disk_count=arguments.count,
disk_type=arguments.disk_type,
disk_size=arguments.size,
root_disk_size=arguments.root_disk_size,
upscaling_config=arguments.upscaling_config,
enable_encryption=arguments.encrypted_ephemerals,
customer_ssh_key=customer_ssh_key,
Expand Down Expand Up @@ -182,6 +183,7 @@ def set_cluster_info(self,
disk_count=None,
disk_type=None,
disk_size=None,
root_disk_size=None,
upscaling_config=None,
enable_encryption=None,
customer_ssh_key=None,
Expand Down Expand Up @@ -259,6 +261,8 @@ def set_cluster_info(self,
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
Expand Down Expand Up @@ -312,6 +316,9 @@ def set_cluster_info(self,
self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout
self.cluster_info['spot_settings'] = {}

self.cluster_info['rootdisk'] = {}
self.cluster_info['rootdisk']['size'] = root_disk_size

self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_spot_block_settings(spot_block_duration)
Expand Down Expand Up @@ -441,6 +448,10 @@ def cluster_info_parser(argparser, action):
should be in stored in S3 at
<account-default-location>/scripts/hadoop/NODE_BOOTSTRAP_FILE
""", )
cluster_info.add_argument("--root-disk-size",
dest="root_disk_size",
type=int,
help="size of the root volume in GB")
termination = cluster_info.add_mutually_exclusive_group()
termination.add_argument("--disallow-cluster-termination",
dest="disallow_cluster_termination",
Expand Down
45 changes: 45 additions & 0 deletions tests/test_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -1597,6 +1597,29 @@ def test_env_settings_v13(self):
'python_version': '2.7',
'r_version': '3.3'}}})

def test_root_volume_size_v13(self):
    """Cluster create (v1.3): --root-volume-size must land as an int under
    node_configuration['root_volume_size'] in the POST payload."""
    # Simulate the CLI invocation.
    sys.argv = ['qds.py', '--version', 'v1.3', 'cluster', 'create',
                '--label', 'test_label',
                '--access-key-id', 'aki',
                '--secret-access-key', 'sak',
                '--root-volume-size', '100']
    print_command()
    Connection._api_call = Mock(return_value={})
    qds.main()
    # argparse type=int should have converted '100' to 100.
    expected_payload = {
        'label': ['test_label'],
        'ec2_settings': {'compute_secret_key': 'sak',
                         'compute_access_key': 'aki'},
        'node_configuration': {'root_volume_size': 100},
    }
    Connection._api_call.assert_called_with('POST', 'clusters', expected_payload)

def test_root_volume_size_invalid_v13(self):
    """Cluster create (v1.3): a non-integer --root-volume-size must be
    rejected by argparse, which exits via SystemExit."""
    sys.argv = ['qds.py', '--version', 'v1.3', 'cluster', 'create',
                '--label', 'test_label',
                '--access-key-id', 'aki',
                '--secret-access-key', 'sak',
                '--root-volume-size', 'invalid_value']
    print_command()
    # int('invalid_value') fails inside argparse -> SystemExit.
    with self.assertRaises(SystemExit):
        qds.main()


class TestClusterUpdate(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'cluster', 'update', '123']
Expand Down Expand Up @@ -2411,6 +2434,28 @@ def test_node_spot_cooldown_period_invalid_v13(self):
with self.assertRaises(SystemExit):
qds.main()

def test_root_volume_size_v13(self):
    """Cluster update (v1.3): --root-volume-size must be sent as an int under
    node_configuration['root_volume_size'] in the PUT payload."""
    sys.argv = ['qds.py', '--version', 'v1.3', 'cluster', 'update', '123',
                '--access-key-id', 'aki',
                '--secret-access-key', 'sak',
                '--root-volume-size', '100']
    print_command()
    Connection._api_call = Mock(return_value={})
    qds.main()
    # Update payload carries only the settings supplied on the command line.
    expected_payload = {
        'ec2_settings': {'compute_secret_key': 'sak',
                         'compute_access_key': 'aki'},
        'node_configuration': {'root_volume_size': 100},
    }
    Connection._api_call.assert_called_with('PUT', 'clusters/123', expected_payload)

def test_root_volume_size_invalid_v13(self):
    """Cluster update (v1.3): a non-integer --root-volume-size must be
    rejected by argparse, which exits via SystemExit."""
    sys.argv = ['qds.py', '--version', 'v1.3', 'cluster', 'update', '123',
                '--access-key-id', 'aki',
                '--secret-access-key', 'sak',
                '--root-volume-size', 'invalid_value']
    print_command()
    # int('invalid_value') fails inside argparse -> SystemExit.
    with self.assertRaises(SystemExit):
        qds.main()


class TestClusterClone(QdsCliTestCase):
def test_minimal(self):
Expand Down
35 changes: 35 additions & 0 deletions tests/test_clusterv2.py
Original file line number Diff line number Diff line change
Expand Up @@ -529,6 +529,23 @@ def test_env_settings_v2(self):
'python_version':'2.7',
'r_version':'3.3'}}})

def test_root_disk_size_v2(self):
    """Cluster create (v2): --root-disk-size must land as an int under
    cluster_info['rootdisk']['size'] in the POST payload."""
    sys.argv = ['qds.py', '--version', 'v2', 'cluster', 'create',
                '--label', 'test_label',
                '--root-disk-size', '100']
    print_command()
    Connection._api_call = Mock(return_value={})
    qds.main()
    # v2 nests everything under 'cluster_info'; root disk uses key 'rootdisk'.
    expected_payload = {
        'cluster_info': {
            'label': ['test_label'],
            'rootdisk': {'size': 100},
        },
    }
    Connection._api_call.assert_called_with('POST', 'clusters', expected_payload)

def test_root_disk_size_invalid_v2(self):
    """Cluster create (v2): a non-integer --root-disk-size must be rejected
    by argparse, which exits via SystemExit."""
    sys.argv = ['qds.py', '--version', 'v2', 'cluster', 'create',
                '--label', 'test_label',
                '--root-disk-size', 'invalid_value']
    print_command()
    # int('invalid_value') fails inside argparse -> SystemExit.
    with self.assertRaises(SystemExit):
        qds.main()


class TestClusterUpdate(QdsCliTestCase):
def test_minimal(self):
Expand Down Expand Up @@ -731,6 +748,24 @@ def test_node_spot_cooldown_period_invalid_v2(self):
with self.assertRaises(SystemExit):
qds.main()

def test_root_disk_size_v2(self):
    """Cluster update (v2): --root-disk-size must be sent as an int under
    cluster_info['rootdisk']['size'] in the PUT payload."""
    sys.argv = ['qds.py', '--version', 'v2', 'cluster', 'update', '123',
                '--root-disk-size', '100']
    print_command()
    Connection._api_call = Mock(return_value={})
    qds.main()
    # Only the supplied setting should appear in the update payload.
    expected_payload = {
        'cluster_info': {
            'rootdisk': {'size': 100},
        },
    }
    Connection._api_call.assert_called_with('PUT', 'clusters/123', expected_payload)

def test_root_disk_size_invalid_v2(self):
    """Cluster update (v2): a non-integer --root-disk-size must be rejected
    by argparse, which exits via SystemExit."""
    sys.argv = ['qds.py', '--version', 'v2', 'cluster', 'update', '123',
                '--root-disk-size', 'invalid_value']
    print_command()
    # int('invalid_value') fails inside argparse -> SystemExit.
    with self.assertRaises(SystemExit):
        qds.main()


class TestClusterClone(QdsCliTestCase):

def test_minimal(self):
Expand Down

0 comments on commit fbfdc11

Please sign in to comment.