Commit: V1.9.8
shahharsh87 committed Aug 18, 2017
2 parents d11b051 + fad184a commit d86001d
Showing 9 changed files with 521 additions and 24 deletions.
6 changes: 3 additions & 3 deletions bin/qds.py
@@ -473,15 +473,15 @@ def clustermain(args, api_version):
def clustermainv2(args):
action = args[0]
actionset = set(
["create", "delete", "update", "clone", "list", "start", "terminate", "status", "reassign_label", "add_node",
["create", "delete", "update", "list", "clone", "start", "terminate", "status", "reassign_label", "add_node",
"remove_node", "update_node", "snapshot", "restore_point", "get_snapshot_schedule",
"update_snapshot_schedule"])

result = None
if action not in actionset:
sys.stderr.write("action must be one of <%s>\n" % "|".join(actionset))
usage()
elif action in set(["create", "update", "clone"]):
elif action in set(["create", "update", "clone", "list"]):
result = ClusterCmdLine.run(args)
else:
result = globals()["cluster_" + action + "_action"](Cluster, args)
@@ -558,7 +558,7 @@ def main():

optparser.add_option("--cloud_name", dest="cloud_name",
default=os.getenv('CLOUD_PROVIDER'),
help="cloud", choices=["AWS", "AZURE", "ORACLE_BMC"])
help="cloud", choices=["AWS", "AZURE", "ORACLE_BMC", "ORACLE_OPC"])

optparser.add_option("-v", dest="verbose", action="store_true",
default=False,
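Taken together, these changes route the new `list` action through ClusterCmdLine and add ORACLE_OPC to the accepted --cloud_name choices. As a rough, hypothetical invocation (the --token and --version flag names are assumed from the usual qds.py conventions; values are placeholders):

    qds.py --token <API_TOKEN> --version v2 cluster list --state up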
151 changes: 151 additions & 0 deletions qds_sdk/cloud/oracle_opc_cloud.py
@@ -0,0 +1,151 @@
from qds_sdk.cloud.cloud import Cloud
class OracleOpcCloud(Cloud):
'''
qds_sdk.cloud.OracleOpcCloud is the class which stores information about Oracle OPC cloud config settings.
Objects of this class can be used to set Oracle OPC cloud_config settings while creating/updating/cloning a cluster.
'''

def __init__(self):
self.compute_config = {}
self.network_config = {}
self.storage_config = {}

def set_cloud_config(self,
username=None,
password=None,
rest_api_endpoint=None,
use_account_compute_creds=None,
storage_rest_api_endpoint=None,
storage_username=None,
storage_password=None,
acl=None,
ip_network=None,
data_disk_count=None,
data_disk_size=None):
'''
Args:
    username: Username for the customer's Oracle OPC account. This
        is required for creating the cluster.
    password: Password for the customer's Oracle OPC account. This
        is required for creating the cluster.
    rest_api_endpoint: REST API endpoint for the customer's Oracle OPC cloud.
    use_account_compute_creds: Set this to true to use the account's compute
        credentials for all clusters of the account. The default value is false.
    storage_rest_api_endpoint: REST API endpoint for storage-related operations.
    storage_username: Username for the customer's Oracle OPC account. This
        is required for creating the cluster.
    storage_password: Password for the customer's Oracle OPC account. This
        is required for creating the cluster.
    acl: ACL for Oracle OPC.
    ip_network: Subnet name for Oracle OPC.
'''

self.set_compute_config(use_account_compute_creds, username,
password, rest_api_endpoint)
self.set_network_config(acl, ip_network)
self.set_storage_config(storage_username, storage_password,
storage_rest_api_endpoint, data_disk_count, data_disk_size)

def set_compute_config(self,
use_account_compute_creds=None,
username=None,
password=None,
rest_api_endpoint=None):
self.compute_config['use_account_compute_creds'] = use_account_compute_creds
self.compute_config['username'] = username
self.compute_config['password'] = password
self.compute_config['rest_api_endpoint'] = rest_api_endpoint


def set_network_config(self,
acl=None,
ip_network=None):
self.network_config['acl'] = acl
self.network_config['ip_network'] = ip_network

def set_storage_config(self,
storage_username=None,
storage_password=None,
storage_rest_api_endpoint=None,
data_disk_count=None,
data_disk_size=None):
self.storage_config['storage_username'] = storage_username
self.storage_config['storage_password'] = storage_password
self.storage_config['storage_rest_api_endpoint'] = storage_rest_api_endpoint
self.storage_config['data_disk_count'] = data_disk_count
self.storage_config['data_disk_size'] = data_disk_size

def set_cloud_config_from_arguments(self, arguments):
self.set_cloud_config(username=arguments.username,
password=arguments.password,
rest_api_endpoint=arguments.rest_api_endpoint,
use_account_compute_creds=arguments.use_account_compute_creds,
storage_rest_api_endpoint=arguments.storage_rest_api_endpoint,
storage_username=arguments.storage_username,
storage_password=arguments.storage_password,
acl=arguments.acl,
ip_network=arguments.ip_network,
data_disk_count=arguments.count,
data_disk_size=arguments.size)

def create_parser(self, argparser):

# compute settings parser
compute_config = argparser.add_argument_group("compute config settings")
compute_creds = compute_config.add_mutually_exclusive_group()
compute_creds.add_argument("--enable-account-compute-creds",
dest="use_account_compute_creds",
action="store_true",
default=None,
help="to use account compute credentials")
compute_creds.add_argument("--disable-account-compute-creds",
dest="use_account_compute_creds",
action="store_false",
default=None,
help="to disable account compute credentials")
compute_config.add_argument("--username",
dest="username",
default=None,
help="username for opc cloud account")
compute_config.add_argument("--password",
dest="password",
default=None,
help="password for opc cloud account")
compute_config.add_argument("--rest-api-endpoint",
dest="rest_api_endpoint",
default=None,
help="Rest API endpoint for oracle opc account")

# network settings parser
network_config_group = argparser.add_argument_group("network config settings")
network_config_group.add_argument("--acl",
dest="acl",
help="acl for opc", )
network_config_group.add_argument("--ip-network",
dest="ip_network",
help="subnet name for opc")

# storage config settings parser
storage_config = argparser.add_argument_group("storage config settings")
storage_config.add_argument("--storage-rest-api-endpoint",
dest="storage_rest_api_endpoint",
default=None,
help="REST API endpoint for storage cloud")
storage_config.add_argument("--storage-username",
dest="storage_username",
default=None,
help="username for opc cloud account")
storage_config.add_argument("--storage-password",
dest="storage_password",
default=None,
help="password for opc cloud account")
2 changes: 1 addition & 1 deletion qds_sdk/cluster.py
@@ -83,7 +83,7 @@ def show(cls, cluster_id_label):
"""
Show information about the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
conn = Qubole.agent(version=Cluster.api_version)
return conn.get(cls.element_path(cluster_id_label))

@classmethod
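For context, a brief sketch (assuming the SDK is configured with a valid API token; the label is a placeholder) of the call path this one-line change affects:

    from qds_sdk.qubole import Qubole
    from qds_sdk.cluster import Cluster

    Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

    # Cluster.show() now opens its connection with Cluster.api_version rather
    # than the globally configured default, so the GET below always targets
    # the API version this Cluster class expects.
    print(Cluster.show("my-cluster-label"))       # placeholder id/label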
86 changes: 79 additions & 7 deletions qds_sdk/clusterv2.py
@@ -33,8 +33,18 @@ def parsers(action):
ClusterCmdLine.create_update_clone_parser(clone, action="clone")
clone.set_defaults(func=ClusterV2.clone)

if action == "list":
li = subparsers.add_parser("list", help="list clusters from existing clusters depending upon state")
ClusterCmdLine.list_parser(li, action="list")
li.set_defaults(func=ClusterV2.list)
return argparser

@staticmethod
def list_parser(subparser, action=None):

# cluster info parser
ClusterInfoV2.list_info_parser(subparser, action)

@staticmethod
def create_update_clone_parser(subparser, action=None):
# cloud config parser
@@ -51,8 +61,14 @@ def create_update_clone_parser(subparser, action=None):
def run(args):
parser = ClusterCmdLine.parsers(args[0])
arguments = parser.parse_args(args)
customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
if args[0] in ["create", "clone", "update"]:
ClusterCmdLine.get_cluster_create_clone_update(arguments, args[0])
else:
return arguments.func(arguments.label, arguments.cluster_id, arguments.state)

@staticmethod
def get_cluster_create_clone_update(arguments, action):
customer_ssh_key = util._read_file(arguments.customer_ssh_key_file)
# This will set cluster info and monitoring settings
cluster_info = ClusterInfoV2(arguments.label)
cluster_info.set_cluster_info(disallow_cluster_termination=arguments.disallow_cluster_termination,
@@ -80,7 +96,8 @@ def run(args):
disk_size=arguments.size,
upscaling_config=arguments.upscaling_config,
enable_encryption=arguments.encrypted_ephemerals,
customer_ssh_key=customer_ssh_key)
customer_ssh_key=customer_ssh_key,
image_uri_overrides=arguments.image_uri_overrides)

# This will set cloud config settings
cloud_config = Qubole.get_cloud()
@@ -92,7 +109,7 @@

cluster_request = ClusterCmdLine.get_cluster_request_parameters(cluster_info, cloud_config, engine_config)

action = args[0]
action = action
if action == "create":
return arguments.func(cluster_request)
else:
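A rough illustration (SDK configuration and the state value are placeholders) of how the reworked run() now splits dispatch: create/update/clone still build a full cluster request via get_cluster_create_clone_update(), while list goes straight to ClusterV2.list:

    from qds_sdk.qubole import Qubole
    from qds_sdk.clusterv2 import ClusterCmdLine

    Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder

    # Parsed as the new "list" sub-command; run() bypasses the cluster-info
    # machinery and calls arguments.func(label, cluster_id, state).
    clusters = ClusterCmdLine.run(["list", "--state", "up"])
    print(clusters)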
@@ -160,7 +177,8 @@ def set_cluster_info(self,
enable_encryption=None,
customer_ssh_key=None,
cluster_name=None,
force_tunnel=None):
force_tunnel=None,
image_uri_overrides=None):
"""
Args:
@@ -240,6 +258,8 @@ def set_cluster_info(self,
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
@@ -269,6 +289,7 @@ def set_cluster_info(self,
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption)
self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token)
self.set_internal(image_uri_overrides)

def set_datadog_setting(self,
datadog_api_token=None,
@@ -320,6 +341,20 @@ def set_data_disk(self,
self.cluster_info['datadisk']['upscaling_config'] = upscaling_config
self.cluster_info['datadisk']['encryption'] = enable_encryption

def set_internal(self, image_uri_overrides=None):
self.internal['image_uri_overrides'] = image_uri_overrides

@staticmethod
def list_info_parser(argparser, action):
argparser.add_argument("--id", dest="cluster_id",
help="show cluster with this id")

argparser.add_argument("--label", dest="label",
help="show cluster with this label")
argparser.add_argument("--state", dest="state",
choices=['invalid', 'up', 'down', 'pending', 'terminating'],
help="State of the cluster")

@staticmethod
def cluster_info_parser(argparser, action):
create_required = False
@@ -509,6 +544,12 @@ def cluster_info_parser(argparser, action):
default=None,
help="overrides for airflow cluster", )

internal_group = argparser.add_argument_group("internal settings")
internal_group.add_argument("--image-overrides",
dest="image_uri_overrides",
default=None,
help="overrides for image", )

class ClusterV2(Resource):

rest_entity_path = "clusters"
@@ -539,7 +580,38 @@ def clone(cls, cluster_id_label, cluster_info):
conn = Qubole.agent(version="v2")
return conn.post(cls.element_path(cluster_id_label) + '/clone', data=cluster_info)

# implementation needed
@classmethod
def list(self, state=None):
pass
def list(cls, label=None, cluster_id=None, state=None):
"""
List existing clusters present in your account.
Kwargs:
`state`: list only those clusters which are in this state
Returns:
List of clusters satisfying the given criteria
"""
if cluster_id is not None:
return cls.show(cluster_id)
if label is not None:
return cls.show(label)
conn = Qubole.agent(version="v2")
cluster_list = conn.get(cls.rest_entity_path)
if state is None:
# return the complete list since state is None
return conn.get(cls.rest_entity_path)
# filter clusters based on state
result = []
if 'clusters' in cluster_list:
for cluster in cluster_list['clusters']:
if state.lower() == cluster['state'].lower():
result.append(cluster)
return result

@classmethod
def show(cls, cluster_id_label):
"""
Show information about the cluster with id/label `cluster_id_label`.
"""
conn = Qubole.agent()
return conn.get(cls.element_path(cluster_id_label))
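And a direct API-level sketch of the newly implemented list() (again, the token and label are placeholders; the output shape is whatever the clusters endpoint returns):

    from qds_sdk.qubole import Qubole
    from qds_sdk.clusterv2 import ClusterV2

    Qubole.configure(api_token="YOUR_API_TOKEN")    # placeholder

    print(ClusterV2.list())                         # all clusters in the account
    print(ClusterV2.list(state="up"))               # only clusters whose state is "up"
    print(ClusterV2.list(label="my-cluster-label")) # single cluster, delegates to show()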
8 changes: 8 additions & 0 deletions qds_sdk/commands.py
@@ -977,6 +977,10 @@ class DbExportCommand(Command):
help="Modes 1 and 2: DbTap Id of the target database in Qubole")
optparser.add_option("--db_table", dest="db_table",
help="Modes 1 and 2: Table to export to in the target database")
optparser.add_option("--use_customer_cluster", dest="use_customer_cluster", default=False,
help="Modes 1 and 2: To use cluster to run command ")
optparser.add_option("--customer_cluster_label", dest="customer_cluster_label",
help="Modes 1 and 2: the label of the cluster to run the command on")
optparser.add_option("--db_update_mode", dest="db_update_mode",
help="Modes 1 and 2: (optional) can be 'allowinsert' or "
"'updateonly'. If updateonly is "
@@ -1085,6 +1089,10 @@ class DbImportCommand(Command):
help="Modes 1 and 2: DbTap Id of the target database in Qubole")
optparser.add_option("--db_table", dest="db_table",
help="Modes 1 and 2: Table to export to in the target database")
optparser.add_option("--use_customer_cluster", dest="use_customer_cluster", default=False,
help="Modes 1 and 2: To use cluster to run command ")
optparser.add_option("--customer_cluster_label", dest="customer_cluster_label",
help="Modes 1 and 2: the label of the cluster to run the command on")
optparser.add_option("--where_clause", dest="db_where",
help="Mode 1: where clause to be applied to the table before extracting rows to be imported")
optparser.add_option("--parallelism", dest="db_parallelism",
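A hedged sketch of the new cluster-targeting options on a database export; the DbTap id, table names, and cluster label are placeholders, and the same two options were added to DbImportCommand:

    from qds_sdk.qubole import Qubole
    from qds_sdk.commands import DbExportCommand

    Qubole.configure(api_token="YOUR_API_TOKEN")      # placeholder

    # Run the export on a specific customer cluster instead of the default.
    cmd = DbExportCommand.create(mode=1,
                                 hive_table="source_hive_table",        # placeholder
                                 dbtap_id=1234,                         # placeholder
                                 db_table="target_db_table",            # placeholder
                                 use_customer_cluster=True,
                                 customer_cluster_label="etl-cluster")  # placeholder
    print(cmd.status)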
9 changes: 6 additions & 3 deletions qds_sdk/qubole.py
@@ -96,8 +96,8 @@ def agent(cls, version=None):

@classmethod
def get_cloud(cls, cloud_name=None):
if cloud_name and cloud_name.lower() not in ["aws", "oracle_bmc", "azure"]:
raise Exception("cloud should be 'aws', 'oracle_bmc' or 'azure'")
if cloud_name and cloud_name.lower() not in ["aws", "oracle_bmc", "azure", "oracle_opc"]:
raise Exception("cloud should be 'aws', 'oracle_bmc', 'azure' or 'oracle_opc'")

if cloud_name:
return Qubole.get_cloud_object(cloud_name)
@@ -116,4 +116,7 @@ def get_cloud_object(cls, cloud_name):
return qds_sdk.cloud.oracle_bmc_cloud.OracleBmcCloud()
elif cloud_name.lower() == "azure":
import qds_sdk.cloud.azure_cloud
return qds_sdk.cloud.azure_cloud.AzureCloud()
elif cloud_name.lower() == "oracle_opc":
import qds_sdk.cloud.oracle_opc_cloud
return qds_sdk.cloud.oracle_opc_cloud.OracleOpcCloud()
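Finally, a small illustration of the extended cloud lookup; no credentials are needed just to construct the config object, and the unknown name below is only there to show the updated error message:

    from qds_sdk.qubole import Qubole

    cloud = Qubole.get_cloud(cloud_name="oracle_opc")
    print(type(cloud).__name__)   # -> OracleOpcCloud

    try:
        Qubole.get_cloud(cloud_name="gcp")   # not a supported value
    except Exception as err:
        print(err)  # cloud should be 'aws', 'oracle_bmc', 'azure' or 'oracle_opc'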
2 changes: 1 addition & 1 deletion setup.py
@@ -12,7 +12,7 @@ def read(fname):

setup(
name="qds_sdk",
version="1.9.7",
version="1.9.8",
author="Qubole",
author_email="dev@qubole.com",
description=("Python SDK for coding to the Qubole Data Service API"),