diff --git a/common/python/ax/cluster_management/app/cluster_uninstaller.py b/common/python/ax/cluster_management/app/cluster_uninstaller.py index 393e0147ff3e..adbe28f2b9d4 100644 --- a/common/python/ax/cluster_management/app/cluster_uninstaller.py +++ b/common/python/ax/cluster_management/app/cluster_uninstaller.py @@ -182,8 +182,6 @@ def _clean_up_kubernetes_cluster(self): if self._cfg.cloud_profile: env["AWS_DEFAULT_PROFILE"] = self._cfg.cloud_profile - else: - env["AWS_DEFAULT_PROFILE"] = AWS_DEFAULT_PROFILE logger.info("\n\n%sCalling kube-down ...%s\n", COLOR_GREEN, COLOR_NORM) AXKubeUpDown(cluster_name_id=self._name_id, env=env, aws_profile=self._cfg.cloud_profile).down() diff --git a/common/python/ax/cluster_management/app/cluster_upgrader.py b/common/python/ax/cluster_management/app/cluster_upgrader.py index 62403daff52c..f68543ff9b3d 100644 --- a/common/python/ax/cluster_management/app/cluster_upgrader.py +++ b/common/python/ax/cluster_management/app/cluster_upgrader.py @@ -209,8 +209,6 @@ def _upgrade_kube(self): if self._cfg.cloud_profile: env["ARGO_AWS_PROFILE"] = self._cfg.cloud_profile - else: - env["ARGO_AWS_PROFILE"] = AWS_DEFAULT_PROFILE logger.info("Upgrading Kubernetes with environments %s", pformat(env)) env.update(os.environ) diff --git a/common/python/ax/cluster_management/app/options/common.py b/common/python/ax/cluster_management/app/options/common.py index a872ff9867d8..dd307e150a53 100644 --- a/common/python/ax/cluster_management/app/options/common.py +++ b/common/python/ax/cluster_management/app/options/common.py @@ -8,9 +8,13 @@ import os from future.utils import with_metaclass -from ax.cloud.aws import AWS_DEFAULT_PROFILE from ax.platform.component_config import AXPlatformConfigDefaults, SoftwareInfo +# We should set aws profile to None if user does not provide one +# because in python None type is different from str, we convert None +# to string first and then finally convert it back to None +AWS_NO_PROFILE = "None" + def 
typed_raw_input_with_default(prompt, default, type_converter): real_prompt = prompt + " (Default: {}): ".format(default) @@ -20,7 +24,7 @@ def typed_raw_input_with_default(prompt, default, type_converter): class ClusterOperationDefaults: CLOUD_PROVIDER = "aws" - CLOUD_PROFILE = AWS_DEFAULT_PROFILE + CLOUD_PROFILE = AWS_NO_PROFILE PLATFORM_SERVICE_MANIFEST_ROOT = AXPlatformConfigDefaults.DefaultManifestRoot PLATFORM_BOOTSTRAP_CONFIG_FILE = AXPlatformConfigDefaults.DefaultPlatformConfigFile @@ -71,7 +75,7 @@ def default_or_wizard(self): if self.cloud_profile is None: self.cloud_profile = typed_raw_input_with_default( prompt="Please enter your cloud provider profile. If you don't provide one, we are going to use the default you configured on host.", - default=AWS_DEFAULT_PROFILE, + default=AWS_NO_PROFILE, type_converter=str ) @@ -82,6 +86,10 @@ def default_or_wizard(self): confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate:" raw_input(confirmation) + # TODO: revise this once we bring GCP into picture + if self.cloud_profile == AWS_NO_PROFILE: + self.cloud_profile = None + def validate_software_info(software_info): assert isinstance(software_info, SoftwareInfo) diff --git a/common/python/ax/cluster_management/app/options/install_options.py b/common/python/ax/cluster_management/app/options/install_options.py index 133f609602f8..4587e17f7971 100644 --- a/common/python/ax/cluster_management/app/options/install_options.py +++ b/common/python/ax/cluster_management/app/options/install_options.py @@ -11,11 +11,11 @@ from netaddr import IPAddress from ax.cloud import Cloud -from ax.cloud.aws import EC2, AWS_DEFAULT_PROFILE +from ax.cloud.aws import EC2 from ax.platform.cluster_config import AXClusterSize, AXClusterType, SpotInstanceOption from ax.platform.component_config import SoftwareInfo from .common import add_common_flags, add_software_info_flags, validate_software_info, \ - ClusterManagementOperationConfigBase, typed_raw_input_with_default 
+ ClusterManagementOperationConfigBase, typed_raw_input_with_default, AWS_NO_PROFILE logger = logging.getLogger(__name__) @@ -26,7 +26,7 @@ class ClusterInstallDefaults: CLUSTER_SIZE = "small" CLUSTER_TYPE = "standard" CLOUD_REGION = "us-west-2" - CLOUD_PROFILE = AWS_DEFAULT_PROFILE + CLOUD_PROFILE = AWS_NO_PROFILE CLOUD_PLACEMENT = "us-west-2a" VPC_CIDR_BASE = "172.20" SUBNET_MASK_SIZE = 22 @@ -121,7 +121,7 @@ def default_or_wizard(self): if self.cloud_profile is None: self.cloud_profile = typed_raw_input_with_default( prompt="Please enter your cloud provider profile. If you don't provide one, we are going to use the default you configured on host.", - default=AWS_DEFAULT_PROFILE, + default=AWS_NO_PROFILE, type_converter=str ) @@ -192,9 +192,13 @@ def default_or_wizard(self): confirmation += "Trusted CIDRs: {}\n".format(self.trusted_cidrs) confirmation += "Spot Instance Option: {}\n".format(self.spot_instances_option) confirmation += "User On-Demand Nodes: {}\n".format(self.user_on_demand_nodes) - confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate the program if these configurations are not what you want:\n" + confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate the program if these configurations are not what you want:" raw_input(confirmation) + # TODO: revise this once we bring GCP into picture + if self.cloud_profile == AWS_NO_PROFILE: + self.cloud_profile = None + def validate(self): diff --git a/platform/cluster/aws/util.sh b/platform/cluster/aws/util.sh index 6d13ecb26471..a0bc3634e463 100755 --- a/platform/cluster/aws/util.sh +++ b/platform/cluster/aws/util.sh @@ -747,11 +747,19 @@ function delete-tag { # Creates the IAM roles (if they do not already exist) function create-iam-profiles { - /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION} + local aws_profile_arg="" + if [[ ! 
-z ${AWS_DEFAULT_PROFILE+x} ]]; then + aws_profile_arg="--aws-profile ${AWS_DEFAULT_PROFILE}" + fi + /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-region ${AWS_REGION} ${aws_profile_arg} } function delete-iam-profiles { - /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION} + local aws_profile_arg="" + if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then + aws_profile_arg="--aws-profile ${AWS_DEFAULT_PROFILE}" + fi + /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-region ${AWS_REGION} ${aws_profile_arg} } # Wait for instance to be in specified state diff --git a/platform/source/lib/ax/platform/kube_env_config.py b/platform/source/lib/ax/platform/kube_env_config.py index 09d0085be350..d93f8b32c32f 100644 --- a/platform/source/lib/ax/platform/kube_env_config.py +++ b/platform/source/lib/ax/platform/kube_env_config.py @@ -201,8 +201,6 @@ def prepare_kube_install_config(name_id, aws_profile, cluster_info, cluster_conf if aws_profile: env["AWS_DEFAULT_PROFILE"] = aws_profile - else: - env["AWS_DEFAULT_PROFILE"] = AWS_DEFAULT_PROFILE optional_env = { # Start off directly with all spot instances only for dev clusters. 
diff --git a/platform/source/tools/ax-upgrade-misc.py b/platform/source/tools/ax-upgrade-misc.py index 4e2999130709..730872ebc372 100755 --- a/platform/source/tools/ax-upgrade-misc.py +++ b/platform/source/tools/ax-upgrade-misc.py @@ -47,7 +47,6 @@ if args.ensure_aws_iam or args.delete_aws_iam: from ax.platform.cluster_instance_profile import AXClusterInstanceProfile assert args.cluster_name_id, "Missing cluster name id to ensure aws iam" - assert args.aws_region, "Missing AWS region to ensure aws iam" if args.ensure_aws_iam: AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).update() elif args.delete_aws_iam: @@ -58,10 +57,8 @@ name_id = args.cluster_name_id aws_profile = args.aws_profile aws_region = args.aws_region - assert name_id and aws_profile and aws_region, \ - "Missing parameters to ensure s3. name_id: {}, aws_profile: {}, aws_region: {}".format(name_id, - aws_profile, - aws_region) + assert name_id and aws_region, \ + "Missing parameters to ensure s3. 
name_id: {}, aws_region: {}".format(name_id, aws_region) AXClusterBuckets(name_id, aws_profile, aws_region).update() diff --git a/platform/source/tools/master_manager.py b/platform/source/tools/master_manager.py index 6db2016c3703..48b300f7632b 100755 --- a/platform/source/tools/master_manager.py +++ b/platform/source/tools/master_manager.py @@ -17,7 +17,7 @@ def run(): parser.add_argument("cluster_name_id", help="Name of the cluster") parser.add_argument("command", help="Command, server or upgrade") parser.add_argument("--region", help="Region name") - parser.add_argument("--profile", help="Profile name") + parser.add_argument("--profile", default=None, help="Profile name") parser.add_argument('--version', action='version', version="%(prog)s {}".format(__version__)) usr_args = parser.parse_args() diff --git a/platform/source/tools/upgrade-kubernetes.sh b/platform/source/tools/upgrade-kubernetes.sh index a59dd09f3db9..8aafc116600c 100755 --- a/platform/source/tools/upgrade-kubernetes.sh +++ b/platform/source/tools/upgrade-kubernetes.sh @@ -39,7 +39,7 @@ ensure-aws-envs () { echo "Setting AWS profile to ${ARGO_AWS_PROFILE}" export AWS_DEFAULT_PROFILE="${ARGO_AWS_PROFILE}" else - export AWS_DEFAULT_PROFILE="default" + echo "Not setting AWS profile" fi echo "Setting AWS region to ${ARGO_AWS_REGION}" aws configure set region ${ARGO_AWS_REGION} @@ -209,14 +209,18 @@ get-instance-counts() { upgrade-launch-config() { echo "Upgrading launch configurations ..." + local aws_profile_arg="" + if [[ ! 
-z ${AWS_DEFAULT_PROFILE+x} ]]; then + aws_profile_arg="--profile ${AWS_DEFAULT_PROFILE}" + fi /ax/bin/minion_upgrade --new-kube-version ${NEW_KUBE_VERSION} \ --new-kube-server-hash ${NEW_KUBE_SERVER_SHA1} \ --new-cluster-install-version ${NEW_CLUSTER_INSTALL_VERSION} \ --new-kube-salt-hash ${NEW_KUBE_SALT_SHA1} \ - --profile ${AWS_DEFAULT_PROFILE} \ --region ${ARGO_AWS_REGION} \ --ax-vol-disk-type ${AX_VOL_DISK_TYPE} \ - --cluster-name-id ${CLUSTER_NAME_ID} + --cluster-name-id ${CLUSTER_NAME_ID} \ + ${aws_profile_arg} } @@ -226,7 +230,13 @@ upgrade-master () { echo echo "=== Step 2. Configure Kubernetes master." echo - /ax/bin/master_manager ${CLUSTER_NAME_ID} upgrade --region ${ARGO_AWS_REGION} --profile ${AWS_DEFAULT_PROFILE} + + local aws_profile_arg="" + if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then + aws_profile_arg="--profile ${AWS_DEFAULT_PROFILE}" + fi + + /ax/bin/master_manager ${CLUSTER_NAME_ID} upgrade --region ${ARGO_AWS_REGION} ${aws_profile_arg} rm -f ~/.ssh/known_hosts }