add extra lints to custom pipeline #21

Merged · 2 commits · Nov 13, 2024
8 changes: 5 additions & 3 deletions src/connectedk8s/azext_connectedk8s/_utils.py
@@ -1195,7 +1195,7 @@ def helm_install_release(
     arm_metadata: dict[str, Any],
     helm_content_values: dict[str, Any],
     registry_path: str,
-    aad_identity_principal_id: str,
+    aad_identity_principal_id: str | None,
     onboarding_timeout: str = consts.DEFAULT_MAX_ONBOARDING_TIMEOUT_HELMVALUE_SECONDS,
 ) -> None:
     cmd_helm_install = [
@@ -1241,8 +1241,10 @@ def helm_install_release(
     relay_endpoint = arm_metadata["suffixes"]["relayEndpointSuffix"]
     active_directory = arm_metadata["authentication"]["loginEndpoint"]
     if not aad_identity_principal_id:
-        raise CLIInternalError("Failed to create the kubeAadEndpoint endpoint. The identity principal ID of "
-                               "the created connected cluster is empty.")
+        raise CLIInternalError(
+            "Failed to create the kubeAadEndpoint endpoint. The identity "
+            "principal ID of the created connected cluster is empty."
+        )
     kube_aad_endpoint = f"{aad_identity_principal_id}.k8sproxysvc.connectrp.azs"
     cmd_helm_install.extend(
         [
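
Together these two hunks make the Optional contract explicit: the parameter is now typed str | None, and the guard narrows it back to str before it is embedded in the endpoint. A minimal sketch of the same narrowing pattern (the function name is illustrative, not part of the extension):

def build_kube_aad_endpoint(principal_id: str | None) -> str:
    # Without the guard the f-string would silently produce
    # "None.k8sproxysvc.connectrp.azs"; the check also narrows the
    # type from `str | None` to `str` for mypy.
    if not principal_id:
        raise ValueError("identity principal ID of the connected cluster is empty")
    return f"{principal_id}.k8sproxysvc.connectrp.azs"
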
59 changes: 28 additions & 31 deletions src/connectedk8s/azext_connectedk8s/custom.py
@@ -393,7 +393,11 @@ def create_connectedk8s(
         raise ManualInterrupt("Process terminated externally.")

     # If the checks didn't pass then stop the onboarding
-    if diagnostic_checks != consts.Diagnostic_Check_Passed and not azure_local_disconnected and not lowbandwidth:
+    if (
+        diagnostic_checks != consts.Diagnostic_Check_Passed
+        and not azure_local_disconnected
+        and not lowbandwidth
+    ):
         if storage_space_available:
             logger.warning(
                 "The pre-check result logs have been saved at this path: "
@@ -903,7 +907,7 @@ def create_connectedk8s(
     )

     helm_content_values = helm_values_dp["helmValuesContent"]
-    aad_identity_principal_id = put_cc_response.identity.principal_id,
+    aad_identity_principal_id = put_cc_response.identity.principal_id

     # Substitute any protected helm values, as the value for those will be 'redacted:<feature>:<protectedSetting>'
     for helm_parameter, helm_value in helm_content_values.items():
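
Beyond the re-wrapping, this hunk fixes a real bug: the trailing comma made aad_identity_principal_id a one-element tuple rather than a string. A standalone illustration:

principal_id = "abc-123"

as_tuple = principal_id,  # trailing comma builds the tuple ("abc-123",)
as_str = principal_id     # plain string

assert as_tuple == ("abc-123",)
assert as_str == "abc-123"
# ruff's COM818 rule flags a bare trailing comma like this, and mypy
# rejects the tuple once the target is annotated or used as `str`.
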
@@ -2274,7 +2278,9 @@ def update_connected_cluster(
     for helm_parameter, helm_value in helm_content_values.items():
         if "redacted" in helm_value:
             _, feature, protectedSetting = helm_value.split(":")
-            helm_content_values[helm_parameter] = configuration_protected_settings[feature][protectedSetting]
+            helm_content_values[helm_parameter] = configuration_protected_settings[
+                feature
+            ][protectedSetting]

     # Disable proxy if disable_proxy flag is set
     if disable_proxy:
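
For context, this loop restores protected settings whose transported values are placeholders of the form redacted:<feature>:<protectedSetting>. A self-contained sketch of the same substitution (the sample data is invented for illustration):

helm_content_values = {
    "global.proxyCert": "redacted:proxy:proxyCert",
    "global.httpsProxy": "https://proxy.local:8080",
}
configuration_protected_settings = {"proxy": {"proxyCert": "-----BEGIN CERT-----..."}}

for helm_parameter, helm_value in helm_content_values.items():
    if "redacted" in helm_value:
        # Placeholder format: "redacted:<feature>:<protectedSetting>"
        _, feature, protected_setting = helm_value.split(":")
        helm_content_values[helm_parameter] = configuration_protected_settings[
            feature
        ][protected_setting]

print(helm_content_values["global.proxyCert"])  # the real protected value
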
@@ -3364,25 +3370,10 @@ def merge_kubernetes_configurations(
         except (KeyError, TypeError):
             continue

-    if addition is None:
-        telemetry.set_exception(
-            exception="Failed to load additional configuration",
-            fault_type=consts.Failed_To_Load_K8s_Configuration_Fault_Type,
-            summary="failed to load additional configuration from {}".format(
-                addition_file
-            ),
-        )
-        raise CLIInternalError(
-            f"Failed to load additional configuration from {addition_file}"
-        )
-
-    if existing is None:
-        existing = addition
-    else:
-        handle_merge(existing, addition, "clusters", replace)
-        handle_merge(existing, addition, "users", replace)
-        handle_merge(existing, addition, "contexts", replace)
-        existing["current-context"] = addition["current-context"]
+    handle_merge(existing, addition, "clusters", replace)
+    handle_merge(existing, addition, "users", replace)
+    handle_merge(existing, addition, "contexts", replace)
+    existing["current-context"] = addition["current-context"]

     # check that ~/.kube/config is only read- and writable by its owner
     if platform.system() != "Windows":
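
The deleted None-handling was presumably dead code once the loading logic above guarantees both mappings exist; the surviving calls merge each named kubeconfig section. What a helper like handle_merge plausibly does (an assumption for illustration, not the extension's actual implementation):

from typing import Any


def merge_named_entries(
    existing: dict[str, Any], addition: dict[str, Any], key: str, replace: bool
) -> None:
    # Kubeconfig sections such as "clusters", "users", and "contexts" are
    # lists of {"name": ..., ...} mappings; merge them by name and let
    # `replace` decide whether the addition wins on conflicts.
    merged = {entry["name"]: entry for entry in existing.get(key, [])}
    for entry in addition.get(key, []):
        if replace or entry["name"] not in merged:
            merged[entry["name"]] = entry
    existing[key] = list(merged.values())
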
@@ -3629,12 +3620,12 @@ def client_side_proxy_wrapper(
     # initializations
     user_type = "sat"
     creds = ""
-    dict_file = {
+    dict_file: dict[str, Any] = {
         "server": {
             "httpPort": int(client_proxy_port),
-            "httpsPort": int(api_server_port)
+            "httpsPort": int(api_server_port),
         },
-        "identity": {"tenantID": tenant_id}
+        "identity": {"tenantID": tenant_id},
     }

     # if service account token is not passed
@@ -3644,7 +3635,6 @@
         account = Profile().get_subscription(subscription_id)
         user_type = account["user"]["type"]

-    dict_file: dict[str, Any]
     if user_type == "user":
         dict_file["identity"]["clientID"] = consts.CLIENTPROXY_CLIENT_ID
     else:
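
The annotation removed here was a bare declaration sitting far from the assignment; the companion hunk above moves it onto dict_file = { ... } itself, which is where mypy expects a variable's type to be stated. A minimal sketch:

from typing import Any

# Annotate at the point of creation so every later mutation is checked
# against dict[str, Any] from the start.
config: dict[str, Any] = {"server": {"httpPort": 8080}}
config["identity"] = {"tenantID": "00000000-0000-0000-0000-000000000000"}
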
@@ -3704,13 +3694,19 @@ def client_side_proxy_wrapper(
     arm_metadata = utils.get_metadata(cmd.cli_ctx.cloud.endpoints.resource_manager)
     if "dataplaneEndpoints" in arm_metadata:
         dict_file["cloudConfig"] = {}
-        dict_file["cloudConfig"]["resourceManagerEndpoint"] = arm_metadata["resourceManager"]
+        dict_file["cloudConfig"]["resourceManagerEndpoint"] = arm_metadata[
+            "resourceManager"
+        ]
         relay_endpoint_suffix = arm_metadata["suffixes"]["relayEndpointSuffix"]
         if relay_endpoint_suffix[0] == ".":
-            dict_file["cloudConfig"]["serviceBusEndpointSuffix"] = (relay_endpoint_suffix)[1:]
+            dict_file["cloudConfig"]["serviceBusEndpointSuffix"] = (
+                relay_endpoint_suffix
+            )[1:]
         else:
             dict_file["cloudConfig"]["serviceBusEndpointSuffix"] = relay_endpoint_suffix
-        dict_file["cloudConfig"]["activeDirectoryEndpoint"] = arm_metadata["authentication"]["loginEndpoint"]
+        dict_file["cloudConfig"]["activeDirectoryEndpoint"] = arm_metadata[
+            "authentication"
+        ]["loginEndpoint"]

     telemetry.set_debug_info("User type is ", user_type)
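
The dot-stripping branch above only removes a single leading dot from the relay suffix; the redundant parentheses around relay_endpoint_suffix become more obvious once ruff wraps the line. An equivalent, branch-free spelling (the suffix value is invented for the example):

relay_endpoint_suffix = ".servicebus.windows.net"

# str.removeprefix (Python 3.9+) drops exactly one leading dot if present.
service_bus_endpoint_suffix = relay_endpoint_suffix.removeprefix(".")
assert service_bus_endpoint_suffix == "servicebus.windows.net"
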

@@ -4545,8 +4541,9 @@ def install_kubectl_client() -> str:
         f"Step: {utils.get_utctimestring()}: Install Kubectl client if it does not exist"
     )
     # Return kubectl client path set by user
-    if os.getenv("KUBECTL_CLIENT_PATH"):
-        return os.getenv("KUBECTL_CLIENT_PATH")
+    kubectl_client_path = os.getenv("KUBECTL_CLIENT_PATH")
+    if kubectl_client_path:
+        return kubectl_client_path

     try:
         # Fetching the current directory where the cli installs the kubectl executable
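
The rewritten guard is a typing fix as much as a style one: os.getenv returns str | None, and calling it twice means the truthiness check does not narrow the second call's result, so the function's -> str annotation would not type-check. A minimal sketch (the fallback path is hypothetical):

import os


def kubectl_path() -> str:
    # One call, one narrowing: after the truthiness check mypy knows
    # `path` is `str`, so returning it satisfies `-> str`.
    path = os.getenv("KUBECTL_CLIENT_PATH")
    if path:
        return path
    return "/usr/local/bin/kubectl"  # hypothetical fallback for this sketch
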
72 changes: 72 additions & 0 deletions testing/pipeline/k8s-custom-pipelines.yml
@@ -256,3 +256,75 @@ stages:
       env:
         ADO_PULL_REQUEST_LATEST_COMMIT: $(System.PullRequest.SourceCommitId)
         ADO_PULL_REQUEST_TARGET_BRANCH: $(System.PullRequest.TargetBranch)
+
+  - job: RuffCheck
+    displayName: "Lint connectedk8s with ruff check"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff check
+
+      displayName: "ruff check"
+
+  - job: RuffFormat
+    displayName: "Check connectedk8s formatting with ruff"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        ruff format --check
+
+      displayName: "ruff format check"
+
+  - job: TypeChecking
+    displayName: "Typecheck connectedk8s with mypy"
+    pool:
+      vmImage: 'ubuntu-latest'
+    steps:
+    - task: UsePythonVersion@0
+      displayName: 'Use Python 3.12'
+      inputs:
+        versionSpec: 3.12
+    - bash: |
+        set -ev
+
+        # prepare and activate virtualenv
+        cd src/connectedk8s
+        python -m venv env
+        source ./env/bin/activate
+
+        pip install --upgrade pip
+        pip install azure-cli --editable .[linting]
+
+        mypy
+
+      displayName: "mypy"
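
All three jobs share the same setup: a Python 3.12 virtualenv with the package installed in editable mode alongside its linting extra (assuming the project's packaging metadata defines a [linting] extra), after which each job runs one tool from src/connectedk8s, where the tools' configuration presumably lives. A hedged sketch of reproducing the same gate locally, written in Python so it fails fast like the set -ev bash steps:

import subprocess
import sys

# Mirror of the pipeline's three lint jobs; run from src/connectedk8s inside
# a virtualenv where `pip install azure-cli --editable .[linting]` has been
# executed. The commands are taken verbatim from the YAML above.
CHECKS = [
    ["ruff", "check"],
    ["ruff", "format", "--check"],
    ["mypy"],
]

for cmd in CHECKS:
    print(f"$ {' '.join(cmd)}")
    result = subprocess.run(cmd)
    if result.returncode != 0:
        sys.exit(result.returncode)  # fail fast, like `set -ev` in the jobs
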