From 313331e2bbc10118982be337b4ebee0da9fe9d9f Mon Sep 17 00:00:00 2001 From: David Eads Date: Thu, 29 Oct 2020 13:55:57 +0100 Subject: [PATCH] UPSTREAM: <carry>: openshift-kube-apiserver: add kube-apiserver patches Origin-commit: 170dd7d25cca990fd7683eaf424d00bcd776c39c Origin-commit: 35ef039cb099dc609c576cf594aadd849212a00b UPSTREAM: <carry>: openshift-kube-apiserver: enabled conversion gen for admission configs --- cmd/kube-apiserver/app/patch_openshift.go | 19 +- cmd/kube-apiserver/app/server.go | 32 +- .../admissionenablement/admission.go | 15 + .../admissionenablement/admission_config.go | 30 ++ .../admission/admissionenablement/register.go | 111 ++++ .../admissionenablement/register_test.go | 54 ++ .../restrictusers/groupcache_test.go | 28 + .../restrictusers/intializers.go | 28 + .../restrictusers/restrictusers.go | 234 ++++ .../restrictusers/restrictusers_test.go | 404 ++++++++++++++ .../restrictusers/subjectchecker.go | 312 +++++++++++ .../restrictusers/subjectchecker_test.go | 349 ++++++++++++ .../restrictusers/usercache/groups.go | 55 ++ .../apis/clusterresourceoverride/doc.go | 4 + .../apis/clusterresourceoverride/name.go | 4 + .../apis/clusterresourceoverride/register.go | 23 + .../apis/clusterresourceoverride/types.go | 24 + .../apis/clusterresourceoverride/v1/doc.go | 5 + .../clusterresourceoverride/v1/register.go | 27 + .../clusterresourceoverride/v1/swagger_doc.go | 17 + .../apis/clusterresourceoverride/v1/types.go | 24 + .../v1/zz_generated.deepcopy.go | 34 ++ .../validation/validation.go | 27 + .../zz_generated.deepcopy.go | 34 ++ .../autoscaling/apis/runonceduration/doc.go | 4 + .../apis/runonceduration/register.go | 34 ++ .../autoscaling/apis/runonceduration/types.go | 26 + .../apis/runonceduration/v1/conversion.go | 26 + .../apis/runonceduration/v1/doc.go | 5 + .../apis/runonceduration/v1/register.go | 29 + .../apis/runonceduration/v1/swagger_doc.go | 15 + .../apis/runonceduration/v1/types.go | 22 + .../v1/zz_generated.deepcopy.go | 39 ++ .../runonceduration/validation/validation.go | 18 + .../validation/validation_test.go | 29 + .../runonceduration/zz_generated.deepcopy.go | 39 ++ .../clusterresourceoverride/admission.go | 348 ++++++++++++ .../clusterresourceoverride/admission_test.go | 507 ++++++++++++++++++ .../clusterresourceoverride/doc.go | 8 + .../autoscaling/runonceduration/admission.go | 148 +++++ .../runonceduration/admission_test.go | 215 ++++++++ .../autoscaling/runonceduration/doc.go | 22 + .../apiserver/validate_apiserver.go | 149 +++++ .../apiserver/validate_apiserver_test.go | 117 ++++ .../apiserver/validation_wrapper.go | 76 +++ .../customresourcevalidation/attributes.go | 53 ++ .../authentication/validate_authentication.go | 133 +++++ .../validate_authentication_test.go | 189 +++++++ .../clusterresourcequota/validate_crq.go | 83 +++ .../validation/validation.go | 68 +++ .../validation/validation_test.go | 173 ++++++ .../deny_delete_cluster_config_resource.go | 54 ++ ...eny_delete_cluster_config_resource_test.go | 73 +++ .../console/validate_console.go | 118 ++++ .../cr_validation_registration.go | 60 +++ .../customresourcevalidator.go | 98 ++++ .../customresourcevalidator_test.go | 278 ++++++++++ .../features/validate_features.go | 129 +++++ .../features/validate_features_test.go | 127 +++++ .../customresourcevalidation/helpers.go | 40 ++ .../image/validate_image.go | 94 ++++ .../customresourcevalidation/oauth/helpers.go | 33 ++ .../oauth/validate_github.go | 69 +++ .../oauth/validate_github_test.go | 249 +++++++++ .../oauth/validate_gitlab.go | 26 +
.../oauth/validate_gitlab_test.go | 104 ++++ .../oauth/validate_google.go | 23 + .../oauth/validate_google_test.go | 90 ++++ .../oauth/validate_idp.go | 215 ++++++++ .../oauth/validate_idp_test.go | 421 +++++++++++++++ .../oauth/validate_keystone.go | 23 + .../oauth/validate_keystone_test.go | 96 ++++ .../oauth/validate_ldap.go | 66 +++ .../oauth/validate_ldap_test.go | 101 ++++ .../oauth/validate_oauth.go | 110 ++++ .../oauth/validate_openid.go | 54 ++ .../oauth/validate_openid_test.go | 125 +++++ .../oauth/validate_requestheader.go | 85 +++ .../oauth/validate_requestheader_test.go | 193 +++++++ .../project/validate_project.go | 111 ++++ .../rolebindingrestriction/validate_rbr.go | 83 +++ .../validation/validation.go | 113 ++++ .../scheduler/validate_scheduler.go | 106 ++++ .../defaulting_scc.go | 93 ++++ .../defaulting_scc_test.go | 274 ++++++++++ .../securitycontextconstraints/defaults.go | 100 ++++ .../validate_scc.go | 79 +++ .../validation/validation.go | 275 ++++++++++ .../validation/validation_test.go | 343 ++++++++++++ .../namespaceconditions/decorator.go | 60 +++ .../namespaceconditions/labelcondition.go | 125 +++++ .../labelcondition_test.go | 97 ++++ .../namespaceconditions/namecondition.go | 60 +++ .../network/apis/externalipranger/doc.go | 4 + .../network/apis/externalipranger/register.go | 20 + .../network/apis/externalipranger/types.go | 20 + .../network/apis/externalipranger/v1/doc.go | 5 + .../apis/externalipranger/v1/register.go | 24 + .../network/apis/externalipranger/v1/types.go | 20 + .../v1/zz_generated.deepcopy.go | 55 ++ .../externalipranger/zz_generated.deepcopy.go | 39 ++ .../network/apis/restrictedendpoints/doc.go | 4 + .../apis/restrictedendpoints/register.go | 20 + .../network/apis/restrictedendpoints/types.go | 15 + .../apis/restrictedendpoints/v1/doc.go | 5 + .../apis/restrictedendpoints/v1/register.go | 24 + .../apis/restrictedendpoints/v1/types.go | 15 + .../v1/zz_generated.deepcopy.go | 39 ++ .../zz_generated.deepcopy.go | 39 ++ .../externalipranger/externalip_admission.go | 208 +++++++ .../externalip_admission_test.go | 321 +++++++++++ .../restrictedendpoints/endpoint_admission.go | 191 +++++++ .../route/apis/ingressadmission/doc.go | 4 + .../route/apis/ingressadmission/register.go | 33 ++ .../route/apis/ingressadmission/types.go | 22 + .../apis/ingressadmission/v1/defaults_test.go | 59 ++ .../route/apis/ingressadmission/v1/doc.go | 5 + .../apis/ingressadmission/v1/register.go | 27 + .../apis/ingressadmission/v1/swagger_doc.go | 15 + .../route/apis/ingressadmission/v1/types.go | 22 + .../v1/zz_generated.deepcopy.go | 34 ++ .../ingressadmission/zz_generated.deepcopy.go | 34 ++ .../admission/route/ingress_admission.go | 162 ++++++ .../admission/route/ingress_admission_test.go | 171 ++++++ .../scheduler/apis/podnodeconstraints/doc.go | 4 + .../apis/podnodeconstraints/register.go | 33 ++ .../apis/podnodeconstraints/types.go | 19 + .../apis/podnodeconstraints/v1/defaults.go | 19 + .../podnodeconstraints/v1/defaults_test.go | 59 ++ .../apis/podnodeconstraints/v1/doc.go | 5 + .../apis/podnodeconstraints/v1/register.go | 28 + .../apis/podnodeconstraints/v1/swagger_doc.go | 15 + .../apis/podnodeconstraints/v1/types.go | 20 + .../v1/zz_generated.deepcopy.go | 39 ++ .../zz_generated.deepcopy.go | 39 ++ .../admission/scheduler/nodeenv/admission.go | 148 +++++ .../scheduler/nodeenv/admission_test.go | 160 ++++++ .../scheduler/nodeenv/intializers.go | 28 + .../nodeenv/labelselector/labelselector.go | 359 +++++++++++++ .../labelselector/labelselector_test.go | 181 
+++++++ .../scheduler/podnodeconstraints/admission.go | 205 +++++++ .../podnodeconstraints/admission_test.go | 283 ++++++++++ .../scheduler/podnodeconstraints/doc.go | 44 ++ .../oauth/bootstrapauthenticator.go | 102 ++++ .../oauth/expirationvalidator.go | 31 ++ .../oauth/expirationvalidator_test.go | 72 +++ .../authentication/oauth/interfaces.go | 41 ++ .../oauth/rankedset/rankedset.go | 162 ++++++ .../oauth/rankedset/rankedset_test.go | 273 ++++++++++ .../authentication/oauth/timeoutvalidator.go | 233 ++++++++ .../oauth/tokenauthenticator.go | 84 +++ .../oauth/tokenauthenticator_test.go | 354 ++++++++++++ .../authentication/oauth/uidvalidator.go | 21 + .../authorization/browsersafe/authorizer.go | 107 ++++ .../browsersafe/authorizer_test.go | 80 +++ .../scopeauthorizer/authorizer.go | 49 ++ .../scopeauthorizer/authorizer_test.go | 150 ++++++ .../configdefault/kubecontrolplane_default.go | 115 ++++ .../configdefault/kubecontrolplane_refs.go | 122 +++++ .../enablement/enablement.go | 71 +++ .../enablement/intialization.go | 81 +++ .../openshiftkubeapiserver/flags.go | 222 ++++++++ .../openshiftkubeapiserver/flags_test.go | 26 + .../paramtoken/paramtoken.go | 45 ++ .../openshiftkubeapiserver/patch.go | 157 ++++++ .../patch_authorizer.go | 52 ++ .../patch_handlerchain.go | 97 ++++ .../openshiftkubeapiserver/wellknown_oauth.go | 57 ++ pkg/kubeapiserver/authenticator/config.go | 7 + .../authenticator/patch_authenticator.go | 86 +++ 170 files changed, 15752 insertions(+), 31 deletions(-) create mode 100644 openshift-kube-apiserver/admission/admissionenablement/admission.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/admission_config.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/register.go create mode 100644 openshift-kube-apiserver/admission/admissionenablement/register_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go create mode 100644 openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go create 
mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go create mode 100644 openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/attributes.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go create mode 100644 
openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/helpers.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go create mode 100644 
openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go create mode 100644 openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/decorator.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go create mode 100644 openshift-kube-apiserver/admission/namespaceconditions/namecondition.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go create mode 100644 openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go create mode 100644 openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go create mode 100644 openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go create mode 100644 
openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/route/ingress_admission.go create mode 100644 openshift-kube-apiserver/admission/route/ingress_admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector.go create mode 100644 openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go create mode 100644 openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go create mode 100644 openshift-kube-apiserver/authentication/oauth/bootstrapauthenticator.go create mode 100644 openshift-kube-apiserver/authentication/oauth/expirationvalidator.go create mode 100644 openshift-kube-apiserver/authentication/oauth/expirationvalidator_test.go create mode 100644 openshift-kube-apiserver/authentication/oauth/interfaces.go create mode 100644 openshift-kube-apiserver/authentication/oauth/rankedset/rankedset.go create mode 100644 openshift-kube-apiserver/authentication/oauth/rankedset/rankedset_test.go create mode 100644 openshift-kube-apiserver/authentication/oauth/timeoutvalidator.go create mode 100644 openshift-kube-apiserver/authentication/oauth/tokenauthenticator.go create mode 100644 openshift-kube-apiserver/authentication/oauth/tokenauthenticator_test.go create mode 100644 openshift-kube-apiserver/authentication/oauth/uidvalidator.go create mode 100644 openshift-kube-apiserver/authorization/browsersafe/authorizer.go create mode 100644 openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go create mode 100644 openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go create mode 100644 openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go create mode 100644 openshift-kube-apiserver/configdefault/kubecontrolplane_default.go create mode 100644 openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go 
create mode 100644 openshift-kube-apiserver/enablement/enablement.go create mode 100644 openshift-kube-apiserver/enablement/intialization.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/flags.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/paramtoken/paramtoken.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/patch.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/patch_authorizer.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go create mode 100644 openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go create mode 100644 pkg/kubeapiserver/authenticator/patch_authenticator.go diff --git a/cmd/kube-apiserver/app/patch_openshift.go b/cmd/kube-apiserver/app/patch_openshift.go index c1183c3e7f474..8744ae69c4731 100644 --- a/cmd/kube-apiserver/app/patch_openshift.go +++ b/cmd/kube-apiserver/app/patch_openshift.go @@ -5,29 +5,14 @@ import ( genericapiserver "k8s.io/apiserver/pkg/server" clientgoinformers "k8s.io/client-go/informers" "k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver" - "k8s.io/kubernetes/pkg/master" ) var OpenShiftKubeAPIServerConfigPatch openshiftkubeapiserver.KubeAPIServerConfigFunc = nil -type KubeAPIServerServerFunc func(server *master.Master) error - -func PatchKubeAPIServerConfig(config *genericapiserver.Config, versionedInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) (genericapiserver.DelegationTarget, error) { +func PatchKubeAPIServerConfig(config *genericapiserver.Config, versionedInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) error { if OpenShiftKubeAPIServerConfigPatch == nil { - return genericapiserver.NewEmptyDelegate(), nil - } - - return OpenShiftKubeAPIServerConfigPatch(config, versionedInformers, pluginInitializers) -} - -var OpenShiftKubeAPIServerServerPatch KubeAPIServerServerFunc = nil - -func PatchKubeAPIServerServer(server *master.Master) error { - if OpenShiftKubeAPIServerServerPatch == nil { return nil } - return OpenShiftKubeAPIServerServerPatch(server) + return OpenShiftKubeAPIServerConfigPatch(config, versionedInformers, pluginInitializers) } - -var StartingDelegate genericapiserver.DelegationTarget = genericapiserver.NewEmptyDelegate() diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 4fbc408751487..819ced410946f 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -30,7 +30,7 @@ import ( "strings" "time" - "k8s.io/kubernetes/openshift-kube-apiserver/configdefault" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/admissionenablement" "k8s.io/kubernetes/openshift-kube-apiserver/enablement" "k8s.io/kubernetes/openshift-kube-apiserver/openshiftkubeapiserver" @@ -128,31 +128,38 @@ cluster's shared state through which all other components interact.`, RunE: func(cmd *cobra.Command, args []string) error { verflag.PrintAndExitIfRequested() fs := cmd.Flags() - cliflag.PrintFlags(fs) if len(s.OpenShiftConfig) > 0 { - enablement.ForceOpenShift() openshiftConfig, err := enablement.GetOpenshiftConfig(s.OpenShiftConfig) if err != nil { klog.Fatal(err) } + enablement.ForceOpenShift(openshiftConfig) // this forces a patch to be called // TODO we're going to try to remove bits of the patching. 
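[Editorial note, not part of the patch: with this change the OpenShift hook becomes a plain function value; its shape follows from PatchKubeAPIServerConfig above. A minimal hypothetical stand-in, for orientation only, would be:

var noopPatch openshiftkubeapiserver.KubeAPIServerConfigFunc = func(
	config *genericapiserver.Config,
	versionedInformers clientgoinformers.SharedInformerFactory,
	pluginInitializers *[]admission.PluginInitializer,
) error {
	// mutate the generic config or append plugin initializers here
	return nil
}

PatchKubeAPIServerConfig only invokes the hook when it is non-nil, so vanilla kube builds are unaffected.]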
- configPatchFn, serverPatchContext := openshiftkubeapiserver.NewOpenShiftKubeAPIServerConfigPatch(genericapiserver.NewEmptyDelegate(), openshiftConfig) + configPatchFn := openshiftkubeapiserver.NewOpenShiftKubeAPIServerConfigPatch(openshiftConfig) OpenShiftKubeAPIServerConfigPatch = configPatchFn - OpenShiftKubeAPIServerServerPatch = serverPatchContext.PatchServer args, err := openshiftkubeapiserver.ConfigToFlags(openshiftConfig) if err != nil { return err } + // hopefully this resets the flags? if err := cmd.ParseFlags(args); err != nil { return err } - enablement.ForceGlobalInitializationForOpenShift(s) + // print merged flags (merged from OpenshiftConfig) + cliflag.PrintFlags(cmd.Flags()) + + enablement.ForceGlobalInitializationForOpenShift() + admissionenablement.InstallOpenShiftAdmissionPlugins(s) + + } else { + // print default flags + cliflag.PrintFlags(cmd.Flags()) } err := checkNonZeroInsecurePort(fs) @@ -242,7 +249,7 @@ func CreateServerChain(completedOptions completedServerRunOptions, stopCh <-chan if err != nil { return nil, err } - apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, StartingDelegate) + apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, genericapiserver.NewEmptyDelegate()) if err != nil { return nil, err } @@ -252,10 +259,6 @@ func CreateServerChain(completedOptions completedServerRunOptions, stopCh <-chan return nil, err } - if err := PatchKubeAPIServerServer(kubeAPIServer); err != nil { - return nil, err - } - // aggregator comes last in the chain aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, completedOptions.ServerRunOptions, kubeAPIServerConfig.ExtraConfig.VersionedInformers, serviceResolver, proxyTransport, pluginInitializer) if err != nil { @@ -546,6 +549,8 @@ func buildGenericConfig( // on a fast local network genericConfig.LoopbackClientConfig.DisableCompression = true + enablement.SetLoopbackClientConfig(genericConfig.LoopbackClientConfig) + kubeClientConfig := genericConfig.LoopbackClientConfig clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeClientConfig) if err != nil { @@ -585,14 +590,13 @@ func buildGenericConfig( return } - StartingDelegate, err = PatchKubeAPIServerConfig(genericConfig, versionedInformers, &pluginInitializers) - if err != nil { + if err := PatchKubeAPIServerConfig(genericConfig, versionedInformers, &pluginInitializers); err != nil { lastErr = fmt.Errorf("failed to patch: %v", err) return } if enablement.IsOpenShift() { - configdefault.SetAdmissionDefaults(s, versionedInformers, clientgoExternalClient) + admissionenablement.SetAdmissionDefaults(s, versionedInformers, clientgoExternalClient) } err = s.Admission.ApplyTo( genericConfig, diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission.go b/openshift-kube-apiserver/admission/admissionenablement/admission.go new file mode 100644 index 0000000000000..a701f6d285cae --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission.go @@ -0,0 +1,15 @@ +package admissionenablement + +import ( + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" +) + +func InstallOpenShiftAdmissionPlugins(o *options.ServerRunOptions) { + existingAdmissionOrder := o.Admission.GenericAdmission.RecommendedPluginOrder + o.Admission.GenericAdmission.RecommendedPluginOrder = NewOrderedKubeAdmissionPlugins(existingAdmissionOrder) + 
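// For illustration (hypothetical input, not part of the patch): given a kube
// order such as [..., "ResourceQuota", ..., "MutatingAdmissionWebhook", ...],
// NewOrderedKubeAdmissionPlugins (see register.go below) returns the same slice
// with "quota.openshift.io/ClusterResourceQuota" spliced in directly after
// "ResourceQuota", and with the OpenShift plugins plus the custom-resource
// validators spliced in directly before "MutatingAdmissionWebhook".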
RegisterOpenshiftKubeAdmissionPlugins(o.Admission.GenericAdmission.Plugins) + customresourcevalidationregistration.RegisterCustomResourceValidation(o.Admission.GenericAdmission.Plugins) + existingDefaultOff := o.Admission.GenericAdmission.DefaultOffPlugins + o.Admission.GenericAdmission.DefaultOffPlugins = NewDefaultOffPluginsFunc(existingDefaultOff)() +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/admission_config.go b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go new file mode 100644 index 0000000000000..e99d69047fd80 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/admission_config.go @@ -0,0 +1,30 @@ +package admissionenablement + +import ( + "time" + + "github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/namespaceconditions" +) + +func SetAdmissionDefaults(o *options.ServerRunOptions, informers informers.SharedInformerFactory, kubeClient kubernetes.Interface) { + // set up the decorators we need. This is done late and out of order because our decorators currently require informers which are not + // present until we start running + namespaceLabelDecorator := namespaceconditions.NamespaceLabelConditions{ + NamespaceClient: kubeClient.CoreV1(), + NamespaceLister: informers.Core().V1().Namespaces().Lister(), + + SkipLevelZeroNames: SkipRunLevelZeroPlugins, + SkipLevelOneNames: SkipRunLevelOnePlugins, + } + o.Admission.GenericAdmission.Decorators = append(o.Admission.GenericAdmission.Decorators, + admission.Decorators{ + admission.DecoratorFunc(namespaceLabelDecorator.WithNamespaceLabelConditions), + admission.DecoratorFunc(admissiontimeout.AdmissionTimeout{Timeout: 13 * time.Second}.WithTimeout), + }, + ) +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/register.go b/openshift-kube-apiserver/admission/admissionenablement/register.go new file mode 100644 index 0000000000000..cd5de4c3fc192 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/register.go @@ -0,0 +1,111 @@ +package admissionenablement + +import ( + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/plugin/resourcequota" + mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating" + + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + imagepolicyapiv1 "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/apis/imagepolicy/v1" + quotaclusterresourcequota "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + authorizationrestrictusers "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + quotaclusterresourceoverride "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride" + quotarunonceduration "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/runonceduration" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/externalipranger" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/restrictedendpoints" + ingressadmission 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/route" + projectnodeenv "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" + schedulerpodnodeconstraints "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/podnodeconstraints" +) + +func RegisterOpenshiftKubeAdmissionPlugins(plugins *admission.Plugins) { + authorizationrestrictusers.Register(plugins) + imagepolicy.Register(plugins) + ingressadmission.Register(plugins) + projectnodeenv.Register(plugins) + quotaclusterresourceoverride.Register(plugins) + quotaclusterresourcequota.Register(plugins) + quotarunonceduration.Register(plugins) + schedulerpodnodeconstraints.Register(plugins) + sccadmission.Register(plugins) + sccadmission.RegisterSCCExecRestrictions(plugins) + externalipranger.RegisterExternalIP(plugins) + restrictedendpoints.RegisterRestrictedEndpoints(plugins) +} + +var ( + + // these are admission plugins that cannot be applied until after the kubeapiserver starts. + // TODO if nothing comes to mind in 3.10, kill this + SkipRunLevelZeroPlugins = sets.NewString() + // these are admission plugins that cannot be applied until after the openshiftapiserver apiserver starts. + SkipRunLevelOnePlugins = sets.NewString( + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "quota.openshift.io/ClusterResourceQuota", + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + ) + + // openshiftAdmissionPluginsForKubeBeforeMutating are the admission plugins to add after kube admission, before mutating webhooks + openshiftAdmissionPluginsForKubeBeforeMutating = []string{ + "autoscaling.openshift.io/ClusterResourceOverride", + "authorization.openshift.io/RestrictSubjectBindings", + "autoscaling.openshift.io/RunOnceDuration", + "scheduling.openshift.io/PodNodeConstraints", + "scheduling.openshift.io/OriginPodNodeEnvironment", + "network.openshift.io/ExternalIPRanger", + "network.openshift.io/RestrictedEndpointsAdmission", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "security.openshift.io/SecurityContextConstraint", + "security.openshift.io/SCCExecRestrictions", + "route.openshift.io/IngressAdmission", + } + + // openshiftAdmissionPluginsForKubeAfterResourceQuota are the plugins to add after ResourceQuota plugin + openshiftAdmissionPluginsForKubeAfterResourceQuota = []string{ + "quota.openshift.io/ClusterResourceQuota", + } + + // additionalDefaultOnPlugins is a list of plugins we turn on by default that core kube does not. + additionalDefaultOnPlugins = sets.NewString( + "NodeRestriction", + "OwnerReferencesPermissionEnforcement", + "PersistentVolumeLabel", + "PodNodeSelector", + "PodTolerationRestriction", + "Priority", + imagepolicyapiv1.PluginName, // "image.openshift.io/ImagePolicy" + "StorageObjectInUseProtection", + ) +) + +func NewOrderedKubeAdmissionPlugins(kubeAdmissionOrder []string) []string { + ret := []string{} + for _, curr := range kubeAdmissionOrder { + if curr == mutatingwebhook.PluginName { + ret = append(ret, openshiftAdmissionPluginsForKubeBeforeMutating...) + ret = append(ret, customresourcevalidationregistration.AllCustomResourceValidators...) + } + + ret = append(ret, curr) + + if curr == resourcequota.PluginName { + ret = append(ret, openshiftAdmissionPluginsForKubeAfterResourceQuota...) + } + } + return ret +} + +func NewDefaultOffPluginsFunc(kubeDefaultOffAdmission sets.String) func() sets.String { + return func() sets.String { + kubeOff := sets.NewString(kubeDefaultOffAdmission.UnsortedList()...) 
+ kubeOff.Delete(additionalDefaultOnPlugins.List()...) + kubeOff.Delete(openshiftAdmissionPluginsForKubeBeforeMutating...) + kubeOff.Delete(openshiftAdmissionPluginsForKubeAfterResourceQuota...) + kubeOff.Delete(customresourcevalidationregistration.AllCustomResourceValidators...) + return kubeOff + } +} diff --git a/openshift-kube-apiserver/admission/admissionenablement/register_test.go b/openshift-kube-apiserver/admission/admissionenablement/register_test.go new file mode 100644 index 0000000000000..3f1182c706f65 --- /dev/null +++ b/openshift-kube-apiserver/admission/admissionenablement/register_test.go @@ -0,0 +1,54 @@ +package admissionenablement + +import ( + "reflect" + "testing" + + "k8s.io/apiserver/pkg/admission" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/kubernetes/pkg/kubeapiserver/options" + + "github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration" +) + +func TestAdmissionRegistration(t *testing.T) { + orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins) + defaultOffPlugins := NewDefaultOffPluginsFunc(options.DefaultOffAdmissionPlugins())() + registerAllAdmissionPlugins := func(plugins *admission.Plugins) { + genericapiserver.RegisterAllAdmissionPlugins(plugins) + options.RegisterAllAdmissionPlugins(plugins) + RegisterOpenshiftKubeAdmissionPlugins(plugins) + customresourcevalidationregistration.RegisterCustomResourceValidation(plugins) + } + plugins := admission.NewPlugins() + registerAllAdmissionPlugins(plugins) + + err := admissionregistrationtesting.AdmissionRegistrationTest(plugins, orderedAdmissionChain, defaultOffPlugins) + if err != nil { + t.Fatal(err) + } +} + +// TestResourceQuotaBeforeClusterResourceQuota simply tests whether the ResourceQuota plugin is ordered before the ClusterResourceQuota plugin +func TestResourceQuotaBeforeClusterResourceQuota(t *testing.T) { + orderedAdmissionChain := NewOrderedKubeAdmissionPlugins(options.AllOrderedPlugins) + + expectedOrderedAdmissionSubChain := []string{"ResourceQuota", "quota.openshift.io/ClusterResourceQuota", "AlwaysDeny"} + actualOrderedAdmissionChain := extractSubChain(orderedAdmissionChain, expectedOrderedAdmissionSubChain[0]) + + if !reflect.DeepEqual(actualOrderedAdmissionChain, expectedOrderedAdmissionSubChain) { + t.Fatalf("expected %v, got %v ", expectedOrderedAdmissionSubChain, actualOrderedAdmissionChain) + } +} + +func extractSubChain(admissionChain []string, takeFrom string) []string { + indexOfTake := 0 + for index, admission := range admissionChain { + if admission == takeFrom { + indexOfTake = index + break + } + } + return admissionChain[indexOfTake:] +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go new file mode 100644 index 0000000000000..1dde83cbce2a2 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/groupcache_test.go @@ -0,0 +1,28 @@ +package restrictusers + +import ( + userv1 "github.com/openshift/api/user/v1" +) + +type fakeGroupCache struct { + groups []userv1.Group +} + +func (g fakeGroupCache) GroupsFor(user string) ([]*userv1.Group, error) { + ret := []*userv1.Group{} + for i := range g.groups { + group := &g.groups[i] + for _, currUser := range group.Users { + if user == currUser { + ret = append(ret, group) + break + } + } + + } + return ret, nil +} +
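// Worked example for GroupsFor above (hypothetical data): given groups =
// []userv1.Group{{ObjectMeta: metav1.ObjectMeta{Name: "g"}, Users:
// []string{"alice"}}}, GroupsFor("alice") returns a one-element slice holding
// group "g", while GroupsFor("bob") returns an empty slice.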
+func (g fakeGroupCache) HasSynced() bool { + return true +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go new file mode 100644 index 0000000000000..d3fdcde4a5113 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/intializers.go @@ -0,0 +1,28 @@ +package restrictusers + +import ( + "k8s.io/apiserver/pkg/admission" + + userinformer "github.com/openshift/client-go/user/informers/externalversions" +) + +func NewInitializer(userInformer userinformer.SharedInformerFactory) admission.PluginInitializer { + return &localInitializer{userInformer: userInformer} +} + +type WantsUserInformer interface { + SetUserInformer(userinformer.SharedInformerFactory) + admission.InitializationValidator +} + +type localInitializer struct { + userInformer userinformer.SharedInformerFactory +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsUserInformer); ok { + wants.SetUserInformer(i.userInformer) + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go new file mode 100644 index 0000000000000..4c78858203181 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers.go @@ -0,0 +1,234 @@ +package restrictusers + +import ( + "context" + "errors" + "fmt" + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/apis/rbac" + + userv1 "github.com/openshift/api/user/v1" + authorizationtypedclient "github.com/openshift/client-go/authorization/clientset/versioned/typed/authorization/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("authorization.openshift.io/RestrictSubjectBindings", + func(config io.Reader) (admission.Interface, error) { + return NewRestrictUsersAdmission() + }) +} + +type GroupCache interface { + GroupsFor(string) ([]*userv1.Group, error) + HasSynced() bool +} + +// restrictUsersAdmission implements admission.ValidateInterface and enforces +// restrictions on adding rolebindings in a project to permit only designated +// subjects. 
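// For example (illustrative): if a namespace contains a RoleBindingRestriction
// whose UserRestriction lists only "alice", an update that adds the subject
// "bob" to a RoleBinding in that namespace is rejected, while one that adds
// "alice" is admitted.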
+type restrictUsersAdmission struct { + *admission.Handler + + roleBindingRestrictionsGetter authorizationtypedclient.RoleBindingRestrictionsGetter + userClient userclient.Interface + kubeClient kubernetes.Interface + groupCache GroupCache +} + +var _ = admissionrestconfig.WantsRESTClientConfig(&restrictUsersAdmission{}) +var _ = WantsUserInformer(&restrictUsersAdmission{}) +var _ = initializer.WantsExternalKubeClientSet(&restrictUsersAdmission{}) +var _ = admission.ValidationInterface(&restrictUsersAdmission{}) + +// NewRestrictUsersAdmission configures an admission plugin that enforces +// restrictions on adding role bindings in a project. +func NewRestrictUsersAdmission() (admission.Interface, error) { + return &restrictUsersAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + }, nil +} + +func (q *restrictUsersAdmission) SetExternalKubeClientSet(c kubernetes.Interface) { + q.kubeClient = c +} + +func (q *restrictUsersAdmission) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + + // RoleBindingRestriction is served as a CRD resource; any status update must use JSON + jsonClientConfig := rest.CopyConfig(&restClientConfig) + jsonClientConfig.ContentConfig.AcceptContentTypes = "application/json" + jsonClientConfig.ContentConfig.ContentType = "application/json" + + q.roleBindingRestrictionsGetter, err = authorizationtypedclient.NewForConfig(jsonClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } + + q.userClient, err = userclient.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (q *restrictUsersAdmission) SetUserInformer(userInformers userinformer.SharedInformerFactory) { + q.groupCache = usercache.NewGroupCache(userInformers.User().V1().Groups()) +} + +// subjectsDelta returns the relative complement of elementsToIgnore in +// elements (i.e., elements∖elementsToIgnore). +func subjectsDelta(elementsToIgnore, elements []rbac.Subject) []rbac.Subject { + result := []rbac.Subject{} + + for _, el := range elements { + keep := true + for _, skipEl := range elementsToIgnore { + if el == skipEl { + keep = false + break + } + } + if keep { + result = append(result, el) + } + } + + return result +} + +// Validate makes admission decisions that enforce restrictions on adding +// project-scoped role-bindings. In order for a role binding to be permitted, +// each subject in the binding must be matched by some rolebinding restriction +// in the namespace. +func (q *restrictUsersAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + + // We only care about rolebindings + if a.GetResource().GroupResource() != rbac.Resource("rolebindings") { + return nil + } + + // Ignore all operations that correspond to subresource actions. + if len(a.GetSubresource()) != 0 { + return nil + } + + ns := a.GetNamespace() + // Ignore cluster-level resources.
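// (RoleBindingRestriction objects are namespaced, so there is nothing to check
// for cluster-scoped requests.) Editorial sketch of the flow below: only the
// subjects *added* by this request are validated; for instance,
// subjectsDelta([alice], [alice, bob]) yields [bob]. Each new subject must then
// be matched by at least one RoleBindingRestriction in the namespace.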
+ if len(ns) == 0 { + return nil + } + + var oldSubjects []rbac.Subject + + obj, oldObj := a.GetObject(), a.GetOldObject() + + rolebinding, ok := obj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for new rolebinding: %T", obj)) + } + + if len(rolebinding.Subjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + if oldObj != nil { + oldrolebinding, ok := oldObj.(*rbac.RoleBinding) + if !ok { + return admission.NewForbidden(a, + fmt.Errorf("wrong object type for old rolebinding: %T", oldObj)) + } + oldSubjects = oldrolebinding.Subjects + } + + klog.V(4).Infof("Handling rolebinding %s/%s", + rolebinding.Namespace, rolebinding.Name) + + newSubjects := subjectsDelta(oldSubjects, rolebinding.Subjects) + if len(newSubjects) == 0 { + klog.V(4).Infof("No new subjects; admitting") + return nil + } + + // RoleBindingRestrictions admission plugin is DefaultAllow, hence RBRs can't use an informer, + // because it's impossible to know if cache is up-to-date + roleBindingRestrictionList, err := q.roleBindingRestrictionsGetter.RoleBindingRestrictions(ns). + List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not list rolebinding restrictions: %v", err)) + } + if len(roleBindingRestrictionList.Items) == 0 { + klog.V(4).Infof("No rolebinding restrictions specified; admitting") + return nil + } + + checkers := []SubjectChecker{} + for _, rbr := range roleBindingRestrictionList.Items { + checker, err := NewSubjectChecker(&rbr.Spec) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction subject checker: %v", err)) + } + checkers = append(checkers, checker) + } + + roleBindingRestrictionContext, err := newRoleBindingRestrictionContext(ns, + q.kubeClient, q.userClient.UserV1(), q.groupCache) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("could not create rolebinding restriction context: %v", err)) + } + + checker := NewUnionSubjectChecker(checkers) + + errs := []error{} + for _, subject := range newSubjects { + allowed, err := checker.Allowed(subject, roleBindingRestrictionContext) + if err != nil { + errs = append(errs, err) + } + if !allowed { + errs = append(errs, + fmt.Errorf("rolebindings to %s %q are not allowed in project %q", + subject.Kind, subject.Name, ns)) + } + } + if len(errs) != 0 { + return admission.NewForbidden(a, kerrors.NewAggregate(errs)) + } + + klog.V(4).Infof("All new subjects are allowed; admitting") + + return nil +} + +func (q *restrictUsersAdmission) ValidateInitialization() error { + if q.kubeClient == nil { + return errors.New("RestrictUsersAdmission plugin requires a Kubernetes client") + } + if q.roleBindingRestrictionsGetter == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift client") + } + if q.userClient == nil { + return errors.New("RestrictUsersAdmission plugin requires an OpenShift user client") + } + if q.groupCache == nil { + return errors.New("RestrictUsersAdmission plugin requires a group cache") + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go new file mode 100644 index 0000000000000..50dd6eb5faea9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/restrictusers_test.go @@ -0,0 +1,404 @@ +package restrictusers + +import ( + "context" + "fmt" + 
"strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeauthorizationclient "github.com/openshift/client-go/authorization/clientset/versioned/fake" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func TestAdmission(t *testing.T) { + var ( + userAlice = userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + } + userAliceSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + + userBob = userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + } + userBobSubj = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobSubj.Name}, + } + groupSubj = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + + serviceaccount = corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + } + serviceaccountSubj = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + ) + + testCases := []struct { + name string + expectedErr string + + object runtime.Object + oldObject runtime.Object + kind schema.GroupVersionKind + resource schema.GroupVersionResource + namespace string + subresource string + kubeObjects []runtime.Object + authorizationObjects []runtime.Object + userObjects []runtime.Object + }{ + { + name: "ignore (allow) if subresource is nonempty", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "subresource", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "ignore (allow) cluster-scoped rolebinding", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{userAliceSubj}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if the namespace has no rolebinding restrictions", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + 
userBobSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + }, + { + name: "allow if any rolebinding with the subject already exists", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + groupSubj, + serviceaccountSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bogus-matcher", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{}, + }, + }, + }, + }, + { + name: "allow a user, group, or service account in a rolebinding if a literal matches", + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + serviceaccountSubj, + groupSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAlice.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-groups", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{group.Name}, + }, + }, + }, + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-serviceaccounts", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccount.Name, + Namespace: 
serviceaccount.Namespace, + }, + }, + }, + }, + }, + }, + }, + { + name: "prohibit user without a matching user literal", + expectedErr: fmt.Sprintf("rolebindings to %s %q are not allowed", + userAliceSubj.Kind, userAliceSubj.Name), + object: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{ + userAliceSubj, + }, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + oldObject: &rbac.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "rolebinding", + }, + Subjects: []rbac.Subject{}, + RoleRef: rbac.RoleRef{Name: "name"}, + }, + kind: rbac.Kind("RoleBinding").WithVersion("version"), + resource: rbac.Resource("rolebindings").WithVersion("version"), + namespace: "namespace", + subresource: "", + kubeObjects: []runtime.Object{ + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "namespace", + }, + }, + }, + authorizationObjects: []runtime.Object{ + &authorizationv1.RoleBindingRestriction{ + ObjectMeta: metav1.ObjectMeta{ + Name: "match-users-bob", + Namespace: "namespace", + }, + Spec: authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userBobSubj.Name}, + }, + }, + }, + }, + userObjects: []runtime.Object{ + &userAlice, + &userBob, + }, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + for _, tc := range testCases { + kclientset := fake.NewSimpleClientset(tc.kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(tc.userObjects...) + fakeAuthorizationClient := fakeauthorizationclient.NewSimpleClientset(tc.authorizationObjects...) + + plugin, err := NewRestrictUsersAdmission() + if err != nil { + t.Errorf("unexpected error initializing admission plugin: %v", err) + } + + plugin.(*restrictUsersAdmission).kubeClient = kclientset + plugin.(*restrictUsersAdmission).roleBindingRestrictionsGetter = fakeAuthorizationClient.AuthorizationV1() + plugin.(*restrictUsersAdmission).userClient = fakeUserClient + plugin.(*restrictUsersAdmission).groupCache = fakeGroupCache{} + + err = admission.ValidateInitialization(plugin) + if err != nil { + t.Errorf("unexpected error validating admission plugin: %v", err) + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + admission.Create, + nil, + false, + &user.DefaultInfo{}, + ) + + err = plugin.(admission.ValidationInterface).Validate(context.TODO(), attributes, nil) + switch { + case len(tc.expectedErr) == 0 && err == nil: + case len(tc.expectedErr) == 0 && err != nil: + t.Errorf("%s: unexpected error: %v", tc.name, err) + case len(tc.expectedErr) != 0 && err == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && err != nil && + !strings.Contains(err.Error(), tc.expectedErr): + t.Errorf("%s: missing error: expected %v, got %v", + tc.name, tc.expectedErr, err) + } + } +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go new file mode 100644 index 0000000000000..2e10e182b9de9 --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker.go @@ -0,0 +1,312 @@ +package restrictusers + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + kerrors "k8s.io/apimachinery/pkg/util/errors" + 
"k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" +) + +// SubjectChecker determines whether rolebindings on a subject (user, group, or +// service account) are allowed in a project. +type SubjectChecker interface { + Allowed(rbac.Subject, *RoleBindingRestrictionContext) (bool, error) +} + +// UnionSubjectChecker represents the union of zero or more SubjectCheckers. +type UnionSubjectChecker []SubjectChecker + +// NewUnionSubjectChecker returns a new UnionSubjectChecker. +func NewUnionSubjectChecker(checkers []SubjectChecker) UnionSubjectChecker { + return UnionSubjectChecker(checkers) +} + +// Allowed determines whether the given subject is allowed in rolebindings in +// the project. +func (checkers UnionSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + errs := []error{} + for _, checker := range []SubjectChecker(checkers) { + allowed, err := checker.Allowed(subject, ctx) + if err != nil { + errs = append(errs, err) + } else if allowed { + return true, nil + } + } + + return false, kerrors.NewAggregate(errs) +} + +// RoleBindingRestrictionContext holds context that is used when determining +// whether a RoleBindingRestriction allows rolebindings on a particular subject. +type RoleBindingRestrictionContext struct { + userClient userclient.UserV1Interface + kclient kubernetes.Interface + + // groupCache maps user name to groups. + groupCache GroupCache + + // userToLabels maps user name to labels.Set. + userToLabelSet map[string]labels.Set + + // groupToLabels maps group name to labels.Set. + groupToLabelSet map[string]labels.Set + + // namespace is the namespace for which the RoleBindingRestriction makes + // determinations. + namespace string +} + +// NewRoleBindingRestrictionContext returns a new RoleBindingRestrictionContext +// object. +func newRoleBindingRestrictionContext(ns string, kc kubernetes.Interface, userClient userclient.UserV1Interface, groupCache GroupCache) (*RoleBindingRestrictionContext, error) { + return &RoleBindingRestrictionContext{ + namespace: ns, + kclient: kc, + userClient: userClient, + groupCache: groupCache, + userToLabelSet: map[string]labels.Set{}, + groupToLabelSet: map[string]labels.Set{}, + }, nil +} + +// labelSetForUser returns the label set for the given user subject. +func (ctx *RoleBindingRestrictionContext) labelSetForUser(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.UserKind { + return labels.Set{}, fmt.Errorf("not a user: %q", subject.Name) + } + + labelSet, ok := ctx.userToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + user, err := ctx.userClient.Users().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.userToLabelSet[subject.Name] = labels.Set(user.Labels) + + return ctx.userToLabelSet[subject.Name], nil +} + +// groupsForUser returns the groups for the given user subject. 
+func (ctx *RoleBindingRestrictionContext) groupsForUser(subject rbac.Subject) ([]*userv1.Group, error) { + if subject.Kind != rbac.UserKind { + return []*userv1.Group{}, fmt.Errorf("not a user: %q", subject.Name) + } + + err := wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) { + return ctx.groupCache.HasSynced(), nil + }) + if err != nil { + return nil, fmt.Errorf("groups.user.openshift.io cache is not synchronized") + } + + return ctx.groupCache.GroupsFor(subject.Name) +} + +// labelSetForGroup returns the label set for the given group subject. +func (ctx *RoleBindingRestrictionContext) labelSetForGroup(subject rbac.Subject) (labels.Set, error) { + if subject.Kind != rbac.GroupKind { + return labels.Set{}, fmt.Errorf("not a group: %q", subject.Name) + } + + labelSet, ok := ctx.groupToLabelSet[subject.Name] + if ok { + return labelSet, nil + } + + group, err := ctx.userClient.Groups().Get(context.TODO(), subject.Name, metav1.GetOptions{}) + if err != nil { + return labels.Set{}, err + } + + ctx.groupToLabelSet[subject.Name] = labels.Set(group.Labels) + + return ctx.groupToLabelSet[subject.Name], nil +} + +// UserSubjectChecker determines whether a user subject is allowed in +// rolebindings in the project. +type UserSubjectChecker struct { + userRestriction *authorizationv1.UserRestriction +} + +// NewUserSubjectChecker returns a new UserSubjectChecker. +func NewUserSubjectChecker(userRestriction *authorizationv1.UserRestriction) UserSubjectChecker { + return UserSubjectChecker{userRestriction: userRestriction} +} + +// Allowed determines whether the given user subject is allowed in rolebindings +// in the project. +func (checker UserSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.UserKind { + return false, nil + } + + for _, userName := range checker.userRestriction.Users { + if subject.Name == userName { + return true, nil + } + } + + if len(checker.userRestriction.Groups) != 0 { + subjectGroups, err := ctx.groupsForUser(subject) + if err != nil { + return false, err + } + + for _, groupName := range checker.userRestriction.Groups { + for _, group := range subjectGroups { + if group.Name == groupName { + return true, nil + } + } + } + } + + if len(checker.userRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForUser(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.userRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// GroupSubjectChecker determines whether a group subject is allowed in +// rolebindings in the project. +type GroupSubjectChecker struct { + groupRestriction *authorizationv1.GroupRestriction +} + +// NewGroupSubjectChecker returns a new GroupSubjectChecker. +func NewGroupSubjectChecker(groupRestriction *authorizationv1.GroupRestriction) GroupSubjectChecker { + return GroupSubjectChecker{groupRestriction: groupRestriction} +} + +// Allowed determines whether the given group subject is allowed in rolebindings +// in the project. 
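+// A group subject is allowed if its name appears in the restriction's literal
+// group list, or if any of the restriction's label selectors matches the
+// group's labels.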
+func (checker GroupSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.GroupKind { + return false, nil + } + + for _, groupName := range checker.groupRestriction.Groups { + if subject.Name == groupName { + return true, nil + } + } + + if len(checker.groupRestriction.Selectors) != 0 { + labelSet, err := ctx.labelSetForGroup(subject) + if err != nil { + return false, err + } + + for _, labelSelector := range checker.groupRestriction.Selectors { + selector, err := metav1.LabelSelectorAsSelector(&labelSelector) + if err != nil { + return false, err + } + + if selector.Matches(labelSet) { + return true, nil + } + } + } + + return false, nil +} + +// ServiceAccountSubjectChecker determines whether a serviceaccount subject is +// allowed in rolebindings in the project. +type ServiceAccountSubjectChecker struct { + serviceAccountRestriction *authorizationv1.ServiceAccountRestriction +} + +// NewServiceAccountSubjectChecker returns a new ServiceAccountSubjectChecker. +func NewServiceAccountSubjectChecker(serviceAccountRestriction *authorizationv1.ServiceAccountRestriction) ServiceAccountSubjectChecker { + return ServiceAccountSubjectChecker{ + serviceAccountRestriction: serviceAccountRestriction, + } +} + +// Allowed determines whether the given serviceaccount subject is allowed in +// rolebindings in the project. +func (checker ServiceAccountSubjectChecker) Allowed(subject rbac.Subject, ctx *RoleBindingRestrictionContext) (bool, error) { + if subject.Kind != rbac.ServiceAccountKind { + return false, nil + } + + subjectNamespace := subject.Namespace + if len(subjectNamespace) == 0 { + // If a RoleBinding has a subject that is a ServiceAccount with + // no namespace specified, the namespace will be defaulted to + // that of the RoleBinding. However, admission control plug-ins + // execute before this happens, so in order not to reject such + // subjects erroneously, we copy the logic here of using the + // RoleBinding's namespace if the subject's is empty. + subjectNamespace = ctx.namespace + } + + for _, namespace := range checker.serviceAccountRestriction.Namespaces { + if subjectNamespace == namespace { + return true, nil + } + } + + for _, serviceAccountRef := range checker.serviceAccountRestriction.ServiceAccounts { + serviceAccountNamespace := serviceAccountRef.Namespace + if len(serviceAccountNamespace) == 0 { + serviceAccountNamespace = ctx.namespace + } + + if subject.Name == serviceAccountRef.Name && + subjectNamespace == serviceAccountNamespace { + return true, nil + } + } + + return false, nil +} + +// NewSubjectChecker returns a new SubjectChecker. 
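+// Exactly one restriction field of the spec is expected to be set; a typical
+// call site (illustrative only, with hypothetical restriction, subject, and
+// ctx values) looks like:
+//
+//	checker, err := NewSubjectChecker(&restriction.Spec)
+//	if err != nil {
+//		return err
+//	}
+//	allowed, err := checker.Allowed(subject, ctx)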
+func NewSubjectChecker(spec *authorizationv1.RoleBindingRestrictionSpec) (SubjectChecker, error) { + switch { + case spec.UserRestriction != nil: + return NewUserSubjectChecker(spec.UserRestriction), nil + + case spec.GroupRestriction != nil: + return NewGroupSubjectChecker(spec.GroupRestriction), nil + + case spec.ServiceAccountRestriction != nil: + return NewServiceAccountSubjectChecker(spec.ServiceAccountRestriction), nil + } + + return nil, fmt.Errorf("invalid RoleBindingRestrictionSpec: %v", spec) +} diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go new file mode 100644 index 0000000000000..4580d3582f93e --- /dev/null +++ b/openshift-kube-apiserver/admission/authorization/restrictusers/subjectchecker_test.go @@ -0,0 +1,349 @@ +package restrictusers + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/apis/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + fakeuserclient "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func mustNewSubjectChecker(t *testing.T, spec *authorizationv1.RoleBindingRestrictionSpec) SubjectChecker { + checker, err := NewSubjectChecker(spec) + if err != nil { + t.Errorf("unexpected error from NewChecker: %v, spec: %#v", err, spec) + } + + return checker +} + +func TestSubjectCheckers(t *testing.T) { + var ( + userBobRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Bob", + } + userAliceRef = rbac.Subject{ + Kind: rbac.UserKind, + Name: "Alice", + } + groupRef = rbac.Subject{ + Kind: rbac.GroupKind, + Name: "group", + } + serviceaccountRef = rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "namespace", + Name: "serviceaccount", + } + group = userv1.Group{ + ObjectMeta: metav1.ObjectMeta{ + Name: "group", + Labels: map[string]string{"baz": "quux"}, + }, + Users: []string{userBobRef.Name}, + } + userObjects = []runtime.Object{ + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Alice", + Labels: map[string]string{"foo": "bar"}, + }, + }, + &userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: "Bob"}, + Groups: []string{"group"}, + }, + &group, + } + kubeObjects = []runtime.Object{ + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "namespace", + Name: "serviceaccount", + Labels: map[string]string{"xyzzy": "thud"}, + }, + }, + } + ) + + testCases := []struct { + name string + checker SubjectChecker + subject rbac.Subject + shouldAllow bool + }{ + { + name: "allow regular user by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Users: []string{userAliceRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "allow regular user by group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: userBobRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on group membership", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Groups: 
[]string{groupRef.Name}, + }, + }), + subject: userAliceRef, + shouldAllow: false, + }, + { + name: "allow regular user by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userAliceRef, + shouldAllow: true, + }, + { + name: "prohibit regular user when another user matches on label selector", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + UserRestriction: &authorizationv1.UserRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + }), + subject: userBobRef, + shouldAllow: false, + }, + { + name: "allow regular group by literal name match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Groups: []string{groupRef.Name}, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow regular group by label selector match", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + GroupRestriction: &authorizationv1.GroupRestriction{ + Selectors: []metav1.LabelSelector{ + {MatchLabels: map[string]string{"baz": "quux"}}, + }, + }, + }), + subject: groupRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "allow service account with explicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where literal name matches but implicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "allow service account with implicit namespace 
by match on literal name and explicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Name: serviceaccountRef.Name, + Namespace: serviceaccountRef.Namespace, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "allow service account with implicit namespace by match on literal name and implicit namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + {Name: serviceaccountRef.Name}, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: true, + }, + { + name: "prohibit service account with implicit namespace where literal name matches but explicit namespace does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: "othernamespace", + Name: serviceaccountRef.Name, + }, + }, + }, + }), + subject: rbac.Subject{ + Kind: rbac.ServiceAccountKind, + Name: serviceaccountRef.Name, + }, + shouldAllow: false, + }, + { + name: "prohibit service account with explicit namespace where explicit namespace matches but literal name does not", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + ServiceAccounts: []authorizationv1.ServiceAccountReference{ + { + Namespace: serviceaccountRef.Namespace, + Name: "othername", + }, + }, + }, + }), + subject: serviceaccountRef, + shouldAllow: false, + }, + { + name: "allow service account by match on namespace", + checker: mustNewSubjectChecker(t, + &authorizationv1.RoleBindingRestrictionSpec{ + ServiceAccountRestriction: &authorizationv1.ServiceAccountRestriction{ + Namespaces: []string{serviceaccountRef.Namespace}, + }, + }), + subject: serviceaccountRef, + shouldAllow: true, + }, + } + + stopCh := make(chan struct{}) + defer close(stopCh) + + kclient := fake.NewSimpleClientset(kubeObjects...) + fakeUserClient := fakeuserclient.NewSimpleClientset(userObjects...) + groupCache := fakeGroupCache{groups: []userv1.Group{group}} + // This is a terrible, horrible, no-good, very bad hack to avoid a race + // condition between the test "allow regular user by group membership" + // and the group cache's initialisation. 
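+	// Poll the fake cache until Bob's group membership is visible so that
+	// the membership-based cases below cannot observe an unsynced cache.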
+	for {
+		if groups, _ := groupCache.GroupsFor(group.Users[0]); len(groups) == 1 {
+			break
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	ctx, err := newRoleBindingRestrictionContext("namespace",
+		kclient, fakeUserClient.UserV1(), groupCache)
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+
+	for _, tc := range testCases {
+		allowed, err := tc.checker.Allowed(tc.subject, ctx)
+		if err != nil {
+			t.Errorf("test case %v: unexpected error: %v", tc.name, err)
+		}
+		if allowed && !tc.shouldAllow {
+			t.Errorf("test case %v: subject allowed but should be prohibited", tc.name)
+		}
+		if !allowed && tc.shouldAllow {
+			t.Errorf("test case %v: subject prohibited but should be allowed", tc.name)
+		}
+	}
+}
diff --git a/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go
new file mode 100644
index 0000000000000..99a8156be3053
--- /dev/null
+++ b/openshift-kube-apiserver/admission/authorization/restrictusers/usercache/groups.go
@@ -0,0 +1,55 @@
+package usercache
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/tools/cache"
+
+	userapi "github.com/openshift/api/user/v1"
+	userinformer "github.com/openshift/client-go/user/informers/externalversions/user/v1"
+)
+
+// GroupCache is a skin on an indexer to provide the reverse index from user to groups.
+// Once we work out a cleaner way to extend a lister, this should live there.
+type GroupCache struct {
+	indexer      cache.Indexer
+	groupsSynced cache.InformerSynced
+}
+
+const ByUserIndexName = "ByUser"
+
+// ByUserIndexKeys is a cache.IndexFunc for Groups that indexes groups by user, so that a direct
+// cache lookup using a User.Name will return all Groups that the user is a member of.
+func ByUserIndexKeys(obj interface{}) ([]string, error) {
+	group, ok := obj.(*userapi.Group)
+	if !ok {
+		return nil, fmt.Errorf("unexpected type: %v", obj)
+	}
+
+	return group.Users, nil
+}
+
+func NewGroupCache(groupInformer userinformer.GroupInformer) *GroupCache {
+	return &GroupCache{
+		indexer:      groupInformer.Informer().GetIndexer(),
+		groupsSynced: groupInformer.Informer().HasSynced,
+	}
+}
+
+func (c *GroupCache) GroupsFor(username string) ([]*userapi.Group, error) {
+	objs, err := c.indexer.ByIndex(ByUserIndexName, username)
+	if err != nil {
+		return nil, err
+	}
+
+	groups := make([]*userapi.Group, len(objs))
+	for i := range objs {
+		groups[i] = objs[i].(*userapi.Group)
+	}
+
+	return groups, nil
+}
+
+func (c *GroupCache) HasSynced() bool {
+	return c.groupsSynced()
+}
diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go
new file mode 100644
index 0000000000000..7f2a6f888d472
--- /dev/null
+++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package clusterresourceoverride is the internal version of the API.
+package clusterresourceoverride diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go new file mode 100644 index 0000000000000..f136def581ed5 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/name.go @@ -0,0 +1,4 @@ +package clusterresourceoverride + +const PluginName = "autoscaling.openshift.io/ClusterResourceOverride" +const ConfigKind = "ClusterResourceOverrideConfig" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go new file mode 100644 index 0000000000000..5308853cfd134 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/register.go @@ -0,0 +1,23 @@ +package clusterresourceoverride + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go new file mode 100644 index 0000000000000..3718e265caafa --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/types.go @@ -0,0 +1,24 @@ +package clusterresourceoverride + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. 
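+	// For example, with LimitCPUToMemoryPercent=200, a container with a
+	// 512MiB memory limit is given a 1-core (1000m) CPU limit.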
+ LimitCPUToMemoryPercent int64 + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go new file mode 100644 index 0000000000000..7397986b23605 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go new file mode 100644 index 0000000000000..91d44566e3476 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/register.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func (obj *ClusterResourceOverrideConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + clusterresourceoverride.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceOverrideConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go new file mode 100644 index 0000000000000..f909b0db2ee4f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/swagger_doc.go @@ -0,0 +1,17 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_ClusterResourceOverrideConfig = map[string]string{ + "": "ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride admission controller which overrides user-provided container request/limit values.", + "limitCPUToMemoryPercent": "For each of the following, if a non-zero ratio is specified then the initial value (if any) in the pod spec is overwritten according to the ratio. LimitRange defaults are merged prior to the override.\n\nLimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; 100% overrides CPU to 1 core per 1GiB of RAM. 
This is done before overriding the CPU request.", + "cpuRequestToLimitPercent": "CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit", + "memoryRequestToLimitPercent": "MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit", +} + +func (ClusterResourceOverrideConfig) SwaggerDoc() map[string]string { + return map_ClusterResourceOverrideConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go new file mode 100644 index 0000000000000..9a56034174e15 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/types.go @@ -0,0 +1,24 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceOverrideConfig is the configuration for the ClusterResourceOverride +// admission controller which overrides user-provided container request/limit values. +type ClusterResourceOverrideConfig struct { + metav1.TypeMeta `json:",inline"` + // For each of the following, if a non-zero ratio is specified then the initial + // value (if any) in the pod spec is overwritten according to the ratio. + // LimitRange defaults are merged prior to the override. + // + // LimitCPUToMemoryPercent (if > 0) overrides the CPU limit to a ratio of the memory limit; + // 100% overrides CPU to 1 core per 1GiB of RAM. This is done before overriding the CPU request. + LimitCPUToMemoryPercent int64 `json:"limitCPUToMemoryPercent"` + // CPURequestToLimitPercent (if > 0) overrides CPU request to a percentage of CPU limit + CPURequestToLimitPercent int64 `json:"cpuRequestToLimitPercent"` + // MemoryRequestToLimitPercent (if > 0) overrides memory request to a percentage of memory limit + MemoryRequestToLimitPercent int64 `json:"memoryRequestToLimitPercent"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..14255bad29a36 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1/zz_generated.deepcopy.go @@ -0,0 +1,34 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceOverrideConfig) DeepCopyInto(out *ClusterResourceOverrideConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceOverrideConfig. +func (in *ClusterResourceOverrideConfig) DeepCopy() *ClusterResourceOverrideConfig { + if in == nil { + return nil + } + out := new(ClusterResourceOverrideConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterResourceOverrideConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go new file mode 100644 index 0000000000000..14cdcdd586abf --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation/validation.go @@ -0,0 +1,27 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" +) + +func Validate(config *clusterresourceoverride.ClusterResourceOverrideConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + return allErrs + } + if config.LimitCPUToMemoryPercent == 0 && config.CPURequestToLimitPercent == 0 && config.MemoryRequestToLimitPercent == 0 { + allErrs = append(allErrs, field.Forbidden(field.NewPath(clusterresourceoverride.PluginName), "plugin enabled but no percentages were specified")) + } + if config.LimitCPUToMemoryPercent < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "LimitCPUToMemoryPercent"), config.LimitCPUToMemoryPercent, "must be positive")) + } + if config.CPURequestToLimitPercent < 0 || config.CPURequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "CPURequestToLimitPercent"), config.CPURequestToLimitPercent, "must be between 0 and 100")) + } + if config.MemoryRequestToLimitPercent < 0 || config.MemoryRequestToLimitPercent > 100 { + allErrs = append(allErrs, field.Invalid(field.NewPath(clusterresourceoverride.PluginName, "MemoryRequestToLimitPercent"), config.MemoryRequestToLimitPercent, "must be between 0 and 100")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..09414272c2210 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/zz_generated.deepcopy.go @@ -0,0 +1,34 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clusterresourceoverride + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceOverrideConfig) DeepCopyInto(out *ClusterResourceOverrideConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceOverrideConfig. +func (in *ClusterResourceOverrideConfig) DeepCopy() *ClusterResourceOverrideConfig { + if in == nil { + return nil + } + out := new(ClusterResourceOverrideConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterResourceOverrideConfig) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go
new file mode 100644
index 0000000000000..2eb498613c0ad
--- /dev/null
+++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package runonceduration is the internal version of the API.
+package runonceduration
diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go
new file mode 100644
index 0000000000000..379c2be1ed1a5
--- /dev/null
+++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/register.go
@@ -0,0 +1,34 @@
+package runonceduration
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupVersion is the group version used to register these objects.
+var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return GroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return GroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	Install       = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&RunOnceDurationConfig{},
+	)
+	return nil
+}
+
+func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta }
diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go
new file mode 100644
index 0000000000000..1a9f5a112c90a
--- /dev/null
+++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/types.go
@@ -0,0 +1,26 @@
+package runonceduration
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin.
+// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod.
+// The project that contains the pod may specify a different setting. That setting will
+// take precedence over the one configured for the plugin here.
+type RunOnceDurationConfig struct {
+	metav1.TypeMeta
+
+	// ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods.
+	// Only a positive value is valid. Absence of a value means that the plugin
+	// won't make any changes to the pod.
+	ActiveDeadlineSecondsOverride *int64
+}
+
+// ActiveDeadlineSecondsLimitAnnotation can be set on a project to limit the number of
+// seconds that a run-once pod can be active in that project.
+// TODO: this label needs to change to reflect its function. It's a limit, not an override.
+// It is kept this way for compatibility. Only change it in a new version of the API.
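+// For example (illustrative only), annotating a project with
+// "openshift.io/active-deadline-seconds-override": "3600" limits run-once
+// pods in that project to one hour.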
+const ActiveDeadlineSecondsLimitAnnotation = "openshift.io/active-deadline-seconds-override" diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go new file mode 100644 index 0000000000000..31253537849a6 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/conversion.go @@ -0,0 +1,26 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + + internal "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + err := scheme.AddConversionFunc((*RunOnceDurationConfig)(nil), (*internal.RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*RunOnceDurationConfig) + out := b.(*internal.RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) + if err != nil { + return err + } + return scheme.AddConversionFunc((*internal.RunOnceDurationConfig)(nil), (*RunOnceDurationConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + in := a.(*internal.RunOnceDurationConfig) + out := b.(*RunOnceDurationConfig) + out.ActiveDeadlineSecondsOverride = in.ActiveDeadlineSecondsOverride + return nil + }) +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go new file mode 100644 index 0000000000000..f70b886a67a72 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go new file mode 100644 index 0000000000000..b456123c9fab2 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/register.go @@ -0,0 +1,29 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func (obj *RunOnceDurationConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "autoscaling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + runonceduration.Install, + + addConversionFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RunOnceDurationConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go new file mode 100644 index 0000000000000..1cb7c3cdb319f --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_RunOnceDurationConfig = map[string]string{ + "": "RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. The project that contains the pod may specify a different setting. That setting will take precedence over the one configured for the plugin here.", + "activeDeadlineSecondsOverride": "ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods Only a positive value is valid. Absence of a value means that the plugin won't make any changes to the pod It is kept this way for compatibility. Only change it in a new version of the API.", +} + +func (RunOnceDurationConfig) SwaggerDoc() map[string]string { + return map_RunOnceDurationConfig +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go new file mode 100644 index 0000000000000..4cfa3823ba10b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/types.go @@ -0,0 +1,22 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RunOnceDurationConfig is the configuration for the RunOnceDuration plugin. +// It specifies a maximum value for ActiveDeadlineSeconds for a run-once pod. +// The project that contains the pod may specify a different setting. That setting will +// take precedence over the one configured for the plugin here. 
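+//
+// A minimal plugin configuration file (illustrative only) would be:
+//
+//	apiVersion: autoscaling.openshift.io/v1
+//	kind: RunOnceDurationConfig
+//	activeDeadlineSecondsOverride: 3600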
+type RunOnceDurationConfig struct { + metav1.TypeMeta `json:",inline"` + + // ActiveDeadlineSecondsOverride is the maximum value to set on containers of run-once pods + // Only a positive value is valid. Absence of a value means that the plugin + // won't make any changes to the pod + // TODO: change the external name of this field to reflect that it is a limit, not an override + // It is kept this way for compatibility. Only change it in a new version of the API. + ActiveDeadlineSecondsOverride *int64 `json:"activeDeadlineSecondsOverride,omitempty" description:"maximum value for activeDeadlineSeconds in run-once pods"` +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..3000ba62bd174 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunOnceDurationConfig) DeepCopyInto(out *RunOnceDurationConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ActiveDeadlineSecondsOverride != nil { + in, out := &in.ActiveDeadlineSecondsOverride, &out.ActiveDeadlineSecondsOverride + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunOnceDurationConfig. +func (in *RunOnceDurationConfig) DeepCopy() *RunOnceDurationConfig { + if in == nil { + return nil + } + out := new(RunOnceDurationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RunOnceDurationConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go new file mode 100644 index 0000000000000..7ddcad869845a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation.go @@ -0,0 +1,18 @@ +package validation + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +// ValidateRunOnceDurationConfig validates the RunOnceDuration plugin configuration +func ValidateRunOnceDurationConfig(config *runonceduration.RunOnceDurationConfig) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil || config.ActiveDeadlineSecondsOverride == nil { + return allErrs + } + if *config.ActiveDeadlineSecondsOverride <= 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("activeDeadlineSecondsOverride"), config.ActiveDeadlineSecondsOverride, "must be greater than 0")) + } + return allErrs +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go new file mode 100644 index 0000000000000..19f6f6d70544b --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation/validation_test.go @@ -0,0 +1,29 @@ +package validation + +import ( + "testing" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func TestRunOnceDurationConfigValidation(t *testing.T) { + // Check invalid duration returns an error + var invalidSecs int64 = -1 + invalidConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &invalidSecs, + } + errs := ValidateRunOnceDurationConfig(invalidConfig) + if len(errs) == 0 { + t.Errorf("Did not get expected error on invalid config") + } + + // Check that valid duration returns no error + var validSecs int64 = 5 + validConfig := &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: &validSecs, + } + errs = ValidateRunOnceDurationConfig(validConfig) + if len(errs) > 0 { + t.Errorf("Unexpected error on valid config") + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..b87ed7a25b492 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package runonceduration + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RunOnceDurationConfig) DeepCopyInto(out *RunOnceDurationConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ActiveDeadlineSecondsOverride != nil { + in, out := &in.ActiveDeadlineSecondsOverride, &out.ActiveDeadlineSecondsOverride + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunOnceDurationConfig. +func (in *RunOnceDurationConfig) DeepCopy() *RunOnceDurationConfig { + if in == nil { + return nil + } + out := new(RunOnceDurationConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RunOnceDurationConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go new file mode 100644 index 0000000000000..6aed487fdef13 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission.go @@ -0,0 +1,348 @@ +package clusterresourceoverride + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/plugin/pkg/admission/limitranger" + + api "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" +) + +const ( + clusterResourceOverrideAnnotation = "autoscaling.openshift.io/cluster-resource-override-enabled" + cpuBaseScaleFactor = 1000.0 / (1024.0 * 1024.0 * 1024.0) // 1000 milliCores per 1GiB +) + +var ( + cpuFloor = resource.MustParse("1m") + memFloor = resource.MustParse("1Mi") +) + +func Register(plugins *admission.Plugins) { + plugins.Register(api.PluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := ReadConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", api.PluginName) + return nil, nil + } + return newClusterResourceOverride(pluginConfig) + }) +} + +type internalConfig struct { + limitCPUToMemoryRatio float64 + cpuRequestToLimitRatio float64 + memoryRequestToLimitRatio float64 +} +type clusterResourceOverridePlugin struct { + *admission.Handler + config *internalConfig + nsLister corev1listers.NamespaceLister + LimitRanger *limitranger.LimitRanger + limitRangesLister corev1listers.LimitRangeLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&clusterResourceOverridePlugin{}) +var _ = initializer.WantsExternalKubeClientSet(&clusterResourceOverridePlugin{}) +var _ = admission.MutationInterface(&clusterResourceOverridePlugin{}) +var _ = admission.ValidationInterface(&clusterResourceOverridePlugin{}) + +// newClusterResourceOverride returns an admission controller for containers 
that
+// configurably overrides container resource requests/limits.
+func newClusterResourceOverride(config *api.ClusterResourceOverrideConfig) (admission.Interface, error) {
+	klog.V(2).Infof("%s admission controller loaded with config: %v", api.PluginName, config)
+	var internal *internalConfig
+	if config != nil {
+		internal = &internalConfig{
+			limitCPUToMemoryRatio:     float64(config.LimitCPUToMemoryPercent) / 100,
+			cpuRequestToLimitRatio:    float64(config.CPURequestToLimitPercent) / 100,
+			memoryRequestToLimitRatio: float64(config.MemoryRequestToLimitPercent) / 100,
+		}
+	}
+
+	limitRanger, err := limitranger.NewLimitRanger(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &clusterResourceOverridePlugin{
+		Handler:     admission.NewHandler(admission.Create),
+		config:      internal,
+		LimitRanger: limitRanger,
+	}, nil
+}
+
+func (d *clusterResourceOverridePlugin) SetExternalKubeClientSet(c kubernetes.Interface) {
+	d.LimitRanger.SetExternalKubeClientSet(c)
+}
+
+func (d *clusterResourceOverridePlugin) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) {
+	d.LimitRanger.SetExternalKubeInformerFactory(kubeInformers)
+	d.limitRangesLister = kubeInformers.Core().V1().LimitRanges().Lister()
+	d.nsLister = kubeInformers.Core().V1().Namespaces().Lister()
+}
+
+func ReadConfig(configFile io.Reader) (*api.ClusterResourceOverrideConfig, error) {
+	obj, err := helpers.ReadYAMLToInternal(configFile, api.Install, v1.Install)
+	if err != nil {
+		klog.V(5).Infof("%s error reading config: %v", api.PluginName, err)
+		return nil, err
+	}
+	if obj == nil {
+		return nil, nil
+	}
+	config, ok := obj.(*api.ClusterResourceOverrideConfig)
+	if !ok {
+		return nil, fmt.Errorf("unexpected config object: %#v", obj)
+	}
+	klog.V(5).Infof("%s config is: %v", api.PluginName, config)
+	if errs := validation.Validate(config); len(errs) > 0 {
+		return nil, errs.ToAggregate()
+	}
+
+	return config, nil
+}
+
+func (a *clusterResourceOverridePlugin) ValidateInitialization() error {
+	if a.nsLister == nil {
+		return fmt.Errorf("%s did not get a namespace lister", api.PluginName)
+	}
+	return a.LimitRanger.ValidateInitialization()
+}
+
+// this is a real shame to be special-cased.
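+// Namespaces named "openshift", "kubernetes", or "kube", or carrying one of
+// those prefixes, are exempt: pods created there are left untouched by the
+// override.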
+var ( + forbiddenNames = []string{"openshift", "kubernetes", "kube"} + forbiddenPrefixes = []string{"openshift-", "kubernetes-", "kube-"} +) + +func isExemptedNamespace(name string) bool { + for _, s := range forbiddenNames { + if name == s { + return true + } + } + for _, s := range forbiddenPrefixes { + if strings.HasPrefix(name, s) { + return true + } + } + return false +} + +func (a *clusterResourceOverridePlugin) Admit(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, true, o) +} + +func (a *clusterResourceOverridePlugin) Validate(ctx context.Context, attr admission.Attributes, o admission.ObjectInterfaces) error { + return a.admit(ctx, attr, false, o) +} + +// TODO this will need to be updated when we have pod requests/limits +func (a *clusterResourceOverridePlugin) admit(ctx context.Context, attr admission.Attributes, mutationAllowed bool, o admission.ObjectInterfaces) error { + klog.V(6).Infof("%s admission controller is invoked", api.PluginName) + if a.config == nil || attr.GetResource().GroupResource() != coreapi.Resource("pods") || attr.GetSubresource() != "" { + return nil // not applicable + } + pod, ok := attr.GetObject().(*coreapi.Pod) + if !ok { + return admission.NewForbidden(attr, fmt.Errorf("unexpected object: %#v", attr.GetObject())) + } + klog.V(5).Infof("%s is looking at creating pod %s in project %s", api.PluginName, pod.Name, attr.GetNamespace()) + + // allow annotations on project to override + ns, err := a.nsLister.Get(attr.GetNamespace()) + if err != nil { + klog.Warningf("%s got an error retrieving namespace: %v", api.PluginName, err) + return admission.NewForbidden(attr, err) // this should not happen though + } + + projectEnabledPlugin, exists := ns.Annotations[clusterResourceOverrideAnnotation] + if exists && projectEnabledPlugin != "true" { + klog.V(5).Infof("%s is disabled for project %s", api.PluginName, attr.GetNamespace()) + return nil // disabled for this project, do nothing + } + + if isExemptedNamespace(ns.Name) { + klog.V(5).Infof("%s is skipping exempted project %s", api.PluginName, attr.GetNamespace()) + return nil // project is exempted, do nothing + } + + namespaceLimits := []*corev1.LimitRange{} + + if a.limitRangesLister != nil { + limits, err := a.limitRangesLister.LimitRanges(attr.GetNamespace()).List(labels.Everything()) + if err != nil { + return err + } + namespaceLimits = limits + } + + // Don't mutate resource requirements below the namespace + // limit minimums. + nsCPUFloor := minResourceLimits(namespaceLimits, corev1.ResourceCPU) + nsMemFloor := minResourceLimits(namespaceLimits, corev1.ResourceMemory) + + // Reuse LimitRanger logic to apply limit/req defaults from the project. Ignore validation + // errors; assume that LimitRanger will run after this plugin to validate.
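+ // Worked example of the arithmetic applied below, using the config from the + // "all values are adjusted" test case (limitCPUToMemoryPercent=100, + // cpuRequestToLimitPercent=50, memoryRequestToLimitPercent=50): a container with + // a 1Gi memory limit and a 2000m CPU limit ends up with a 512Mi memory request, + // a 1000m CPU limit (1Gi * 1.0 * cpuBaseScaleFactor), and a 500m CPU request, + // all subject to the 1m/1Mi floors and any namespace LimitRange minimums.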
+ klog.V(5).Infof("%s: initial pod limits are: %#v", api.PluginName, pod.Spec) + if err := a.LimitRanger.Admit(ctx, attr, o); err != nil { + klog.V(5).Infof("%s: error from LimitRanger: %#v", api.PluginName, err) + } + klog.V(5).Infof("%s: pod limits after LimitRanger: %#v", api.PluginName, pod.Spec) + for i := range pod.Spec.InitContainers { + if err := updateContainerResources(a.config, &pod.Spec.InitContainers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.initContainers[%d].%v", i, err)) + } + } + for i := range pod.Spec.Containers { + if err := updateContainerResources(a.config, &pod.Spec.Containers[i], nsCPUFloor, nsMemFloor, mutationAllowed); err != nil { + return admission.NewForbidden(attr, fmt.Errorf("spec.containers[%d].%v", i, err)) + } + } + klog.V(5).Infof("%s: pod limits after overrides are: %#v", api.PluginName, pod.Spec) + return nil +} + +func updateContainerResources(config *internalConfig, container *coreapi.Container, nsCPUFloor, nsMemFloor *resource.Quantity, mutationAllowed bool) error { + resources := container.Resources + memLimit, memFound := resources.Limits[coreapi.ResourceMemory] + if memFound && config.memoryRequestToLimitRatio != 0 { + // memory is measured in whole bytes. + // the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users. + amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100 + // TODO: move into resource.Quantity + var mod int64 + switch memLimit.Format { + case resource.BinarySI: + mod = 1024 * 1024 + default: + mod = 1000 * 1000 + } + if rem := amount % mod; rem != 0 { + amount = amount - rem + } + q := resource.NewQuantity(int64(amount), memLimit.Format) + if memFloor.Cmp(*q) > 0 { + clone := memFloor.DeepCopy() + q = &clone + } + if nsMemFloor != nil && q.Cmp(*nsMemFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceMemory, q.String(), nsMemFloor.String()) + clone := nsMemFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceMemory, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceMemory, err) + } + } + if memFound && config.limitCPUToMemoryRatio != 0 { + amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor + q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Limits, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.limits.%s %v", corev1.ResourceCPU, err) + } + } + + cpuLimit, cpuFound := resources.Limits[coreapi.ResourceCPU] + if cpuFound && config.cpuRequestToLimitRatio != 0 { + amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio + q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format) + if cpuFloor.Cmp(*q) > 0 { + clone := cpuFloor.DeepCopy() + q = &clone + } + if nsCPUFloor != nil && q.Cmp(*nsCPUFloor) < 0 { + klog.V(5).Infof("%s: %s pod limit %q below namespace limit; setting limit to %q", api.PluginName, corev1.ResourceCPU, q.String(), nsCPUFloor.String()) + clone := 
nsCPUFloor.DeepCopy() + q = &clone + } + if err := applyQuantity(resources.Requests, corev1.ResourceCPU, *q, mutationAllowed); err != nil { + return fmt.Errorf("resources.requests.%s %v", corev1.ResourceCPU, err) + } + } + + return nil +} + +func applyQuantity(l coreapi.ResourceList, r corev1.ResourceName, v resource.Quantity, mutationAllowed bool) error { + if mutationAllowed { + l[coreapi.ResourceName(r)] = v + return nil + } + + if oldValue, ok := l[coreapi.ResourceName(r)]; !ok { + return fmt.Errorf("mutated, expected: %v, now absent", v) + } else if oldValue.Cmp(v) != 0 { + return fmt.Errorf("mutated, expected: %v, got %v", v, oldValue) + } + + return nil +} + +// minResourceLimits finds the Min limit for resourceName. Nil is +// returned if limitRanges is empty or limits contains no resourceName +// limits. +func minResourceLimits(limitRanges []*corev1.LimitRange, resourceName corev1.ResourceName) *resource.Quantity { + limits := []*resource.Quantity{} + + for _, limitRange := range limitRanges { + for _, limit := range limitRange.Spec.Limits { + if limit.Type == corev1.LimitTypeContainer { + if limit, found := limit.Min[resourceName]; found { + clone := limit.DeepCopy() + limits = append(limits, &clone) + } + } + } + } + + if len(limits) == 0 { + return nil + } + + return minQuantity(limits) +} + +func minQuantity(quantities []*resource.Quantity) *resource.Quantity { + min := quantities[0].DeepCopy() + + for i := range quantities { + if quantities[i].Cmp(min) < 0 { + min = quantities[i].DeepCopy() + } + } + + return &min +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go new file mode 100644 index 0000000000000..d1c54bb140aae --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/admission_test.go @@ -0,0 +1,507 @@ +package clusterresourceoverride + +import ( + "bytes" + "context" + "fmt" + "io" + "reflect" + "testing" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride" + clusterresourceoverridev1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/clusterresourceoverride/validation" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + yamlConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +limitCPUToMemoryPercent: 100 +cpuRequestToLimitPercent: 10 +memoryRequestToLimitPercent: 25 +` + invalidConfig = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +cpuRequestToLimitPercent: 200 +` + invalidConfig2 = ` +apiVersion: autoscaling.openshift.io/v1 +kind: ClusterResourceOverrideConfig +` +) + +var ( + deserializedYamlConfig = &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: 100, + CPURequestToLimitPercent: 10, + MemoryRequestToLimitPercent: 25, + } +) + +func TestConfigReader(t *testing.T) { + initialConfig := testConfig(10, 20, 30) 
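+ // Round-trip initialConfig through WriteYAML so the table below can verify that + // ReadConfig restores exactly what was serialized.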
+ serializedConfig, serializationErr := helpers.WriteYAML(initialConfig, clusterresourceoverridev1.Install) + if serializationErr != nil { + t.Fatalf("WriteYAML: config serialize failed: %v", serializationErr) + } + + tests := []struct { + name string + config io.Reader + expectErr bool + expectNil bool + expectInvalid bool + expectedConfig *clusterresourceoverride.ClusterResourceOverrideConfig + }{ + { + name: "process nil config", + config: nil, + expectNil: true, + }, { + name: "deserialize initialConfig yaml", + config: bytes.NewReader(serializedConfig), + expectedConfig: initialConfig, + }, { + name: "completely broken config", + config: bytes.NewReader([]byte("asdfasdfasdF")), + expectErr: true, + }, { + name: "deserialize yamlConfig", + config: bytes.NewReader([]byte(yamlConfig)), + expectedConfig: deserializedYamlConfig, + }, { + name: "choke on out-of-bounds ratio", + config: bytes.NewReader([]byte(invalidConfig)), + expectInvalid: true, + expectErr: true, + }, { + name: "complain about no settings", + config: bytes.NewReader([]byte(invalidConfig2)), + expectInvalid: true, + expectErr: true, + }, + } + for _, test := range tests { + config, err := ReadConfig(test.config) + if test.expectErr && err == nil { + t.Errorf("%s: expected error", test.name) + } else if !test.expectErr && err != nil { + t.Errorf("%s: expected no error, saw %v", test.name, err) + } + if err == nil { + if test.expectNil && config != nil { + t.Errorf("%s: expected nil config, but saw: %v", test.name, config) + } else if !test.expectNil && config == nil { + t.Errorf("%s: expected config, but got nil", test.name) + } + } + if config != nil { + if test.expectedConfig != nil && *test.expectedConfig != *config { + t.Errorf("%s: expected %v from reader, but got %v", test.name, test.expectedConfig, config) + } + if err := validation.Validate(config); test.expectInvalid && len(err) == 0 { + t.Errorf("%s: expected validation to fail, but it passed", test.name) + } else if !test.expectInvalid && len(err) > 0 { + t.Errorf("%s: expected validation to pass, but it failed with %v", test.name, err) + } + } + } +} + +func TestLimitRequestAdmission(t *testing.T) { + tests := []struct { + name string + config *clusterresourceoverride.ClusterResourceOverrideConfig + pod *kapi.Pod + expectedMemRequest resource.Quantity + expectedCpuLimit resource.Quantity + expectedCpuRequest resource.Quantity + namespace *corev1.Namespace + namespaceLimits []*corev1.LimitRange + }{ + { + name: "ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, ignore pods that have no memory limit specified", + config: testConfig(100, 50, 50), + pod: testBestEffortPod(), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("0"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("1m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: 
fakeNamespace(true), + }, + { + name: "with namespace limits, test floor for memory and cpu", + config: testConfig(100, 50, 50), + pod: testPod("1Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("567m"), + expectedCpuRequest: resource.MustParse("567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, nil config", + config: nil, + pod: testPod("1", "1", "1", "1"), + expectedMemRequest: resource.MustParse("1"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("1"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("567m"), + fakeMinCPULimitRange("678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "all values are adjusted", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all values are adjusted to floor of namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("10567m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "just requests are adjusted", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("5Mi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("25m"), + namespace: fakeNamespace(true), + }, + { + name: "with namespace limits, all requests are adjusted to floor of namespace limits", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("456Gi"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("10567m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("0"), + namespace: fakeNamespace(false), + }, + { + name: "with namespace limits, project annotation disables overrides", + config: testConfig(0, 50, 50), + pod: testPod("10Mi", "0", "50m", "0"), + expectedMemRequest: resource.MustParse("0"), + expectedCpuLimit: resource.MustParse("50m"), + expectedCpuRequest: resource.MustParse("0"), + namespace: 
fakeNamespace(false), + namespaceLimits: []*corev1.LimitRange{ + fakeMinCPULimitRange("10567m"), + fakeMinCPULimitRange("20678m"), + fakeMinMemoryLimitRange("700Gi"), + fakeMinMemoryLimitRange("456Gi"), + }, + }, + { + name: "large values don't overflow", + config: testConfig(100, 50, 50), + pod: testPod("1Ti", "0", "0", "0"), + expectedMemRequest: resource.MustParse("512Gi"), + expectedCpuLimit: resource.MustParse("1024"), + expectedCpuRequest: resource.MustParse("512"), + namespace: fakeNamespace(true), + }, + { + name: "little values mess things up", + config: testConfig(500, 10, 10), + pod: testPod("1.024Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("1Mi"), + expectedCpuLimit: resource.MustParse("5m"), + expectedCpuRequest: resource.MustParse("1m"), + namespace: fakeNamespace(true), + }, + { + name: "test fractional memory requests round up", + config: testConfig(500, 10, 60), + pod: testPod("512Mi", "0", "0", "0"), + expectedMemRequest: resource.MustParse("307Mi"), + expectedCpuLimit: resource.MustParse("2.5"), + expectedCpuRequest: resource.MustParse("250m"), + namespace: fakeNamespace(true), + }, + { + name: "test only containers types are considered with namespace limits", + config: testConfig(100, 50, 50), + pod: testPod("1Gi", "0", "2000m", "0"), + expectedMemRequest: resource.MustParse("512Mi"), + expectedCpuLimit: resource.MustParse("1"), + expectedCpuRequest: resource.MustParse("500m"), + namespace: fakeNamespace(true), + namespaceLimits: []*corev1.LimitRange{ + fakeMinStorageLimitRange("1567Mi"), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := newClusterResourceOverride(test.config) + if err != nil { + t.Fatalf("%s: config de/serialize failed: %v", test.name, err) + } + // Override LimitRanger with limits from test case + c.(*clusterResourceOverridePlugin).limitRangesLister = fakeLimitRangeLister{ + namespaceLister: fakeLimitRangeNamespaceLister{ + limits: test.namespaceLimits, + }, + } + c.(*clusterResourceOverridePlugin).nsLister = fakeNamespaceLister(test.namespace) + attrs := admission.NewAttributesRecord(test.pod, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + clone := test.pod.DeepCopy() + if err = c.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err != nil { + t.Fatalf("%s: admission controller returned error: %v", test.name, err) + } + + if !reflect.DeepEqual(test.pod, clone) { + attrs := admission.NewAttributesRecord(clone, nil, schema.GroupVersionKind{}, test.namespace.Name, "name", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, fakeUser()) + if err = c.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil); err == nil { + t.Fatalf("%s: admission controller returned no error, but should", test.name) + } + } + + resources := test.pod.Spec.InitContainers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", 
test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + + resources = test.pod.Spec.Containers[0].Resources // only test one container + if actual := resources.Requests[kapi.ResourceMemory]; test.expectedMemRequest.Cmp(actual) != 0 { + t.Errorf("%s: memory requests do not match; %v should be %v", test.name, actual, test.expectedMemRequest) + } + if actual := resources.Requests[kapi.ResourceCPU]; test.expectedCpuRequest.Cmp(actual) != 0 { + t.Errorf("%s: cpu requests do not match; %v should be %v", test.name, actual, test.expectedCpuRequest) + } + if actual := resources.Limits[kapi.ResourceCPU]; test.expectedCpuLimit.Cmp(actual) != 0 { + t.Errorf("%s: cpu limits do not match; %v should be %v", test.name, actual, test.expectedCpuLimit) + } + }) + } +} + +func testBestEffortPod() *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{}, + }, + }, + }, + } +} + +func testPod(memLimit string, memRequest string, cpuLimit string, cpuRequest string) *kapi.Pod { + return &kapi.Pod{ + Spec: kapi.PodSpec{ + InitContainers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + Containers: []kapi.Container{ + { + Resources: kapi.ResourceRequirements{ + Limits: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuLimit), + kapi.ResourceMemory: resource.MustParse(memLimit), + }, + Requests: kapi.ResourceList{ + kapi.ResourceCPU: resource.MustParse(cpuRequest), + kapi.ResourceMemory: resource.MustParse(memRequest), + }, + }, + }, + }, + }, + } +} + +func fakeUser() user.Info { + return &user.DefaultInfo{ + Name: "testuser", + } +} + +var nsIndex = 0 + +func fakeNamespace(pluginEnabled bool) *corev1.Namespace { + nsIndex++ + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("fakeNS%d", nsIndex), + Annotations: map[string]string{}, + }, + } + if !pluginEnabled { + ns.Annotations[clusterResourceOverrideAnnotation] = "false" + } + return ns +} + +func fakeNamespaceLister(ns *corev1.Namespace) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(lc2mr int64, cr2lr int64, mr2lr int64) *clusterresourceoverride.ClusterResourceOverrideConfig { + return &clusterresourceoverride.ClusterResourceOverrideConfig{ + LimitCPUToMemoryPercent: lc2mr, + CPURequestToLimitPercent: cr2lr, + MemoryRequestToLimitPercent: mr2lr, + } +} + +func fakeMinLimitRange(limitType corev1.LimitType, resourceType corev1.ResourceName, limits ...string) *corev1.LimitRange { + r := &corev1.LimitRange{} + + for i := range limits { + rl := corev1.ResourceList{} + rl[resourceType] = resource.MustParse(limits[i]) + r.Spec.Limits = append(r.Spec.Limits, + corev1.LimitRangeItem{ + Type: limitType, + Min: rl, + }, + ) + } + + return r +} + +func fakeMinMemoryLimitRange(limits ...string) *corev1.LimitRange { + return 
fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceMemory, limits...) +} + +func fakeMinCPULimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypeContainer, corev1.ResourceCPU, limits...) +} + +func fakeMinStorageLimitRange(limits ...string) *corev1.LimitRange { + return fakeMinLimitRange(corev1.LimitTypePersistentVolumeClaim, corev1.ResourceStorage, limits...) +} + +type fakeLimitRangeLister struct { + corev1listers.LimitRangeLister + namespaceLister fakeLimitRangeNamespaceLister +} + +type fakeLimitRangeNamespaceLister struct { + corev1listers.LimitRangeNamespaceLister + limits []*corev1.LimitRange +} + +func (f fakeLimitRangeLister) LimitRanges(namespace string) corev1listers.LimitRangeNamespaceLister { + return f.namespaceLister +} + +func (f fakeLimitRangeNamespaceLister) List(selector labels.Selector) ([]*corev1.LimitRange, error) { + return f.limits, nil +} diff --git a/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go new file mode 100644 index 0000000000000..aaf2176af054a --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/clusterresourceoverride/doc.go @@ -0,0 +1,8 @@ +package clusterresourceoverride + +// The ClusterResourceOverride plugin is only active when admission control config is supplied for it. +// The plugin allows administrators to override user-provided container request/limit values +// in order to control overcommit and optionally pin CPU to memory. +// The plugin's actions can be disabled per-project with the project annotation +// autoscaling.openshift.io/cluster-resource-override-enabled="false", so cluster admins +// can exempt infrastructure projects and such from the overrides. 
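+// +// For illustration, a minimal admission configuration for this plugin (the same +// shape, with the same values, as the yamlConfig fixture in admission_test.go): +// +// apiVersion: autoscaling.openshift.io/v1 +// kind: ClusterResourceOverrideConfig +// limitCPUToMemoryPercent: 100 +// cpuRequestToLimitPercent: 10 +// memoryRequestToLimitPercent: 25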
diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go new file mode 100644 index 0000000000000..9326205f9b333 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission.go @@ -0,0 +1,148 @@ +package runonceduration + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + + "k8s.io/klog/v2" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/utils/integer" + + "github.com/openshift/library-go/pkg/config/helpers" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration/validation" +) + +func Register(plugins *admission.Plugins) { + plugins.Register("autoscaling.openshift.io/RunOnceDuration", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", "autoscaling.openshift.io/RunOnceDuration") + return nil, nil + } + return NewRunOnceDuration(pluginConfig), nil + }) +} + +func readConfig(reader io.Reader) (*runonceduration.RunOnceDurationConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, runonceduration.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*runonceduration.RunOnceDurationConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object %#v", obj) + } + errs := validation.ValidateRunOnceDurationConfig(config) + if len(errs) > 0 { + return nil, errs.ToAggregate() + } + return config, nil +} + +// NewRunOnceDuration creates a new RunOnceDuration admission plugin +func NewRunOnceDuration(config *runonceduration.RunOnceDurationConfig) admission.Interface { + return &runOnceDuration{ + Handler: admission.NewHandler(admission.Create), + config: config, + } +} + +type runOnceDuration struct { + *admission.Handler + config *runonceduration.RunOnceDurationConfig + nsLister corev1listers.NamespaceLister +} + +var _ = initializer.WantsExternalKubeInformerFactory(&runOnceDuration{}) + +func (a *runOnceDuration) Admit(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case a.config == nil, + attributes.GetResource().GroupResource() != kapi.Resource("pods"), + len(attributes.GetSubresource()) > 0: + return nil + } + pod, ok := attributes.GetObject().(*kapi.Pod) + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unexpected object: %#v", attributes.GetObject())) + } + + // Only update pods with a restart policy of Never or OnFailure + switch pod.Spec.RestartPolicy { + case kapi.RestartPolicyNever, + kapi.RestartPolicyOnFailure: + // continue + default: + return nil + } + + appliedProjectLimit, err := a.applyProjectAnnotationLimit(attributes.GetNamespace(), pod) + if err != nil { + return admission.NewForbidden(attributes, err) + } + + if !appliedProjectLimit && a.config.ActiveDeadlineSecondsOverride != nil { + pod.Spec.ActiveDeadlineSeconds = int64MinP(a.config.ActiveDeadlineSecondsOverride, pod.Spec.ActiveDeadlineSeconds) + } + 
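+ // At this point ActiveDeadlineSeconds is the minimum of the pod's own value and + // whichever limit applied; a project annotation limit, when present, takes + // precedence over the global override.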
return nil +} + +func (a *runOnceDuration) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + a.nsLister = kubeInformers.Core().V1().Namespaces().Lister() +} + +func (a *runOnceDuration) ValidateInitialization() error { + if a.nsLister == nil { + return errors.New("autoscaling.openshift.io/RunOnceDuration plugin requires a namespace lister") + } + return nil +} + +func (a *runOnceDuration) applyProjectAnnotationLimit(namespace string, pod *kapi.Pod) (bool, error) { + ns, err := a.nsLister.Get(namespace) + if err != nil { + return false, fmt.Errorf("error looking up pod namespace: %v", err) + } + if ns.Annotations == nil { + return false, nil + } + limit, hasLimit := ns.Annotations[runonceduration.ActiveDeadlineSecondsLimitAnnotation] + if !hasLimit { + return false, nil + } + limitInt64, err := strconv.ParseInt(limit, 10, 64) + if err != nil { + return false, fmt.Errorf("cannot parse the ActiveDeadlineSeconds limit (%s) for project %s: %v", limit, ns.Name, err) + } + pod.Spec.ActiveDeadlineSeconds = int64MinP(&limitInt64, pod.Spec.ActiveDeadlineSeconds) + return true, nil +} + +func int64MinP(a, b *int64) *int64 { + switch { + case a == nil: + return b + case b == nil: + return a + default: + c := integer.Int64Min(*a, *b) + return &c + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go new file mode 100644 index 0000000000000..856d32801bfbb --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/admission_test.go @@ -0,0 +1,215 @@ +package runonceduration + +import ( + "bytes" + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + kapi "k8s.io/kubernetes/pkg/apis/core" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/autoscaling/apis/runonceduration" +) + +func fakeNamespaceLister(projectAnnotations map[string]string) corev1listers.NamespaceLister { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + ns := &corev1.Namespace{} + ns.Name = "default" + ns.Annotations = projectAnnotations + indexer.Add(ns) + return corev1listers.NewNamespaceLister(indexer) +} + +func testConfig(n *int64) *runonceduration.RunOnceDurationConfig { + return &runonceduration.RunOnceDurationConfig{ + ActiveDeadlineSecondsOverride: n, + } +} + +func testRunOncePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyNever + return pod +} + +func testRestartOnFailurePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyOnFailure + return pod +} + +func testRunOncePodWithDuration(n int64) *kapi.Pod { + pod := testRunOncePod() + pod.Spec.ActiveDeadlineSeconds = &n + return pod +} + +func testRestartAlwaysPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.RestartPolicy = kapi.RestartPolicyAlways + return pod +} + +func int64p(n int64) *int64 { + return &n +} + +func TestRunOnceDurationAdmit(t *testing.T) { + tests := []struct { + name string + config *runonceduration.RunOnceDurationConfig + pod *kapi.Pod + projectAnnotations map[string]string + expectedActiveDeadlineSeconds *int64 + }{ + { + name: "expect globally configured duration to be set", + config: testConfig(int64p(10)), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "empty config, no duration to be set", + config: 
testConfig(nil), + pod: testRunOncePod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "expect configured duration to not limit lower existing duration", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect empty config to not limit existing duration", + config: testConfig(nil), + pod: testRunOncePodWithDuration(5), + expectedActiveDeadlineSeconds: int64p(5), + }, + { + name: "expect project limit to be used with nil global value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "expect project limit to not limit a smaller set value", + config: testConfig(nil), + pod: testRunOncePodWithDuration(10), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(10), + }, + { + name: "expect project limit to have priority over global config value", + config: testConfig(int64p(10)), + pod: testRunOncePodWithDuration(2000), + projectAnnotations: map[string]string{ + runonceduration.ActiveDeadlineSecondsLimitAnnotation: "1000", + }, + expectedActiveDeadlineSeconds: int64p(1000), + }, + { + name: "make no change to a pod that is not a run-once pod", + config: testConfig(int64p(10)), + pod: testRestartAlwaysPod(), + expectedActiveDeadlineSeconds: nil, + }, + { + name: "update a pod that has a RestartOnFailure policy", + config: testConfig(int64p(10)), + pod: testRestartOnFailurePod(), + expectedActiveDeadlineSeconds: int64p(10), + }, + } + + for _, tc := range tests { + admissionPlugin := NewRunOnceDuration(tc.config) + admissionPlugin.(*runOnceDuration).nsLister = fakeNamespaceLister(tc.projectAnnotations) + pod := tc.pod + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "default", "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + if err := admissionPlugin.(admission.MutationInterface).Admit(context.TODO(), attrs, nil); err != nil { + t.Errorf("%s: unexpected mutating admission error: %v", tc.name, err) + continue + } + + switch { + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds == nil: + // continue + case tc.expectedActiveDeadlineSeconds == nil && pod.Spec.ActiveDeadlineSeconds != nil: + t.Errorf("%s: expected nil ActiveDeadlineSeconds. 
Got: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + case tc.expectedActiveDeadlineSeconds != nil && pod.Spec.ActiveDeadlineSeconds == nil: + t.Errorf("%s: unexpected nil ActiveDeadlineSeconds.", tc.name) + case *pod.Spec.ActiveDeadlineSeconds != *tc.expectedActiveDeadlineSeconds: + t.Errorf("%s: unexpected active deadline seconds: %d", tc.name, *pod.Spec.ActiveDeadlineSeconds) + } + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: autoscaling.openshift.io/v1 +kind: RunOnceDurationConfig +activeDeadlineSecondsOverride: 3600 +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if config.ActiveDeadlineSecondsOverride == nil { + t.Fatalf("nil value for ActiveDeadlineSecondsOverride") + } + if *config.ActiveDeadlineSecondsOverride != 3600 { + t.Errorf("unexpected value for ActiveDeadlineSecondsOverride: %d", *config.ActiveDeadlineSecondsOverride) + } +} + +func TestInt64MinP(t *testing.T) { + ten := int64(10) + twenty := int64(20) + tests := []struct { + a, b, expected *int64 + }{ + { + a: &ten, + b: nil, + expected: &ten, + }, + { + a: nil, + b: &ten, + expected: &ten, + }, + { + a: &ten, + b: &twenty, + expected: &ten, + }, + { + a: nil, + b: nil, + expected: nil, + }, + } + + for _, test := range tests { + actual := int64MinP(test.a, test.b) + switch { + case actual == nil && test.expected != nil, + test.expected == nil && actual != nil: + t.Errorf("unexpected %v for %#v", actual, test) + continue + case actual == nil: + continue + case *actual != *test.expected: + t.Errorf("unexpected: %v for %#v", actual, test) + } + } +} diff --git a/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go new file mode 100644 index 0000000000000..c289b04fa9a49 --- /dev/null +++ b/openshift-kube-apiserver/admission/autoscaling/runonceduration/doc.go @@ -0,0 +1,22 @@ +/* +Package runonceduration contains the RunOnceDuration admission control plugin. +The plugin allows overriding the ActiveDeadlineSeconds for pods that have a +RestartPolicy of RestartPolicyNever or RestartPolicyOnFailure (run-once pods). +If an annotation exists in the pod's namespace of: + + openshift.io/active-deadline-seconds-override + +the value of the annotation will take precedence over the globally configured +value in the plugin's configuration. 
+ + Configuration + + The plugin is configured via a RunOnceDurationConfig object: + + apiVersion: autoscaling.openshift.io/v1 + kind: RunOnceDurationConfig + activeDeadlineSecondsOverride: 3600 +*/ +package runonceduration diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go new file mode 100644 index 0000000000000..2a858c75113f3 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver.go @@ -0,0 +1,149 @@ +package apiserver + +import ( + "context" + "fmt" + "regexp" + "strings" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + + "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func toAPIServerV1(uncastObj runtime.Object) (*configv1.APIServer, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.APIServer) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"APIServer"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type apiserverV1 struct { + infrastructureGetter func() configv1client.InfrastructuresGetter +} + +func (a apiserverV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecCreate(obj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (a apiserverV1) validateSNINames(obj *configv1.APIServer) field.ErrorList { + errs := field.ErrorList{} + if len(obj.Spec.ServingCerts.NamedCertificates) == 0 { + return errs + } + + infrastructure, err := a.infrastructureGetter().Infrastructures().Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + // without the Infrastructure object there is nothing to compare against, + // and infrastructure would be dereferenced below, so return immediately + return append(errs, field.InternalError(field.NewPath("metadata"), err)) + } + for i, currSNI := range obj.Spec.ServingCerts.NamedCertificates { + // if names are specified, confirm they do not match + // if names are not specified, the cert can still match, but only the operator resolves the secrets down. 
We gain a lot of benefit by being sure + // we don't allow an explicit override of these values + for j, currName := range currSNI.Names { + path := field.NewPath("spec").Child("servingCerts").Index(i).Child("names").Index(j) + if currName == infrastructure.Status.APIServerInternalURL { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + continue + } + if strings.HasSuffix(currName, ".*") { + withoutSuffix := currName[0 : len(currName)-2] + if strings.HasPrefix(infrastructure.Status.APIServerInternalURL, withoutSuffix) { + errs = append(errs, field.Invalid(path, currName, fmt.Sprintf("may not match internal loadbalancer: %q", infrastructure.Status.APIServerInternalURL))) + } + } + } + } + + return errs +} + +func (a apiserverV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerSpecUpdate(obj.Spec, oldObj.Spec)...) + errs = append(errs, a.validateSNINames(obj)...) + + return errs +} + +func (apiserverV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAPIServerV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAPIServerV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAPIServerStatus(obj.Status)...) 
+ + return errs +} + +func validateAPIServerSpecCreate(spec configv1.APIServerSpec) field.ErrorList { + + errs := validateAdditionalCORSAllowedOrigins(field.NewPath("spec").Child("additionalCORSAllowedOrigins"), spec.AdditionalCORSAllowedOrigins) + return errs +} + +func validateAPIServerSpecUpdate(newSpec, oldSpec configv1.APIServerSpec) field.ErrorList { + + errs := validateAdditionalCORSAllowedOrigins(field.NewPath("spec").Child("additionalCORSAllowedOrigins"), newSpec.AdditionalCORSAllowedOrigins) + return errs +} + +func validateAPIServerStatus(status configv1.APIServerStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateAdditionalCORSAllowedOrigins(fieldPath *field.Path, cors []string) field.ErrorList { + errs := field.ErrorList{} + + for i, re := range cors { + if _, err := regexp.Compile(re); err != nil { + errs = append(errs, field.Invalid(fieldPath.Index(i), re, fmt.Sprintf("not a valid regular expression: %v", err))) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go new file mode 100644 index 0000000000000..156761f86c50d --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validate_apiserver_test.go @@ -0,0 +1,117 @@ +package apiserver + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + configclientfake "github.com/openshift/client-go/config/clientset/versioned/fake" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidateSNINames(t *testing.T) { + expectNoErrors := func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) > 0 { + t.Fatal(errs) + } + } + + tests := []struct { + name string + + internalName string + apiserver *configv1.APIServer + + validateErrors func(t *testing.T, errs field.ErrorList) + }{ + { + name: "no sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{}, + validateErrors: expectNoErrors, + }, + { + name: "allowed sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + { + Names: []string{"external.host.com", "somewhere.else.*"}, + }, + }, + }, + }, + }, + validateErrors: expectNoErrors, + }, + { + name: "directly invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: []string{"external.host.com", "somewhere.else.*"}}, + {Names: []string{"foo.bar", "internal.host.com"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[1].names[1]: Invalid value: "internal.host.com": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + { + name: "wildcard invalid sni", + internalName: "internal.host.com", + apiserver: &configv1.APIServer{ + Spec: configv1.APIServerSpec{ + ServingCerts: configv1.APIServerServingCerts{ + NamedCertificates: []configv1.APIServerNamedServingCert{ + {Names: 
[]string{"internal.*"}}, + }, + }, + }, + }, + validateErrors: func(t *testing.T, errs field.ErrorList) { + t.Helper() + if len(errs) != 1 { + t.Fatal(errs) + } + if errs[0].Error() != `spec.servingCerts[0].names[0]: Invalid value: "internal.*": may not match internal loadbalancer: "internal.host.com"` { + t.Error(errs[0]) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeclient := configclientfake.NewSimpleClientset(&configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.InfrastructureStatus{ + APIServerInternalURL: test.internalName, + }, + }) + + instance := apiserverV1{ + infrastructureGetter: func() configv1client.InfrastructuresGetter { + return fakeclient.ConfigV1() + }, + } + test.validateErrors(t, instance.validateSNINames(test.apiserver)) + }) + + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go new file mode 100644 index 0000000000000..06e9e04c1320b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/apiserver/validation_wrapper.go @@ -0,0 +1,76 @@ +package apiserver + +import ( + "fmt" + "io" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/rest" +) + +const PluginName = "config.openshift.io/ValidateAPIServer" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return NewValidateAPIServer() + }) +} + +type validateCustomResourceWithClient struct { + admission.ValidationInterface + + infrastructureGetter configv1client.InfrastructuresGetter +} + +func NewValidateAPIServer() (admission.Interface, error) { + ret := &validateCustomResourceWithClient{} + + delegate, err := customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("apiservers").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("APIServer"): apiserverV1{infrastructureGetter: ret.getInfrastructureGetter}, + }) + if err != nil { + return nil, err + } + ret.ValidationInterface = delegate + + return ret, nil +} + +var _ admissionrestconfig.WantsRESTClientConfig = &validateCustomResourceWithClient{} + +func (a *validateCustomResourceWithClient) getInfrastructureGetter() configv1client.InfrastructuresGetter { + return a.infrastructureGetter +} + +func (a *validateCustomResourceWithClient) SetRESTClientConfig(restClientConfig rest.Config) { + var err error + a.infrastructureGetter, err = configv1client.NewForConfig(&restClientConfig) + if err != nil { + utilruntime.HandleError(err) + return + } +} + +func (a *validateCustomResourceWithClient) ValidateInitialization() error { + if a.infrastructureGetter == nil { + return fmt.Errorf(PluginName + " needs an infrastructureGetter") + } + + if initializationValidator, ok := a.ValidationInterface.(admission.InitializationValidator); ok { + return 
initializationValidator.ValidateInitialization() + } + + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go new file mode 100644 index 0000000000000..f72b89a63526b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/attributes.go @@ -0,0 +1,53 @@ +package customresourcevalidation + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" + + authorizationv1 "github.com/openshift/api/authorization/v1" + configv1 "github.com/openshift/api/config/v1" + quotav1 "github.com/openshift/api/quota/v1" + securityv1 "github.com/openshift/api/security/v1" +) + +// unstructuredUnpackingAttributes tries to convert to a real object in the config scheme +type unstructuredUnpackingAttributes struct { + admission.Attributes +} + +func (a *unstructuredUnpackingAttributes) GetObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetObject()) +} + +func (a *unstructuredUnpackingAttributes) GetOldObject() runtime.Object { + return toBestObjectPossible(a.Attributes.GetOldObject()) +} + +// toBestObjectPossible tries to convert to a real object in the supported scheme +func toBestObjectPossible(orig runtime.Object) runtime.Object { + unstructuredOrig, ok := orig.(runtime.Unstructured) + if !ok { + return orig + } + + targetObj, err := supportedObjectsScheme.New(unstructuredOrig.GetObjectKind().GroupVersionKind()) + if err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredOrig.UnstructuredContent(), targetObj); err != nil { + utilruntime.HandleError(err) + return unstructuredOrig + } + return targetObj +} + +var supportedObjectsScheme = runtime.NewScheme() + +func init() { + utilruntime.Must(configv1.Install(supportedObjectsScheme)) + utilruntime.Must(quotav1.Install(supportedObjectsScheme)) + utilruntime.Must(securityv1.Install(supportedObjectsScheme)) + utilruntime.Must(authorizationv1.Install(supportedObjectsScheme)) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go new file mode 100644 index 0000000000000..94e9828cb9d83 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication.go @@ -0,0 +1,133 @@ +package authentication + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateAuthentication" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("authentications").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Authentication"): authenticationV1{}, + }) + }) +} + +func 
toAuthenticationV1(uncastObj runtime.Object) (*configv1.Authentication, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*configv1.Authentication) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Authentication"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"}), + } + } + + return obj, nil +} + +type authenticationV1 struct{} + +func (authenticationV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecCreate(obj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (authenticationV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toAuthenticationV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toAuthenticationV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateAuthenticationStatus(obj.Status)...) + + return errs +} + +func validateAuthenticationSpecCreate(spec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(spec) +} + +func validateAuthenticationSpecUpdate(newspec, oldspec configv1.AuthenticationSpec) field.ErrorList { + return validateAuthenticationSpec(newspec) +} + +func validateAuthenticationSpec(spec configv1.AuthenticationSpec) field.ErrorList { + errs := field.ErrorList{} + specField := field.NewPath("spec") + + switch spec.Type { + case configv1.AuthenticationTypeNone, configv1.AuthenticationTypeIntegratedOAuth, "": + default: + errs = append(errs, field.NotSupported(specField.Child("type"), + spec.Type, + []string{string(configv1.AuthenticationTypeNone), string(configv1.AuthenticationTypeIntegratedOAuth)}, + )) + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(specField.Child("oauthMetadata"), spec.OAuthMetadata, false)...) + + // validate the secret names in WebhookTokenAuthenticators + for i, wh := range spec.WebhookTokenAuthenticators { + errs = append( + errs, + crvalidation.ValidateSecretReference( + specField.Child("webhookTokenAuthenticators").Index(i).Child("kubeConfig"), + wh.KubeConfig, + true, + )...) 
+ } + + return errs +} + +func validateAuthenticationStatus(status configv1.AuthenticationStatus) field.ErrorList { + return crvalidation.ValidateConfigMapReference(field.NewPath("status", "integratedOAuthMetadata"), status.IntegratedOAuthMetadata, false) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go new file mode 100644 index 0000000000000..56f70b29d895b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/authentication/validate_authentication_test.go @@ -0,0 +1,189 @@ +package authentication + +import ( + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestFailValidateAuthenticationSpec(t *testing.T) { + errorCases := map[string]struct { + spec configv1.AuthenticationSpec + errorType field.ErrorType + errorField string + }{ + "invalid authn type": { + spec: configv1.AuthenticationSpec{ + Type: "MyCoolOAuthSrv", + }, + errorType: field.ErrorTypeNotSupported, + errorField: "spec.type", + }, + "invalid metadata ref": { + spec: configv1.AuthenticationSpec{ + Type: "", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "../shadow", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.oauthMetadata.name", + }, + "invalid webhook ref": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "this+that"}}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticators[0].kubeConfig.name", + }, + "invalid webhook ref - multiple webhooks": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "that.now"}}, + {KubeConfig: configv1.SecretNameReference{Name: "this+that"}}, + {KubeConfig: configv1.SecretNameReference{Name: "this.then"}}, + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "spec.webhookTokenAuthenticators[1].kubeConfig.name", + }, + "empty webhook name": { + spec: configv1.AuthenticationSpec{ + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: ""}}, + }, + }, + errorType: field.ErrorTypeRequired, + errorField: "spec.webhookTokenAuthenticators[0].kubeConfig.name", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationSpec(tc.spec) + if len(errs) == 0 { + t.Errorf("'%s': should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationSpec(t *testing.T) { + successCases := map[string]configv1.AuthenticationSpec{ + "integrated oauth authn type": { + Type: "IntegratedOAuth", + }, + "_none_ authn type": { + Type: "None", + }, + "empty authn type": { + Type: "", + }, + "integrated oauth + oauth metadata": { + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "configmapwithmetadata", + }, + }, + "webhook set": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: 
configv1.SecretNameReference{Name: "wheniwaslittleiwantedtobecomeawebhook"}}, + }, + }, + "some webhooks": { + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret"}}, + {KubeConfig: configv1.SecretNameReference{Name: "whatacoolnameforasecret2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisalsoisacoolname"}}, + {KubeConfig: configv1.SecretNameReference{Name: "letsnotoverdoit"}}, + }, + }, + "all fields set": { + Type: "IntegratedOAuth", + OAuthMetadata: configv1.ConfigMapNameReference{ + Name: "suchname", + }, + WebhookTokenAuthenticators: []configv1.DeprecatedWebhookTokenAuthenticator{ + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook2"}}, + {KubeConfig: configv1.SecretNameReference{Name: "thisisawebhook33"}}, + }, + }, + } + + for tcName, s := range successCases { + errs := validateAuthenticationSpec(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } +} + +func TestFailValidateAuthenticationStatus(t *testing.T) { + errorCases := map[string]struct { + status configv1.AuthenticationStatus + errorType field.ErrorType + errorField string + }{ + "wrong reference name": { + status: configv1.AuthenticationStatus{ + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "something_wrong", + }, + }, + errorType: field.ErrorTypeInvalid, + errorField: "status.integratedOAuthMetadata.name", + }, + } + + for tcName, tc := range errorCases { + errs := validateAuthenticationStatus(tc.status) + if len(errs) == 0 { + t.Errorf("'%s': should have failed but did not", tcName) + } + + for _, e := range errs { + if e.Type != tc.errorType { + t.Errorf("'%s': expected errors of type '%s', got %v:", tcName, tc.errorType, e) + } + + if e.Field != tc.errorField { + t.Errorf("'%s': expected errors in field '%s', got %v:", tcName, tc.errorField, e) + } + } + } +} + +func TestSucceedValidateAuthenticationStatus(t *testing.T) { + successCases := map[string]configv1.AuthenticationStatus{ + "basic case": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "hey-there", + }, + }, + "empty reference": { + IntegratedOAuthMetadata: configv1.ConfigMapNameReference{ + Name: "", + }, + }, + "empty status": {}, + } + + for tcName, s := range successCases { + errs := validateAuthenticationStatus(s) + if len(errs) != 0 { + t.Errorf("'%s': expected success, but failed: %v", tcName, errs.ToAggregate().Error()) + } + } + +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go new file mode 100644 index 0000000000000..18af783d61305 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validate_crq.go @@ -0,0 +1,83 @@ +package clusterresourcequota + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + quotav1 "github.com/openshift/api/quota/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + quotavalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation" +) + 
+const PluginName = "quota.openshift.io/ValidateClusterResourceQuota" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: quotav1.GroupName, Resource: "clusterresourcequotas"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + quotav1.GroupVersion.WithKind("ClusterResourceQuota"): clusterResourceQuotaV1{}, + }) + }) +} + +func toClusterResourceQuota(uncastObj runtime.Object) (*quotav1.ClusterResourceQuota, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*quotav1.ClusterResourceQuota) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"ClusterResourceQuota"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{quotav1.GroupVersion.String()})) + } + + return obj, nil +} + +type clusterResourceQuotaV1 struct { +} + +func (clusterResourceQuotaV1) ValidateCreate(obj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuota(clusterResourceQuotaObj)...) + + return errs +} + +func (clusterResourceQuotaV1) ValidateUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + clusterResourceQuotaObj, errs := toClusterResourceQuota(obj) + if len(errs) > 0 { + return errs + } + clusterResourceQuotaOldObj, errs := toClusterResourceQuota(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&clusterResourceQuotaObj.ObjectMeta, false, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, quotavalidation.ValidateClusterResourceQuotaUpdate(clusterResourceQuotaObj, clusterResourceQuotaOldObj)...) 
+ + return errs +} + +func (c clusterResourceQuotaV1) ValidateStatusUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go new file mode 100644 index 0000000000000..19993a05fdd60 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation.go @@ -0,0 +1,68 @@ +package validation + +import ( + "sort" + + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func ValidateClusterResourceQuota(clusterquota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&clusterquota.ObjectMeta, false, validation.ValidateResourceQuotaName, field.NewPath("metadata")) + + hasSelectionCriteria := (clusterquota.Spec.Selector.LabelSelector != nil && len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) > 0) || + (len(clusterquota.Spec.Selector.AnnotationSelector) > 0) + + if !hasSelectionCriteria { + allErrs = append(allErrs, field.Required(field.NewPath("spec", "selector"), "must restrict the selected projects")) + } + if clusterquota.Spec.Selector.LabelSelector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(clusterquota.Spec.Selector.LabelSelector, field.NewPath("spec", "selector", "labels"))...) + if len(clusterquota.Spec.Selector.LabelSelector.MatchLabels)+len(clusterquota.Spec.Selector.LabelSelector.MatchExpressions) == 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "selector", "labels"), clusterquota.Spec.Selector.LabelSelector, "must restrict the selected projects")) + } + } + if clusterquota.Spec.Selector.AnnotationSelector != nil { + allErrs = append(allErrs, validation.ValidateAnnotations(clusterquota.Spec.Selector.AnnotationSelector, field.NewPath("spec", "selector", "annotations"))...) + } + + internalQuota := &core.ResourceQuotaSpec{} + if err := v1.Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&clusterquota.Spec.Quota, internalQuota, nil); err != nil { + panic(err) + } + internalStatus := &core.ResourceQuotaStatus{} + if err := v1.Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&clusterquota.Status.Total, internalStatus, nil); err != nil { + panic(err) + } + + allErrs = append(allErrs, validation.ValidateResourceQuotaSpec(internalQuota, field.NewPath("spec", "quota"))...) + allErrs = append(allErrs, validation.ValidateResourceQuotaStatus(internalStatus, field.NewPath("status", "overall"))...) + + orderedNamespaces := clusterquota.Status.Namespaces.DeepCopy() + sort.Slice(orderedNamespaces, func(i, j int) bool { + return orderedNamespaces[i].Namespace < orderedNamespaces[j].Namespace + }) + + for _, namespace := range orderedNamespaces { + fldPath := field.NewPath("status", "namespaces").Key(namespace.Namespace) + for k, v := range namespace.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, validation.ValidateResourceQuotaResourceName(string(k), resPath)...) 
+ allErrs = append(allErrs, validation.ValidateResourceQuantityValue(string(k), v, resPath)...) + } + } + + return allErrs +} + +func ValidateClusterResourceQuotaUpdate(clusterquota, oldClusterResourceQuota *quotav1.ClusterResourceQuota) field.ErrorList { + allErrs := validation.ValidateObjectMetaUpdate(&clusterquota.ObjectMeta, &oldClusterResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateClusterResourceQuota(clusterquota)...) + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go new file mode 100644 index 0000000000000..c1dbf76aecf46 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota/validation/validation_test.go @@ -0,0 +1,173 @@ +package validation + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" + corekubev1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/validation" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func spec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + corev1.ResourcePods: resource.MustParse("10"), + corev1.ResourceServices: resource.MustParse("0"), + corev1.ResourceReplicationControllers: resource.MustParse("10"), + corev1.ResourceQuotas: resource.MustParse("10"), + corev1.ResourceConfigMaps: resource.MustParse("10"), + corev1.ResourceSecrets: resource.MustParse("10"), + }, + Scopes: scopes, + } +} + +func scopeableSpec(scopes ...corev1.ResourceQuotaScope) corev1.ResourceQuotaSpec { + return corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100"), + corev1.ResourceMemory: resource.MustParse("10000"), + corev1.ResourceRequestsCPU: resource.MustParse("100"), + corev1.ResourceRequestsMemory: resource.MustParse("10000"), + corev1.ResourceLimitsCPU: resource.MustParse("100"), + corev1.ResourceLimitsMemory: resource.MustParse("10000"), + }, + Scopes: scopes, + } +} + +func TestValidationClusterQuota(t *testing.T) { + // storage is not yet supported as a quota tracked resource + invalidQuotaResourceSpec := corev1.ResourceQuotaSpec{ + Hard: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("10"), + }, + } + validLabels := map[string]string{"a": "b"} + + errs := ValidateClusterResourceQuota( + &quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + ) + if len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + + errorCases := map[string]struct { + A quotav1.ClusterResourceQuota + T field.ErrorType + F string + }{ + "non-zero-length namespace": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta:
metav1.ObjectMeta{Namespace: "bad", Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(), + }, + }, + T: field.ErrorTypeForbidden, + F: "metadata.namespace", + }, + "missing label selector": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: spec(), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "ok scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Quota: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeRequired, + F: "spec.selector", + }, + "bad scope": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: spec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.scopes", + }, + "bad quota spec": { + A: quotav1.ClusterResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Name: "good"}, + Spec: quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{LabelSelector: &metav1.LabelSelector{MatchLabels: validLabels}}, + Quota: invalidQuotaResourceSpec, + }, + }, + T: field.ErrorTypeInvalid, + F: "spec.quota.hard[storage]", + }, + } + for k, v := range errorCases { + errs := ValidateClusterResourceQuota(&v.A) + if len(errs) == 0 { + t.Errorf("expected failure %s for %v", k, v.A) + continue + } + for i := range errs { + if errs[i].Type != v.T { + t.Errorf("%s: expected errors to have type %s: %v", k, v.T, errs[i]) + } + if errs[i].Field != v.F { + t.Errorf("%s: expected errors to have field %s: %v", k, v.F, errs[i]) + } + } + } +} + +func TestValidationQuota(t *testing.T) { + tests := map[string]struct { + A corev1.ResourceQuota + T field.ErrorType + F string + }{ + "scope": { + A: corev1.ResourceQuota{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "good"}, + Spec: scopeableSpec(corev1.ResourceQuotaScopeNotTerminating), + }, + }, + } + for k, v := range tests { + internal := core.ResourceQuota{} + if err := corekubev1.Convert_v1_ResourceQuota_To_core_ResourceQuota(&v.A, &internal, nil); err != nil { + panic(err) + } + errs := validation.ValidateResourceQuota(&internal) + if len(errs) != 0 { + t.Errorf("%s: %v", k, errs) + continue + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go new file mode 100644 index 0000000000000..f637e95cece3a --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource.go @@ -0,0 +1,54 @@ +package config + +import ( + "context" + "fmt" + "io" + + "k8s.io/apiserver/pkg/admission" +) + +const PluginName = "config.openshift.io/DenyDeleteClusterConfiguration" + +// Register registers an admission plugin factory whose plugin prevents the deletion of cluster configuration resources. 
+func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return newAdmissionPlugin(), nil + }) +} + +var _ admission.ValidationInterface = &admissionPlugin{} + +type admissionPlugin struct { + *admission.Handler +} + +func newAdmissionPlugin() *admissionPlugin { + return &admissionPlugin{Handler: admission.NewHandler(admission.Delete)} +} + +// Validate returns an error if there is an attempt to delete a cluster configuration resource. +func (p *admissionPlugin) Validate(ctx context.Context, attributes admission.Attributes, _ admission.ObjectInterfaces) error { + if len(attributes.GetSubresource()) > 0 { + return nil + } + if attributes.GetResource().Group != "config.openshift.io" { + return nil + } + // clusteroperators can be deleted so that we can force status refreshes and change over time. + // clusterversions not named `version` can be deleted (none are expected to exist). + // other config.openshift.io resources not named `cluster` can be deleted (none are expected to exist). + switch attributes.GetResource().Resource { + case "clusteroperators": + return nil + case "clusterversions": + if attributes.GetName() != "version" { + return nil + } + default: + if attributes.GetName() != "cluster" { + return nil + } + } + return admission.NewForbidden(attributes, fmt.Errorf("deleting required %s.%s resource, named %s, is not allowed", attributes.GetResource().Resource, attributes.GetResource().Group, attributes.GetName())) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go new file mode 100644 index 0000000000000..70d289f5f26df --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/config/deny_delete_cluster_config_resource_test.go @@ -0,0 +1,73 @@ +package config + +import ( + "context" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +func TestAdmissionPlugin_Validate(t *testing.T) { + testCases := []struct { + tcName string + group string + resource string + name string + denyDelete bool + }{ + { + tcName: "NotWhiteListedResourceNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: true, + }, + { + tcName: "NotWhiteListedResourceNotNamedCluster", + group: "config.openshift.io", + resource: "notWhitelisted", + name: "notCluster", + denyDelete: false, + }, + { + tcName: "ClusterVersionVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "version", + denyDelete: true, + }, + { + tcName: "ClusterVersionNotVersion", + group: "config.openshift.io", + resource: "clusterversions", + name: "instance", + denyDelete: false, + }, + { + tcName: "ClusterOperator", + group: "config.openshift.io", + resource: "clusteroperators", + name: "instance", + denyDelete: false, + }, + { + tcName: "OtherGroup", + group: "not.config.openshift.io", + resource: "notWhitelisted", + name: "cluster", + denyDelete: false, + }, + } + for _, tc := range testCases { + t.Run(tc.tcName, func(t *testing.T) { + err := newAdmissionPlugin().Validate(context.TODO(), admission.NewAttributesRecord( + nil, nil, schema.GroupVersionKind{}, "", + tc.name, schema.GroupVersionResource{Group: tc.group, Resource: tc.resource}, + "", admission.Delete, nil, false, nil), nil) + if tc.denyDelete != (err != nil) { +
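// Validate must return an error exactly when the case expects the deletion to be denied +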
t.Error(tc.denyDelete, err) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go new file mode 100644 index 0000000000000..3cdd31c9e4a97 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/console/validate_console.go @@ -0,0 +1,118 @@ +package console + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateConsole" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("consoles").GroupResource(): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Console"): consoleV1{}, + }) + }) +} + +func toConsoleV1(uncastObj runtime.Object) (*configv1.Console, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Console) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Console"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type consoleV1 struct{} + +func (consoleV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecCreate(obj.Spec)...) + + return errs +} + +func (consoleV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (consoleV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toConsoleV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toConsoleV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateConsoleStatus(obj.Status)...) 
+ + return errs +} + +func validateConsoleSpecCreate(spec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleSpecUpdate(newSpec, oldSpec configv1.ConsoleSpec) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} + +func validateConsoleStatus(status configv1.ConsoleStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go new file mode 100644 index 0000000000000..bdc4681be54fa --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidationregistration/cr_validation_registration.go @@ -0,0 +1,60 @@ +package customresourcevalidationregistration + +import ( + "k8s.io/apiserver/pkg/admission" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/authentication" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/clusterresourcequota" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/config" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/console" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/features" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/image" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/project" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/scheduler" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints" +) + +// AllCustomResourceValidators are the names of all custom resource validators that should be registered +var AllCustomResourceValidators = []string{ + apiserver.PluginName, + authentication.PluginName, + features.PluginName, + console.PluginName, + image.PluginName, + oauth.PluginName, + project.PluginName, + config.PluginName, + scheduler.PluginName, + clusterresourcequota.PluginName, + securitycontextconstraints.PluginName, + rolebindingrestriction.PluginName, + + // this one is special because we don't work without it. + securitycontextconstraints.DefaultingPluginName, +} + +func RegisterCustomResourceValidation(plugins *admission.Plugins) { + apiserver.Register(plugins) + authentication.Register(plugins) + features.Register(plugins) + console.Register(plugins) + image.Register(plugins) + oauth.Register(plugins) + project.Register(plugins) + config.Register(plugins) + scheduler.Register(plugins) + + // This plugin validates the quota.openshift.io/v1 ClusterResourceQuota resources. + // NOTE: This is only allowed because it is required to get a running control plane operator. + clusterresourcequota.Register(plugins) + // This plugin validates the security.openshift.io/v1 SecurityContextConstraints resources. + securitycontextconstraints.Register(plugins) + // This plugin validates the authorization.openshift.io/v1 RoleBindingRestriction resources. 
+ rolebindingrestriction.Register(plugins) + + // this one is special because we don't work without it. + securitycontextconstraints.RegisterDefaulting(plugins) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go new file mode 100644 index 0000000000000..6cb55fe81988c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator.go @@ -0,0 +1,98 @@ +package customresourcevalidation + +import ( + "context" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" +) + +type ObjectValidator interface { + ValidateCreate(obj runtime.Object) field.ErrorList + ValidateUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList + ValidateStatusUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList +} + +// validateCustomResource is an implementation of admission.Interface. +// It checks incoming custom resources against the ObjectValidator registered for their kind. +type validateCustomResource struct { + *admission.Handler + + resources map[schema.GroupResource]bool + validators map[schema.GroupVersionKind]ObjectValidator +} + +func NewValidator(resources map[schema.GroupResource]bool, validators map[schema.GroupVersionKind]ObjectValidator) (admission.ValidationInterface, error) { + return &validateCustomResource{ + Handler: admission.NewHandler(admission.Create, admission.Update), + resources: resources, + validators: validators, + }, nil +} + +var _ admission.ValidationInterface = &validateCustomResource{} + +// Validate is an admission function that validates the custom resources this validator was registered with. +// uncastAttributes may carry the object in unstructured form; it is unpacked into its typed equivalent before validation.
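+// The dispatch below looks up the ObjectValidator registered for the object's kind, then routes to +// ValidateCreate, ValidateUpdate, or ValidateStatusUpdate based on the operation and subresource.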
+func (a *validateCustomResource) Validate(ctx context.Context, uncastAttributes admission.Attributes, _ admission.ObjectInterfaces) error { + attributes := &unstructuredUnpackingAttributes{Attributes: uncastAttributes} + if a.shouldIgnore(attributes) { + return nil + } + validator, ok := a.validators[attributes.GetKind()] + if !ok { + return admission.NewForbidden(attributes, fmt.Errorf("unhandled kind: %v", attributes.GetKind())) + } + + switch attributes.GetOperation() { + case admission.Create: + // creating subresources isn't something we understand, but we can be pretty sure we don't need to validate it + if len(attributes.GetSubresource()) > 0 { + return nil + } + errors := validator.ValidateCreate(attributes.GetObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case admission.Update: + switch attributes.GetSubresource() { + case "": + errors := validator.ValidateUpdate(attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + case "status": + errors := validator.ValidateStatusUpdate(attributes.GetObject(), attributes.GetOldObject()) + if len(errors) == 0 { + return nil + } + return apierrors.NewInvalid(attributes.GetKind().GroupKind(), attributes.GetName(), errors) + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled subresource: %v", attributes.GetSubresource())) + } + + default: + return admission.NewForbidden(attributes, fmt.Errorf("unhandled operation: %v", attributes.GetOperation())) + } +} + +func (a *validateCustomResource) shouldIgnore(attributes admission.Attributes) bool { + if !a.resources[attributes.GetResource().GroupResource()] { + return true + } + // if a subresource is specified and it isn't status, skip it + if len(attributes.GetSubresource()) > 0 && attributes.GetSubresource() != "status" { + return true + } + + return false +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go new file mode 100644 index 0000000000000..6a53c16c062ca --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/customresourcevalidator_test.go @@ -0,0 +1,278 @@ +package customresourcevalidation + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authentication/user" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestCustomResourceValidator(t *testing.T) { + + const ( + testGroup = "config.openshift.io" + testVersion = "v1" + testResource = "images" + testKind = "Image" + ) + + var testObjectType *configv1.Image + + testCases := []struct { + description string + object runtime.Object + objectBytes []byte + oldObject runtime.Object + oldObjectBytes []byte + kind schema.GroupVersionKind + namespace string + name string + resource schema.GroupVersionResource + subresource string + operation admission.Operation + userInfo user.Info + expectError bool + expectCreateFuncCalled bool + expectUpdateFuncCalled bool + expectStatusUpdateFuncCalled bool + validateFuncErr bool + expectedObjectType interface{} + }{ 
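+ // each case drives one operation/subresource/kind combination through the validator built below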
+ { + description: "ShouldIgnoreUnknownResource", + resource: schema.GroupVersionResource{ + Group: "other_group", + Version: "other_version", + Resource: "other_resource", + }, + }, + { + description: "ShouldIgnoreUnknownSubresource", + subresource: "not_status", + }, + { + description: "ShouldIgnoreUnknownSubresource", + subresource: "not_status", + }, + { + description: "UnhandledOperationConnect", + operation: admission.Connect, + expectError: true, + }, + { + description: "UnhandledOperationDelete", + operation: admission.Delete, + expectError: true, + }, + { + description: "UnhandledKind", + operation: admission.Create, + kind: schema.GroupVersionKind{ + Group: "other_group", + Version: "other_version", + Kind: "other_resource", + }, + expectError: true, + }, + { + description: "Create", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectCreateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "CreateSubresourceNope", + operation: admission.Create, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + }, + { + description: "CreateError", + operation: admission.Create, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectCreateFuncCalled: true, + expectError: true, + }, + { + description: "Update", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateError", + operation: admission.Update, + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + validateFuncErr: true, + expectError: true, + }, + { + description: "UpdateStatus", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + expectedObjectType: testObjectType, + }, + { + description: "UpdateStatusError", + operation: admission.Update, + subresource: "status", + objectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + oldObjectBytes: []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v/%v"}`, testKind, testGroup, testVersion)), + expectStatusUpdateFuncCalled: true, + validateFuncErr: true, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + + var createFuncCalled bool + var updateFuncCalled bool + var updateStatusFuncCalled bool + var funcArgObject runtime.Object + var funcArgOldObject runtime.Object + + handler, err := NewValidator( + map[schema.GroupResource]bool{ + {Group: testGroup, Resource: testResource}: true, + }, + map[schema.GroupVersionKind]ObjectValidator{ + {Group: testGroup, Version: testVersion, Kind: testKind}: testValidator{ + validateCreate: func(obj runtime.Object) field.ErrorList { + createFuncCalled = true 
+ if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + return nil + }, + validateUpdate: func(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + updateFuncCalled = true + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + validateStatusUpdate: func(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + updateStatusFuncCalled = true + if tc.validateFuncErr { + return field.ErrorList{field.InternalError(field.NewPath("test"), errors.New("TEST Error"))} + } + funcArgObject = obj + funcArgOldObject = oldObj + return nil + }, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + validator := handler.(admission.ValidationInterface) + + if len(tc.objectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.objectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.object = object.(runtime.Object) + tc.kind = *kind + } + + if len(tc.oldObjectBytes) > 0 { + object, kind, err := unstructured.UnstructuredJSONScheme.Decode(tc.oldObjectBytes, nil, nil) + if err != nil { + t.Fatal(err) + } + tc.oldObject = object.(runtime.Object) + tc.kind = *kind + } + + if tc.resource == (schema.GroupVersionResource{}) { + tc.resource = schema.GroupVersionResource{ + Group: testGroup, + Version: testVersion, + Resource: testResource, + } + } + + attributes := admission.NewAttributesRecord( + tc.object, + tc.oldObject, + tc.kind, + tc.namespace, + tc.name, + tc.resource, + tc.subresource, + tc.operation, + nil, + false, + tc.userInfo, + ) + + err = validator.Validate(context.TODO(), attributes, nil) + switch { + case tc.expectError && err == nil: + t.Error("Error expected") + case !tc.expectError && err != nil: + t.Errorf("Unexpected error: %v", err) + } + if tc.expectCreateFuncCalled != createFuncCalled { + t.Errorf("ValidateObjCreateFunc called: expected: %v, actual: %v", tc.expectCreateFuncCalled, createFuncCalled) + } + if tc.expectUpdateFuncCalled != updateFuncCalled { + t.Errorf("ValidateObjUpdateFunc called: expected: %v, actual: %v", tc.expectUpdateFuncCalled, updateFuncCalled) + } + if tc.expectStatusUpdateFuncCalled != updateStatusFuncCalled { + t.Errorf("ValidateStatusUpdateFunc called: expected: %v, actual: %v", tc.expectStatusUpdateFuncCalled, updateStatusFuncCalled) + } + if reflect.TypeOf(tc.expectedObjectType) != reflect.TypeOf(funcArgObject) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgObject) + } + if (tc.oldObject != nil) && (reflect.TypeOf(tc.expectedObjectType) != reflect.TypeOf(funcArgOldObject)) { + t.Errorf("Expected %T, actual %T", tc.expectedObjectType, funcArgOldObject) + } + }) + } + +} + +type testValidator struct { + validateCreate func(uncastObj runtime.Object) field.ErrorList + validateUpdate func(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList + validateStatusUpdate func(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList +} + +func (v testValidator) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + return v.validateCreate(uncastObj) +} + +func (v testValidator) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + return v.validateUpdate(uncastObj, uncastOldObj) + +} + +func (v testValidator) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + 
return v.validateStatusUpdate(uncastObj, uncastOldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go new file mode 100644 index 0000000000000..051ad9d4e3a24 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features.go @@ -0,0 +1,129 @@ +package features + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateFeatureGate" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("features"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("FeatureGate"): featureGateV1{}, + }) + }) +} + +func toFeatureGateV1(uncastObj runtime.Object) (*configv1.FeatureGate, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.FeatureGate) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"FeatureGate"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type featureGateV1 struct { +} + +var knownFeatureSets = sets.NewString("", string(configv1.TechPreviewNoUpgrade), string(configv1.CustomNoUpgrade)) + +func validateFeatureGateSpecCreate(spec configv1.FeatureGateSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on create, we only allow values that we are aware of + if !knownFeatureSets.Has(string(spec.FeatureSet)) { + allErrs = append(allErrs, field.NotSupported(field.NewPath("spec.featureSet"), spec.FeatureSet, knownFeatureSets.List())) + } + + return allErrs +} + +func validateFeatureGateSpecUpdate(spec, oldSpec configv1.FeatureGateSpec) field.ErrorList { + allErrs := field.ErrorList{} + + // on update, we don't fail validation on a field we don't recognize as long as it is not changing + if !knownFeatureSets.Has(string(spec.FeatureSet)) && oldSpec.FeatureSet != spec.FeatureSet { + allErrs = append(allErrs, field.NotSupported(field.NewPath("spec.featureSet"), spec.FeatureSet, knownFeatureSets.List())) + } + + // we do not allow anyone to take back TechPreview + if oldSpec.FeatureSet == configv1.TechPreviewNoUpgrade && spec.FeatureSet != configv1.TechPreviewNoUpgrade { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.featureSet"), "once enabled, tech preview features may not be disabled")) + } + // we do not allow anyone to take back CustomNoUpgrade + if oldSpec.FeatureSet == configv1.CustomNoUpgrade && spec.FeatureSet != configv1.CustomNoUpgrade { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.featureSet"), "once enabled, custom feature gates may not be disabled")) + } + + return allErrs +} + +func (featureGateV1) ValidateCreate(uncastObj 
runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateFeatureGateSpecCreate(obj.Spec)...) + + return allErrs +} + +func (featureGateV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toFeatureGateV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toFeatureGateV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateFeatureGateSpecUpdate(obj.Spec, oldObj.Spec)...) + + return allErrs +} + +func (featureGateV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toFeatureGateV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toFeatureGateV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features_test.go new file mode 100644 index 0000000000000..840d7d8aea8c8 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/features/validate_features_test.go @@ -0,0 +1,127 @@ +package features + +import ( + "strings" + "testing" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateCreateSpec(t *testing.T) { + tests := []struct { + name string + featureSet string + expectedErr string + }{ + { + name: "empty", + featureSet: "", + expectedErr: "", + }, + { + name: "techpreview", + featureSet: string(configv1.TechPreviewNoUpgrade), + expectedErr: "", + }, + { + name: "not real", + featureSet: "fake-value", + expectedErr: "Unsupported value", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := validateFeatureGateSpecCreate(configv1.FeatureGateSpec{FeatureGateSelection: configv1.FeatureGateSelection{FeatureSet: configv1.FeatureSet(tc.featureSet)}}) + switch { + case len(actual) == 0 && len(tc.expectedErr) == 0: + case len(actual) == 0 && len(tc.expectedErr) != 0: + t.Fatal(tc.expectedErr) + case len(actual) != 0 && len(tc.expectedErr) == 0: + t.Fatal(actual) + case len(actual) != 0 && len(tc.expectedErr) != 0: + found := false + for _, actualErr := range actual { + found = found || strings.Contains(actualErr.Error(), tc.expectedErr) + } + if !found { + t.Fatal(actual) + } + default: + } + + }) + } +} + +func TestValidateUpdateSpec(t *testing.T) { + tests := []struct { + name string + featureSet string + oldFeatureSet string + expectedErr string + }{ + { + name: "empty", + featureSet: "", + oldFeatureSet: "", + expectedErr: "", + }, + { + name: "change to techpreview", + featureSet: string(configv1.TechPreviewNoUpgrade), + oldFeatureSet: "", + expectedErr: "", + }, + { + name: "change from techpreview", + featureSet: "", + oldFeatureSet: string(configv1.TechPreviewNoUpgrade), + expectedErr: "once 
enabled, tech preview features may not be disabled", + }, + { + name: "change from custom", + featureSet: string(configv1.TechPreviewNoUpgrade), + oldFeatureSet: string(configv1.CustomNoUpgrade), + expectedErr: "once enabled, custom feature gates may not be disabled", + }, + { + name: "unknown, but no change", + featureSet: "fake-value", + oldFeatureSet: "fake-value", + expectedErr: "", + }, + { + name: "unknown, with change", + featureSet: "fake-value", + oldFeatureSet: "fake-value-2", + expectedErr: "Unsupported value", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := validateFeatureGateSpecUpdate( + configv1.FeatureGateSpec{FeatureGateSelection: configv1.FeatureGateSelection{FeatureSet: configv1.FeatureSet(tc.featureSet)}}, + configv1.FeatureGateSpec{FeatureGateSelection: configv1.FeatureGateSelection{FeatureSet: configv1.FeatureSet(tc.oldFeatureSet)}}, + ) + switch { + case len(actual) == 0 && len(tc.expectedErr) == 0: + case len(actual) == 0 && len(tc.expectedErr) != 0: + t.Fatal(tc.expectedErr) + case len(actual) != 0 && len(tc.expectedErr) == 0: + t.Fatal(actual) + case len(actual) != 0 && len(tc.expectedErr) != 0: + found := false + for _, actualErr := range actual { + found = found || strings.Contains(actualErr.Error(), tc.expectedErr) + } + if !found { + t.Fatal(actual) + } + default: + } + + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go new file mode 100644 index 0000000000000..9248d469a7b95 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/helpers.go @@ -0,0 +1,40 @@ +package customresourcevalidation + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateConfigMapReference(path *field.Path, configMap configv1.ConfigMapNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), configMap.Name, required, validation.ValidateConfigMapName) +} + +func ValidateSecretReference(path *field.Path, secret configv1.SecretNameReference, required bool) field.ErrorList { + return validateConfigMapSecret(path.Child("name"), secret.Name, required, validation.ValidateSecretName) +} + +func validateConfigMapSecret(path *field.Path, name string, required bool, validator validation.ValidateNameFunc) field.ErrorList { + if len(name) == 0 { + if required { + return field.ErrorList{field.Required(path, "")} + } + return nil + } + + if valErrs := validator(name, false); len(valErrs) > 0 { + return field.ErrorList{field.Invalid(path, name, strings.Join(valErrs, ", "))} + } + return nil +} + +// RequireNameCluster is a name validation function that requires the name to be cluster. It's handy for config.openshift.io types. 
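+// For example, RequireNameCluster("cluster", false) returns nil, while RequireNameCluster("default", false) +// returns []string{"must be cluster"}; the prefix argument is ignored.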
+func RequireNameCluster(name string, prefix bool) []string { + if name != "cluster" { + return []string{"must be cluster"} + } + return nil +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go new file mode 100644 index 0000000000000..535c0908296d0 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/image/validate_image.go @@ -0,0 +1,94 @@ +package image + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateImage" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("images"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Image"): imageV1{}, + }) + }) +} + +func toImageV1(uncastObj runtime.Object) (*configv1.Image, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Image) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Image"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type imageV1 struct { +} + +func (imageV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + + return errs +} + +func (imageV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toImageV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toImageV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go new file mode 100644 index 0000000000000..127c91e1312d1 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/helpers.go @@ -0,0 +1,33 @@ +package oauth + +import ( + "net" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func isValidHostname(hostname string) bool { + return len(kvalidation.IsDNS1123Subdomain(hostname)) == 0 || net.ParseIP(hostname) != nil +} + +func ValidateRemoteConnectionInfo(remoteConnectionInfo configv1.OAuthRemoteConnectionInfo, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(remoteConnectionInfo.URL) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("url"), "")) + } else { + _, urlErrs := validation.ValidateSecureURL(remoteConnectionInfo.URL, fldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), remoteConnectionInfo.CA, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientCert"), remoteConnectionInfo.TLSClientCert, false)...) + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fldPath.Child("tlsClientKey"), remoteConnectionInfo.TLSClientKey, false)...) + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go new file mode 100644 index 0000000000000..2ae0b45254a14 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github.go @@ -0,0 +1,69 @@ +package oauth + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitHubIdentityProvider(provider *configv1.GitHubIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath.Child("provider"))...) 
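+ // the checks below are GitHub-specific: organization/team restrictions, Enterprise hostname, and CA wiring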
+ + if len(provider.Teams) > 0 && len(provider.Organizations) > 0 { + errs = append(errs, field.Invalid(fieldPath.Child("organizations"), provider.Organizations, "specify organizations or teams, not both")) + errs = append(errs, field.Invalid(fieldPath.Child("teams"), provider.Teams, "specify organizations or teams, not both")) + } + + // only check that there are some teams/orgs if not GitHub Enterprise Server + if len(provider.Hostname) == 0 && len(provider.Teams) == 0 && len(provider.Organizations) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath, nil, "one of organizations or teams must be specified unless hostname is set or lookup is used")) + } + for i, organization := range provider.Organizations { + if strings.Contains(organization, "/") { + errs = append(errs, field.Invalid(fieldPath.Child("organizations").Index(i), organization, "cannot contain /")) + } + if len(organization) == 0 { + errs = append(errs, field.Required(fieldPath.Child("organizations").Index(i), "cannot be empty")) + } + } + for i, team := range provider.Teams { + if split := strings.Split(team, "/"); len(split) != 2 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } else if org, t := split[0], split[1]; len(org) == 0 || len(t) == 0 { + errs = append(errs, field.Invalid(fieldPath.Child("teams").Index(i), team, "must be in the format <org>/<team>")) + } + } + + if hostname := provider.Hostname; len(hostname) != 0 { + hostnamePath := fieldPath.Child("hostname") + + if hostname == "github.com" || strings.HasSuffix(hostname, ".github.com") { + errs = append(errs, field.Invalid(hostnamePath, hostname, "cannot equal [*.]github.com")) + } + + if !isValidHostname(hostname) { + errs = append(errs, field.Invalid(hostnamePath, hostname, "must be a valid DNS subdomain or IP address")) + } + } + + if caFile := provider.CA; len(caFile.Name) != 0 { + caPath := fieldPath.Child("ca") + + errs = append(errs, crvalidation.ValidateConfigMapReference(caPath, caFile, true)...)
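+ // a CA bundle is only meaningful for GitHub Enterprise, so it must be paired with an explicit hostname (enforced below)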
+ + if len(provider.Hostname) == 0 { + errs = append(errs, field.Invalid(caPath, caFile, "cannot be specified when hostname is empty")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go new file mode 100644 index 0000000000000..10102f24e45e8 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_github_test.go @@ -0,0 +1,249 @@ +package oauth + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestValidateGitHubIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitHubIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + errors field.ErrorList + }{ + { + name: "cannot use GH as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "cannot use GH subdomain as hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "foo.github.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "foo.github.com", Detail: "cannot equal [*.]github.com"}, + }, + }, + { + name: "valid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "company.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "valid ip hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + }, + { + name: "invalid ip hostname with port", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "192.168.8.1:8080", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "192.168.8.1:8080", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid domain hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "google-.com", + CA: configv1.ConfigMapNameReference{Name: "caconfigmap"}, 
+ }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "hostname", BadValue: "google-.com", Detail: "must be a valid DNS subdomain or IP address"}, + }, + }, + { + name: "invalid name in ca ref and no hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{Name: "ca&config-map"}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "ca.name", BadValue: "ca&config-map", Detail: "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"}, + {Type: field.ErrorTypeInvalid, Field: "ca", BadValue: configv1.ConfigMapNameReference{Name: "ca&config-map"}, Detail: "cannot be specified when hostname is empty"}, + }, + }, + { + name: "valid ca and hostname", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "mo.co", + CA: configv1.ConfigMapNameReference{Name: "ca-config-map"}, + }, + mappingMethod: "", + }, + }, + { + name: "GitHub requires client ID and secret", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "", + ClientSecret: configv1.SecretNameReference{}, + Organizations: []string{"org1"}, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeRequired, Field: "provider.clientID", BadValue: "", Detail: ""}, + {Type: field.ErrorTypeRequired, Field: "provider.clientSecret.name", BadValue: "", Detail: ""}, + }, + }, + { + name: "GitHub warns when not constrained to organizations or teams without lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "", BadValue: nil, Detail: "one of organizations or teams must be specified unless hostname is set or lookup is used"}, + }, + }, + { + name: "GitHub does not warn when not constrained to organizations or teams with lookup", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: nil, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "lookup", + }, + }, + { + name: "invalid cannot specific both organizations and teams", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: []string{"org1"}, + Teams: []string{"org1/team1"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "organizations", BadValue: []string{"org1"}, Detail: "specify organizations or teams, not both"}, + {Type: field.ErrorTypeInvalid, Field: "teams", BadValue: []string{"org1/team1"}, 
Detail: "specify organizations or teams, not both"}, + }, + }, + { + name: "invalid team format", + args: args{ + provider: &configv1.GitHubIdentityProvider{ + ClientID: "client", + ClientSecret: configv1.SecretNameReference{Name: "secret"}, + Organizations: nil, + Teams: []string{"org1/team1", "org2/not/team2", "org3//team3", "", "org4/team4"}, + Hostname: "", + CA: configv1.ConfigMapNameReference{}, + }, + mappingMethod: "", + }, + errors: field.ErrorList{ + {Type: field.ErrorTypeInvalid, Field: "teams[1]", BadValue: "org2/not/team2", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[2]", BadValue: "org3//team3", Detail: "must be in the format /"}, + {Type: field.ErrorTypeInvalid, Field: "teams[3]", BadValue: "", Detail: "must be in the format /"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateGitHubIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath) + if tt.errors == nil && len(got) == 0 { + return + } + if !reflect.DeepEqual(got, tt.errors) { + t.Errorf("ValidateGitHubIdentityProvider() = %v, want %v", got, tt.errors) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go new file mode 100644 index 0000000000000..ea9fda2ab4d8c --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab.go @@ -0,0 +1,26 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateGitLabIdentityProvider(provider *configv1.GitLabIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + _, urlErrs := validation.ValidateSecureURL(provider.URL, fieldPath.Child("url")) + allErrs = append(allErrs, urlErrs...) + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) 
+ + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go new file mode 100644 index 0000000000000..9ce73cdc731ee --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_gitlab_test.go @@ -0,0 +1,104 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func gitlabIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGitLab, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } +} + +func TestValidateGitLabIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GitLabIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "insecure URL", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "http://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://anyonecanseemenow.com", "must use https scheme"), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + URL: "https://privategitlab.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "invalid CA ref name", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + CA: configv1.ConfigMapNameReference{Name: "veryBadRefName?:("}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "veryBadRefName?:(", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal passing case", + args: args{ + provider: &configv1.GitLabIdentityProvider{ + ClientID: "hereBeMyId", + ClientSecret: configv1.SecretNameReference{Name: "gitlab-client-sec"}, + URL: "https://anyonecanseemenow.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated case", + args: args{ + provider: gitlabIDP().GitLab, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGitLabIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGitLabIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go new file mode 100644 index 0000000000000..481b162cf756b --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 
"github.com/openshift/api/config/v1" +) + +func ValidateGoogleIdentityProvider(provider *configv1.GoogleIdentityProvider, mappingMethod configv1.MappingMethodType, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) + + if len(provider.HostedDomain) == 0 && mappingMethod != configv1.MappingMethodLookup { + errs = append(errs, field.Invalid(fieldPath.Child("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go new file mode 100644 index 0000000000000..88306d0f1919f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_google_test.go @@ -0,0 +1,90 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func googleIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeGoogle, + Google: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + HostedDomain: "myprivategoogledomain.com", + }, + } +} + +func TestValidateGoogleIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.GoogleIdentityProvider + mappingMethod configv1.MappingMethodType + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + HostedDomain: "myprivategoogledomain.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "no hosted domain with mapping method != 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodClaim, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("hostedDomain"), nil, "hostedDomain must be specified unless lookup is used"), + }, + }, + { + name: "no hosted domain with mapping method == 'lookup'", + args: args{ + provider: &configv1.GoogleIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-google-secret"}, + }, + mappingMethod: configv1.MappingMethodLookup, + }, + want: field.ErrorList{}, + }, + { + name: "working example", + args: args{ + provider: googleIDP().Google, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateGoogleIdentityProvider(tt.args.provider, tt.args.mappingMethod, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateGoogleIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go 
b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go new file mode 100644 index 0000000000000..2cfb664c300d1 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp.go @@ -0,0 +1,215 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + pointerutil "k8s.io/utils/pointer" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // MinimumInactivityTimeoutSeconds defines the smallest value allowed + // for AccessTokenInactivityTimeoutSeconds. + // It also defines the ticker interval for the token update routine, as + // MinimumInactivityTimeoutSeconds / 3 is used there. + MinimumInactivityTimeoutSeconds = 5 * 60 +) + +var validMappingMethods = sets.NewString( + string(configv1.MappingMethodLookup), + string(configv1.MappingMethodClaim), + string(configv1.MappingMethodAdd), +) + +func validateOAuthSpec(spec configv1.OAuthSpec) field.ErrorList { + errs := field.ErrorList{} + specPath := field.NewPath("spec") + + providerNames := sets.NewString() + + challengeIssuingIdentityProviders := []string{} + challengeRedirectingIdentityProviders := []string{} + + // TODO move to ValidateIdentityProviders (plural) + for i, identityProvider := range spec.IdentityProviders { + if isUsedAsChallenger(identityProvider.IdentityProviderConfig) { + // TODO fix CAO to properly let you use request header and other challengers by disabling the other ones on CLI + // RequestHeaderIdentityProvider is special, it can only react to challenge clients by redirecting them + // Make sure we don't have more than a single redirector, and don't have a mix of challenge issuers and redirectors + if identityProvider.Type == configv1.IdentityProviderTypeRequestHeader { + challengeRedirectingIdentityProviders = append(challengeRedirectingIdentityProviders, identityProvider.Name) + } else { + challengeIssuingIdentityProviders = append(challengeIssuingIdentityProviders, identityProvider.Name) + } + } + + identityProviderPath := specPath.Child("identityProviders").Index(i) + errs = append(errs, ValidateIdentityProvider(identityProvider, identityProviderPath)...)
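+	// provider names become prefixes of the identities they produce, so duplicate names are rejected below rather than silently merged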
+ + if len(identityProvider.Name) > 0 { + if providerNames.Has(identityProvider.Name) { + errs = append(errs, field.Invalid(identityProviderPath.Child("name"), identityProvider.Name, "must have a unique name")) + } + providerNames.Insert(identityProvider.Name) + } + } + + if len(challengeRedirectingIdentityProviders) > 1 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf("only one identity provider can redirect clients requesting an authentication challenge, found: %v", strings.Join(challengeRedirectingIdentityProviders, ", ")))) + } + if len(challengeRedirectingIdentityProviders) > 0 && len(challengeIssuingIdentityProviders) > 0 { + errs = append(errs, field.Invalid(specPath.Child("identityProviders"), "", fmt.Sprintf( + "cannot mix providers that redirect clients requesting auth challenges (%s) with providers issuing challenges to those clients (%s)", + strings.Join(challengeRedirectingIdentityProviders, ", "), + strings.Join(challengeIssuingIdentityProviders, ", "), + ))) + } + + // TODO move to ValidateTokenConfig + timeout := spec.TokenConfig.AccessTokenInactivityTimeoutSeconds + if timeout > 0 && timeout < MinimumInactivityTimeoutSeconds { + errs = append(errs, field.Invalid( + specPath.Child("tokenConfig", "accessTokenInactivityTimeoutSeconds"), timeout, + fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", + MinimumInactivityTimeoutSeconds))) + } + + if tokenMaxAge := spec.TokenConfig.AccessTokenMaxAgeSeconds; tokenMaxAge < 0 { + errs = append(errs, field.Invalid(specPath.Child("tokenConfig", "accessTokenMaxAgeSeconds"), tokenMaxAge, "must be a positive integer or 0")) + } + + // TODO move to ValidateTemplates + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "login"), spec.Templates.Login, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "providerSelection"), spec.Templates.ProviderSelection, false)...) + errs = append(errs, crvalidation.ValidateSecretReference(specPath.Child("templates", "error"), spec.Templates.Error, false)...) + + return errs +} + +// if you change this, update the peer in user validation. also, don't change this. 
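+// (identity names embed the provider name ahead of a ":" separator, which is why a colon inside the provider name itself is also rejected below)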
+func validateIdentityProviderName(name string) []string { + if reasons := path.ValidatePathSegmentName(name, false); len(reasons) != 0 { + return reasons + } + + if strings.Contains(name, ":") { + return []string{`may not contain ":"`} + } + return nil +} + +func ValidateIdentityProvider(identityProvider configv1.IdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if len(identityProvider.Name) == 0 { + errs = append(errs, field.Required(fldPath.Child("name"), "")) + } else if reasons := validateIdentityProviderName(identityProvider.Name); len(reasons) != 0 { + errs = append(errs, field.Invalid(fldPath.Child("name"), identityProvider.Name, strings.Join(reasons, ", "))) + } + + if len(identityProvider.MappingMethod) > 0 && !validMappingMethods.Has(string(identityProvider.MappingMethod)) { + errs = append(errs, field.NotSupported(fldPath.Child("mappingMethod"), identityProvider.MappingMethod, validMappingMethods.List())) + } + + provider := identityProvider.IdentityProviderConfig + // create a copy of the provider to simplify checking that only one IdP is set + providerCopy := provider.DeepCopy() + switch provider.Type { + case "": + errs = append(errs, field.Required(fldPath.Child("type"), "")) + + case configv1.IdentityProviderTypeRequestHeader: + errs = append(errs, ValidateRequestHeaderIdentityProvider(provider.RequestHeader, fldPath)...) + providerCopy.RequestHeader = nil + + case configv1.IdentityProviderTypeBasicAuth: + // TODO move to ValidateBasicAuthIdentityProvider for consistency + if provider.BasicAuth == nil { + errs = append(errs, field.Required(fldPath.Child("basicAuth"), "")) + } else { + errs = append(errs, ValidateRemoteConnectionInfo(provider.BasicAuth.OAuthRemoteConnectionInfo, fldPath.Child("basicAuth"))...) + } + providerCopy.BasicAuth = nil + + case configv1.IdentityProviderTypeHTPasswd: + // TODO move to ValidateHTPasswdIdentityProvider for consistency + if provider.HTPasswd == nil { + errs = append(errs, field.Required(fldPath.Child("htpasswd"), "")) + } else { + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("htpasswd", "fileData"), provider.HTPasswd.FileData, true)...) + } + providerCopy.HTPasswd = nil + + case configv1.IdentityProviderTypeLDAP: + errs = append(errs, ValidateLDAPIdentityProvider(provider.LDAP, fldPath.Child("ldap"))...) + providerCopy.LDAP = nil + + case configv1.IdentityProviderTypeKeystone: + errs = append(errs, ValidateKeystoneIdentityProvider(provider.Keystone, fldPath.Child("keystone"))...) + providerCopy.Keystone = nil + + case configv1.IdentityProviderTypeGitHub: + errs = append(errs, ValidateGitHubIdentityProvider(provider.GitHub, identityProvider.MappingMethod, fldPath.Child("github"))...) + providerCopy.GitHub = nil + + case configv1.IdentityProviderTypeGitLab: + errs = append(errs, ValidateGitLabIdentityProvider(provider.GitLab, fldPath.Child("gitlab"))...) + providerCopy.GitLab = nil + + case configv1.IdentityProviderTypeGoogle: + errs = append(errs, ValidateGoogleIdentityProvider(provider.Google, identityProvider.MappingMethod, fldPath.Child("google"))...) + providerCopy.Google = nil + + case configv1.IdentityProviderTypeOpenID: + errs = append(errs, ValidateOpenIDIdentityProvider(provider.OpenID, fldPath.Child("openID"))...)
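+	// as in the cases above, nil out the matched field on the copy; any provider field still non-nil after the switch means a second config was set alongside the declared type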
+ providerCopy.OpenID = nil + + default: + errs = append(errs, field.Invalid(fldPath.Child("type"), identityProvider.Type, "not a valid provider type")) + } + + if !pointerutil.AllPtrFieldsNil(providerCopy) { + errs = append(errs, field.Invalid(fldPath, identityProvider.IdentityProviderConfig, "only one identity provider can be configured in a single object")) + } + + return errs +} + +func ValidateOAuthIdentityProvider(clientID string, clientSecretRef configv1.SecretNameReference, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(clientID) == 0 { + allErrs = append(allErrs, field.Required(fieldPath.Child("clientID"), "")) + } + + allErrs = append(allErrs, crvalidation.ValidateSecretReference(fieldPath.Child("clientSecret"), clientSecretRef, true)...) + + return allErrs +} + +func isUsedAsChallenger(idp configv1.IdentityProviderConfig) bool { + // TODO this is wrong and needs to be more dynamic... + switch idp.Type { + // whitelist all the IdPs that we set `UseAsChallenger: true` in cluster-authentication-operator + case configv1.IdentityProviderTypeBasicAuth, configv1.IdentityProviderTypeGitLab, + configv1.IdentityProviderTypeHTPasswd, configv1.IdentityProviderTypeKeystone, + configv1.IdentityProviderTypeLDAP, + // guard open ID for now because it *could* have challenge in the future + configv1.IdentityProviderTypeOpenID: + return true + case configv1.IdentityProviderTypeRequestHeader: + if idp.RequestHeader == nil { + // this is an error reported elsewhere + return false + } + return len(idp.RequestHeader.ChallengeURL) > 0 + default: + return false + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go new file mode 100644 index 0000000000000..a91419e96f8e6 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_idp_test.go @@ -0,0 +1,421 @@ +package oauth + +import ( + "fmt" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +const wrongConfigMapSecretErrMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g.
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')" + +func htpasswdIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + } +} + +func TestValidateOAuthSpec(t *testing.T) { + doubledIdPs := configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeHTPasswd, + HTPasswd: &configv1.HTPasswdIdentityProvider{ + FileData: configv1.SecretNameReference{ + Name: "innocent.llama", + }, + }, + GitLab: &configv1.GitLabIdentityProvider{ + ClientID: "masterOfInstances", + ClientSecret: configv1.SecretNameReference{Name: "secret-gitlab-secret"}, + URL: "https://thisgitlabinstancerighthere.com", + CA: configv1.ConfigMapNameReference{Name: "letsencrypt-for-gitlab.instance"}, + }, + } + + type args struct { + spec configv1.OAuthSpec + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty object", + args: args{ + spec: configv1.OAuthSpec{}, + }, + }, + { + name: "more than one challenge issuing IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "htpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + }, + { + name: "more than one challenge redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso1", + IdentityProviderConfig: requestHeaderIDP(true, true), + }, + { + Name: "sso2", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "only one identity provider can redirect clients requesting an authentication challenge, found: sso1, sso2"), + }, + }, + { + name: "mixing challenge issuing and redirecting IdPs", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(true, false), + }, + { + Name: "ldap", + IdentityProviderConfig: ldapIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders"), "", "cannot mix providers that redirect clients requesting auth challenges (sso) with providers issuing challenges to those clients (ldap)"), + }, + }, + { + name: "two IdPs with the same name", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "bname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "aname", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "cname", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(2).Child("name"), "aname", "must have a unique name"), + }, + }, + { + name: "negative token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeoutSeconds: -50, + }, + }, + }, + }, + { + name: "positive token inactivity timeout", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeoutSeconds: 32578, + }, + }, + }, + }, + { + name: "zero token inactivity timeout", + args: 
args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeoutSeconds: 0, + }, + }, + }, + }, + { + name: "token inactivity timeout lower than the api constant minimum", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeoutSeconds: 250, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenInactivityTimeoutSeconds"), 250, fmt.Sprintf("the minimum acceptable token timeout value is %d seconds", MinimumInactivityTimeoutSeconds)), + }, + }, + { + name: "negative token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: -20, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "tokenConfig", "accessTokenMaxAgeSeconds"), -20, "must be a positive integer or 0"), + }, + }, + { + name: "positive token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 213123, + }, + }, + }, + }, + { + name: "zero token max age", + args: args{ + spec: configv1.OAuthSpec{ + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 0, + }, + }, + }, + }, + { + name: "template names all messed up", + args: args{ + spec: configv1.OAuthSpec{ + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "/this/is/wrong.html"}, + ProviderSelection: configv1.SecretNameReference{Name: "also_wrong"}, + Error: configv1.SecretNameReference{Name: "the&very+woRst"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "templates", "login", "name"), "/this/is/wrong.html", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "providerSelection", "name"), "also_wrong", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("spec", "templates", "error", "name"), "the&very+woRst", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "everything set properly", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "some_httpasswd", + IdentityProviderConfig: htpasswdIDP(), + }, + { + Name: "sso", + IdentityProviderConfig: requestHeaderIDP(false, true), + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenInactivityTimeoutSeconds: -1, + AccessTokenMaxAgeSeconds: 216000, + }, + Templates: configv1.OAuthTemplates{ + Login: configv1.SecretNameReference{Name: "my-login-template"}, + ProviderSelection: configv1.SecretNameReference{Name: "provider-selection.template"}, + Error: configv1.SecretNameReference{Name: "a.template-with-error"}, + }, + }, + }, + }, + { + name: "two different IdPs in one object", + args: args{ + spec: configv1.OAuthSpec{ + IdentityProviders: []configv1.IdentityProvider{ + { + Name: "bad_bad_config", + IdentityProviderConfig: doubledIdPs, + }, + }, + TokenConfig: configv1.TokenConfig{ + AccessTokenMaxAgeSeconds: 216000, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("spec", "identityProviders").Index(0), doubledIdPs, "only one identity provider can be configured in a single object"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := validateOAuthSpec(tt.args.spec) + + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + }
+ } + + if failedCheck { + t.Errorf("validateOAuthSpec() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateIdentityProvider(t *testing.T) { + type args struct { + identityProvider configv1.IdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty provider needs at least name and type in provider", + args: args{ + identityProvider: configv1.IdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("name"), ""), + field.Required(field.NewPath("type"), ""), + }, + }, + { + name: "unknown type name", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: configv1.IdentityProviderConfig{ + Type: "someText", + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("type"), "someText", "not a valid provider type"), + }, + }, + { + name: "basic provider", + args: args{ + identityProvider: configv1.IdentityProvider{ + Name: "providingProvider", + IdentityProviderConfig: htpasswdIDP(), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ValidateIdentityProvider(tt.args.identityProvider, tt.args.fldPath) + // DeepEqual does not seem to be working well here + var failedCheck bool + if len(got) != len(tt.want) { + failedCheck = true + } else { + // Check all the errors + for i := range got { + if got[i].Error() != tt.want[i].Error() { + failedCheck = true + break + } + } + } + + if failedCheck { + t.Errorf("ValidateIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestValidateOAuthIdentityProvider(t *testing.T) { + type args struct { + clientID string + clientSecretRef configv1.SecretNameReference + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "empty client ID and secret ref", + args: args{ + clientID: "", + clientSecretRef: configv1.SecretNameReference{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "improper client secret refname", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "terribleName_forASecret"}, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("clientSecret", "name"), "terribleName_forASecret", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "working example", + args: args{ + clientID: "thisBeClient", + clientSecretRef: configv1.SecretNameReference{Name: "client-secret-hideout"}, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOAuthIdentityProvider(tt.args.clientID, tt.args.clientSecretRef, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOAuthIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go new file mode 100644 index 0000000000000..e1bf7cb76aed2 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone.go @@ -0,0 +1,23 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" +) + +func ValidateKeystoneIdentityProvider(provider *configv1.KeystoneIdentityProvider, fldPath 
*field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, ValidateRemoteConnectionInfo(provider.OAuthRemoteConnectionInfo, fldPath)...) + + if len(provider.DomainName) == 0 { + errs = append(errs, field.Required(field.NewPath("domainName"), "")) + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go new file mode 100644 index 0000000000000..6ccdddb7b9ebf --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_keystone_test.go @@ -0,0 +1,96 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func keystoneIdP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeKeystone, + Keystone: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://somewhere.over.rainbow/ks", + CA: configv1.ConfigMapNameReference{Name: "govt-ca"}, + }, + DomainName: "production", + }, + } +} + +func TestValidateKeystoneIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.KeystoneIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("url"), ""), + }, + }, + { + name: "http url", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "http://foo", + }, + DomainName: "production", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "http://foo", "must use https scheme"), + }, + }, + { + name: "missing domain name", + args: args{ + provider: &configv1.KeystoneIdentityProvider{ + OAuthRemoteConnectionInfo: configv1.OAuthRemoteConnectionInfo{ + URL: "https://keystone.openstack.nasa.gov/", + }, + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("domainName"), ""), + }, + }, + { + name: "working provider", + args: args{ + provider: keystoneIdP().Keystone, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateKeystoneIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateKeystoneIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go new file mode 100644 index 0000000000000..b5f40060b9cc9 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap.go @@ -0,0 +1,66 @@ +package oauth + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/security/ldaputil" + crvalidation 
"k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateLDAPIdentityProvider(provider *configv1.LDAPIdentityProvider, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + if provider == nil { + errs = append(errs, field.Required(fldPath, "")) + return errs + } + + errs = append(errs, validateLDAPClientConfig(provider.URL, provider.BindDN, provider.BindPassword.Name, provider.CA.Name, provider.Insecure, fldPath)...) + errs = append(errs, crvalidation.ValidateSecretReference(fldPath.Child("bindPassword"), provider.BindPassword, false)...) + errs = append(errs, crvalidation.ValidateConfigMapReference(fldPath.Child("ca"), provider.CA, false)...) + + // At least one attribute to use as the user id is required + if len(provider.Attributes.ID) == 0 { + errs = append(errs, field.Invalid(fldPath.Child("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')")) + } + + return errs +} + +// TODO clean this up +func validateLDAPClientConfig(url, bindDN, bindPasswordRef, CA string, insecure bool, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + + // Make sure bindDN and bindPassword are both set, or both unset + // Both unset means an anonymous bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.1) + // Both set means the name/password simple bind is used for search (https://tools.ietf.org/html/rfc4513#section-5.1.3) + if (len(bindDN) == 0) != (len(bindPasswordRef) == 0) { + errs = append(errs, field.Invalid(fldPath.Child("bindDN"), bindDN, "bindDN and bindPassword must both be specified, or both be empty")) + errs = append(errs, field.Invalid(fldPath.Child("bindPassword").Child("name"), bindPasswordRef, "bindDN and bindPassword must both be specified, or both be empty")) + } + + if len(url) == 0 { + errs = append(errs, field.Required(fldPath.Child("url"), "")) + return errs + } + + u, err := ldaputil.ParseURL(url) + if err != nil { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, err.Error())) + return errs + } + + if insecure { + if u.Scheme == ldaputil.SchemeLDAPS { + errs = append(errs, field.Invalid(fldPath.Child("url"), url, fmt.Sprintf("Cannot use %s scheme with insecure=true", u.Scheme))) + } + if len(CA) > 0 { + errs = append(errs, field.Invalid(fldPath.Child("ca"), CA, "Cannot specify a ca with insecure=true")) + } + } + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go new file mode 100644 index 0000000000000..85daa9e182541 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_ldap_test.go @@ -0,0 +1,101 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ldapIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeLDAP, + LDAP: &configv1.LDAPIdentityProvider{ + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"memberUid"}, + }, + BindDN: "uid=readallaccount,ou=privileged,dc=coolpeople,dc=se", + BindPassword: configv1.SecretNameReference{ + Name: "ldap-secret", + }, + CA: configv1.ConfigMapNameReference{Name: "ldap-ca-configmap"}, + Insecure: false, + URL: "ldaps://ldapinstance.corporate.coolpeople.se/ou=Groups,dc=coolpeople,dc=se?memberUid?sub", + }, 
+ } +} + +func TestValidateLDAPIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.LDAPIdentityProvider + fldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "invalid bindPassword ref name, missing ID", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + BindPassword: configv1.SecretNameReference{Name: "bad_refname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("bindDN"), "", "bindDN and bindPassword must both be specified, or both be empty"), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", "bindDN and bindPassword must both be specified, or both be empty"), + field.Required(field.NewPath("url"), ""), + field.Invalid(field.NewPath("bindPassword", "name"), "bad_refname", wrongConfigMapSecretErrMsg), + field.Invalid(field.NewPath("attributes", "id"), "[]", "at least one id attribute is required (LDAP standard identity attribute is 'dn')"), + }, + }, + { + name: "invalid url", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "https://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("url"), "https://foo", `invalid scheme "https"`), + }, + }, + { + name: "minimal passing provider", + args: args{ + provider: &configv1.LDAPIdentityProvider{ + URL: "ldap://foo", + Attributes: configv1.LDAPAttributeMapping{ + ID: []string{"uid"}, + }, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: ldapIDP().LDAP, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateLDAPIdentityProvider(tt.args.provider, tt.args.fldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateLDAPIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go new file mode 100644 index 0000000000000..5b8a146a6733f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_oauth.go @@ -0,0 +1,110 @@ +package oauth + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateOAuth" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return crvalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.GroupVersion.WithResource("oauths").GroupResource(): true, + }, + map[schema.GroupVersionKind]crvalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("OAuth"): oauthV1{}, + }) + }) +} + +func toOAuthV1(uncastObj runtime.Object) (*configv1.OAuth, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + errs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.OAuth) + if !ok { + return nil, append(errs, + field.NotSupported(field.NewPath("kind"), 
fmt.Sprintf("%T", uncastObj), []string{"OAuth"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type oauthV1 struct{} + +func (oauthV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, crvalidation.RequireNameCluster, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecCreate(obj.Spec)...) + + return errs +} + +func (oauthV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthSpecUpdate(obj.Spec, oldObj.Spec)...) + + return errs +} + +func (oauthV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toOAuthV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toOAuthV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + errs = append(errs, validateOAuthStatus(obj.Status)...) + + return errs +} + +func validateOAuthSpecCreate(spec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(spec) +} + +func validateOAuthSpecUpdate(newspec, oldspec configv1.OAuthSpec) field.ErrorList { + return validateOAuthSpec(newspec) +} + +func validateOAuthStatus(status configv1.OAuthStatus) field.ErrorList { + errs := field.ErrorList{} + + // TODO + + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go new file mode 100644 index 0000000000000..41d8c35db3f91 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid.go @@ -0,0 +1,54 @@ +package oauth + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +func ValidateOpenIDIdentityProvider(provider *configv1.OpenIDIdentityProvider, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if provider == nil { + allErrs = append(allErrs, field.Required(fieldPath, "")) + return allErrs + } + + allErrs = append(allErrs, ValidateOAuthIdentityProvider(provider.ClientID, provider.ClientSecret, fieldPath)...) 
+ + if provider.Issuer != strings.TrimRight(provider.Issuer, "/") { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "cannot end with '/'")) + } + + // The specs are a bit ambiguous on whether this must or needn't be an https:// + // scheme, but they do require (MUST) TLS support for discovery, and we do + // require this in our API description + // https://openid.net/specs/openid-connect-discovery-1_0.html#TLSRequirements + url, issuerErrs := validation.ValidateSecureURL(provider.Issuer, fieldPath.Child("issuer")) + allErrs = append(allErrs, issuerErrs...) + if len(url.RawQuery) > 0 || len(url.Fragment) > 0 { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("issuer"), provider.Issuer, "must not specify query or fragment component")) + } + + allErrs = append(allErrs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.CA, false)...) + + for i, scope := range provider.ExtraScopes { + // https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E) + // for those without an ascii table, that's `!`, `#-[`, `]-~` inclusive. + for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fieldPath.Child("extraScopes").Index(i), scope, fmt.Sprintf("cannot contain %v", ch))) + } + } + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go new file mode 100644 index 0000000000000..2c243bcccaa47 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_openid_test.go @@ -0,0 +1,125 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func openidIDP() configv1.IdentityProviderConfig { + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeOpenID, + OpenID: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "oidc-ca"}, + ExtraScopes: []string{"email", "profile"}, + ExtraAuthorizeParameters: map[string]string{ + "include_granted_scopes": "true", + }, + Claims: configv1.OpenIDClaims{ + PreferredUsername: []string{"full_name", "email"}, + Email: []string{"email"}, + }, + }, + } +} + +func TestValidateOpenIDIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.OpenIDIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "missing client ID and secret", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + Issuer: "https://bigcorp.oidc.com", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("clientID"), ""), + field.Required(field.NewPath("clientSecret", "name"), ""), + }, + }, + { + name: "missing issuer", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "", "must contain a scheme (e.g.
https://)"), + field.Invalid(field.NewPath("issuer"), "", "must contain a host"), + }, + }, + { + name: "issuer with http:// scheme", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "gentleDolphin", + ClientSecret: configv1.SecretNameReference{Name: "seemsliggit"}, + Issuer: "http://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("issuer"), "http://oidc-friendly.domain.com", "must use https scheme"), + }, + }, + { + name: "bad CA refname", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + CA: configv1.ConfigMapNameReference{Name: "the_Nameofaca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "the_Nameofaca", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "minimal working example", + args: args{ + provider: &configv1.OpenIDIdentityProvider{ + ClientID: "readallPerson", + ClientSecret: configv1.SecretNameReference{Name: "oidc-secret"}, + Issuer: "https://oidc-friendly.domain.com", + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: openidIDP().OpenID, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateOpenIDIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateOpenIDIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go new file mode 100644 index 0000000000000..93b7c5844cd4f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader.go @@ -0,0 +1,85 @@ +package oauth + +import ( + "fmt" + "net/url" + "path" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/validation" + crvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const ( + // URLToken in the query of the redirectURL gets replaced with the original request URL, escaped as a query parameter. + // Example use: https://www.example.com/login?then=${url} + urlToken = "${url}" + + // QueryToken in the query of the redirectURL gets replaced with the original request URL, unescaped. + // Example use: https://www.example.com/sso/oauth/authorize?${query} + queryToken = "${query}" +) + +func ValidateRequestHeaderIdentityProvider(provider *configv1.RequestHeaderIdentityProvider, fieldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if provider == nil { + errs = append(errs, field.Required(fieldPath, "")) + return errs + } + + errs = append(errs, crvalidation.ValidateConfigMapReference(fieldPath.Child("ca"), provider.ClientCA, true)...) 
+ + if len(provider.Headers) == 0 { + errs = append(errs, field.Required(fieldPath.Child("headers"), "")) + } + + if len(provider.ChallengeURL) == 0 && len(provider.LoginURL) == 0 { + errs = append(errs, field.Required(fieldPath, "at least one of challengeURL or loginURL must be specified")) + } + + if len(provider.ChallengeURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.ChallengeURL, fieldPath.Child("challengeURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(field.NewPath("challengeURL"), provider.ChallengeURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken)), + ) + } + } + } + + if len(provider.LoginURL) > 0 { + u, urlErrs := validation.ValidateURL(provider.LoginURL, fieldPath.Child("loginURL")) + errs = append(errs, urlErrs...) + if len(urlErrs) == 0 { + if !hasParamToken(u) { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, + fmt.Sprintf("query does not include %q or %q, redirect will not preserve original authorize parameters", urlToken, queryToken), + ), + ) + } + if strings.HasSuffix(u.Path, "/") { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path ends with "/", grant approval flows will not function correctly`), + ) + } + if _, file := path.Split(u.Path); file != "authorize" { + errs = append(errs, + field.Invalid(fieldPath.Child("loginURL"), provider.LoginURL, `path does not end with "/authorize", grant approval flows will not function correctly`), + ) + } + } + } + + return errs +} + +func hasParamToken(u *url.URL) bool { + return strings.Contains(u.RawQuery, urlToken) || strings.Contains(u.RawQuery, queryToken) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go new file mode 100644 index 0000000000000..44e590f0b2b5e --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/oauth/validate_requestheader_test.go @@ -0,0 +1,193 @@ +package oauth + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func requestHeaderIDP(challenge, login bool) configv1.IdentityProviderConfig { + var challengeURL, loginURL string + + if challenge { + challengeURL = "https://sso.corporate.coolpeople.se/challenges/oauth/authorize?${query}" + } + if login { + loginURL = "https://sso.corporate.coolpeople.se/loginz/oauth/authorize?${query}" + } + + return configv1.IdentityProviderConfig{ + Type: configv1.IdentityProviderTypeRequestHeader, + RequestHeader: &configv1.RequestHeaderIdentityProvider{ + LoginURL: loginURL, + ChallengeURL: challengeURL, + ClientCA: configv1.ConfigMapNameReference{ + Name: "coolpeople-client-ca", + }, + ClientCommonNames: []string{"authn-proxy"}, + Headers: []string{"X-Remote-User", "SSO-User"}, + NameHeaders: []string{"X-Remote-User-Display-Name"}, + }, + } +} + +func TestValidateRequestHeaderIdentityProvider(t *testing.T) { + type args struct { + provider *configv1.RequestHeaderIdentityProvider + fieldPath *field.Path + } + tests := []struct { + name string + args args + want field.ErrorList + }{ + { + name: "nil input provider", + want: field.ErrorList{ + field.Required(nil, ""), + }, + }, + { + name: "empty provider", + args: args{ + provider: 
&configv1.RequestHeaderIdentityProvider{}, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Required(field.NewPath("headers"), ""), + {Type: field.ErrorTypeRequired, Field: "", BadValue: "", Detail: "at least one of challengeURL or loginURL must be specified"}, + }, + }, + { + name: "wrong ca refname", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + LoginURL: "http://oauth.coolpeoplecorp.com/login/authorize?${query}", + Headers: []string{"X-Remote-User"}, + ClientCA: configv1.ConfigMapNameReference{Name: "dat_badrefname"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("ca", "name"), "dat_badrefname", wrongConfigMapSecretErrMsg), + }, + }, + { + name: "challenge url without query, no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - no ${url}, ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("challengeURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint?${sender}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "challenge url with query - ${url}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + ChallengeURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint?${url}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "login url without query and authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/challenge-endpoint", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/challenge-endpoint", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - no ${url}, ${query} - no client CA set", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", + }, + }, + want: field.ErrorList{ + field.Required(field.NewPath("ca", "name"), ""), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${custom}", "query does not include \"${url}\" or \"${query}\", redirect will not preserve original authorize parameters"), + }, + }, + { + name: "login url with query - ${query} - no 
/authorize", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query} - ends with /", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{ + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path ends with \"/\", grant approval flows will not function correctly"), + field.Invalid(field.NewPath("loginURL"), "http://oauth.coolpeoplecorp.com/login-endpoint/authorize/?${query}", "path does not end with \"/authorize\", grant approval flows will not function correctly"), + }, + }, + { + name: "login url with query - ${query}", + args: args{ + provider: &configv1.RequestHeaderIdentityProvider{ + Headers: []string{"X-Remote-User"}, + LoginURL: "http://oauth.coolpeoplecorp.com/login-endpoint/authorize?${query}", + ClientCA: configv1.ConfigMapNameReference{Name: "auth-ca"}, + }, + }, + want: field.ErrorList{}, + }, + { + name: "more complicated use", + args: args{ + provider: requestHeaderIDP(true, true).RequestHeader, + }, + want: field.ErrorList{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ValidateRequestHeaderIdentityProvider(tt.args.provider, tt.args.fieldPath); !reflect.DeepEqual(got, tt.want) { + t.Errorf("ValidateRequestHeaderIdentityProvider() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go new file mode 100644 index 0000000000000..06ff8d967a1f8 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/project/validate_project.go @@ -0,0 +1,111 @@ +package project + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + validationutil "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateProject" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("projects"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Project"): projectV1{}, + }) + }) +} + +func toProjectV1(uncastObj runtime.Object) (*configv1.Project, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Project) + if !ok { + return 
nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Project"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type projectV1 struct { +} + +func validateProjectSpec(spec configv1.ProjectSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if len(spec.ProjectRequestMessage) > 4096 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestMessage"), spec, validationutil.MaxLenError(4096))) + } + + if name := spec.ProjectRequestTemplate.Name; len(name) > 0 { + for _, msg := range validation.NameIsDNSSubdomain(spec.ProjectRequestTemplate.Name, false) { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec.projectRequestTemplate.name"), name, msg)) + } + } + + return allErrs +} + +func (projectV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, allErrs := toProjectV1(uncastObj) + if len(allErrs) > 0 { + return allErrs + } + oldObj, allErrs := toProjectV1(uncastOldObj) + if len(allErrs) > 0 { + return allErrs + } + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateProjectSpec(obj.Spec)...) + + return allErrs +} + +func (projectV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { + obj, errs := toProjectV1(uncastObj) + if len(errs) > 0 { + return errs + } + oldObj, errs := toProjectV1(uncastOldObj) + if len(errs) > 0 { + return errs + } + + // TODO validate the obj. remember that status validation should *never* fail on spec validation errors. + errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) 
+ + return errs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go new file mode 100644 index 0000000000000..f8e04de28bdfc --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validate_rbr.go @@ -0,0 +1,83 @@ +package rolebindingrestriction + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + authorizationv1 "github.com/openshift/api/authorization/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + rbrvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation" +) + +const PluginName = "authorization.openshift.io/ValidateRoleBindingRestriction" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: authorizationv1.GroupName, Resource: "rolebindingrestrictions"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + authorizationv1.GroupVersion.WithKind("RoleBindingRestriction"): roleBindingRestrictionV1{}, + }) + }) +} + +func toRoleBindingRestriction(uncastObj runtime.Object) (*authorizationv1.RoleBindingRestriction, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*authorizationv1.RoleBindingRestriction) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"RoleBindingRestriction"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{authorizationv1.GroupVersion.String()})) + } + + return obj, nil +} + +type roleBindingRestrictionV1 struct { +} + +func (roleBindingRestrictionV1) ValidateCreate(obj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestriction(roleBindingRestrictionObj)...) + + return errs +} + +func (roleBindingRestrictionV1) ValidateUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + roleBindingRestrictionObj, errs := toRoleBindingRestriction(obj) + if len(errs) > 0 { + return errs + } + roleBindingRestrictionOldObj, errs := toRoleBindingRestriction(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, validation.ValidateObjectMeta(&roleBindingRestrictionObj.ObjectMeta, true, validation.NameIsDNSSubdomain, field.NewPath("metadata"))...) + errs = append(errs, rbrvalidation.ValidateRoleBindingRestrictionUpdate(roleBindingRestrictionObj, roleBindingRestrictionOldObj)...) 
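+
+	// Editor's note: ValidateRoleBindingRestrictionUpdate re-runs
+	// ValidateRoleBindingRestriction, which performs its own ObjectMeta
+	// validation, so metadata problems can be reported by both calls above.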
+ + return errs +} + +func (r roleBindingRestrictionV1) ValidateStatusUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return r.ValidateUpdate(obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go new file mode 100644 index 0000000000000..e6ee1a40b28d3 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/rolebindingrestriction/validation/validation.go @@ -0,0 +1,113 @@ +package validation + +import ( + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core/validation" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func ValidateRoleBindingRestriction(rbr *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&rbr.ObjectMeta, true, + apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) + + allErrs = append(allErrs, + ValidateRoleBindingRestrictionSpec(&rbr.Spec, field.NewPath("spec"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionUpdate(rbr, old *authorizationv1.RoleBindingRestriction) field.ErrorList { + allErrs := ValidateRoleBindingRestriction(rbr) + + allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&rbr.ObjectMeta, + &old.ObjectMeta, field.NewPath("metadata"))...) + + return allErrs +} + +func ValidateRoleBindingRestrictionSpec(spec *authorizationv1.RoleBindingRestrictionSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify exactly one of userrestriction, grouprestriction, or serviceaccountrestriction` + + if spec.UserRestriction != nil { + if spec.GroupRestriction != nil { + allErrs = append(allErrs, field.Invalid(fld.Child("grouprestriction"), + "both userrestriction and grouprestriction specified", invalidMsg)) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both userrestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.GroupRestriction != nil { + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, + field.Invalid(fld.Child("serviceaccountrestriction"), + "both grouprestriction and serviceaccountrestriction specified", invalidMsg)) + } + } else if spec.ServiceAccountRestriction == nil { + allErrs = append(allErrs, field.Required(fld.Child("userrestriction"), + invalidMsg)) + } + + if spec.UserRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionUser(spec.UserRestriction, fld.Child("userrestriction"))...) + } + if spec.GroupRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionGroup(spec.GroupRestriction, fld.Child("grouprestriction"))...) + } + if spec.ServiceAccountRestriction != nil { + allErrs = append(allErrs, ValidateRoleBindingRestrictionServiceAccount(spec.ServiceAccountRestriction, fld.Child("serviceaccountrestriction"))...) 
+ } + + return allErrs +} + +func ValidateRoleBindingRestrictionUser(user *authorizationv1.UserRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one user, group, or label selector` + + if !(len(user.Users) > 0 || len(user.Groups) > 0 || len(user.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("users"), invalidMsg)) + } + + for i, selector := range user.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionGroup(group *authorizationv1.GroupRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one group or label selector` + + if !(len(group.Groups) > 0 || len(group.Selectors) > 0) { + allErrs = append(allErrs, field.Required(fld.Child("groups"), invalidMsg)) + } + + for i, selector := range group.Selectors { + allErrs = append(allErrs, + unversionedvalidation.ValidateLabelSelector(&selector, + fld.Child("selector").Index(i))...) + } + + return allErrs +} + +func ValidateRoleBindingRestrictionServiceAccount(sa *authorizationv1.ServiceAccountRestriction, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + const invalidMsg = `must specify at least one service account or namespace` + + if !(len(sa.ServiceAccounts) > 0 || len(sa.Namespaces) > 0) { + allErrs = append(allErrs, + field.Required(fld.Child("serviceaccounts"), invalidMsg)) + } + + return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go new file mode 100644 index 0000000000000..ec5f517663b66 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/scheduler/validate_scheduler.go @@ -0,0 +1,106 @@ +package scheduler + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + configv1 "github.com/openshift/api/config/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" +) + +const PluginName = "config.openshift.io/ValidateScheduler" + +// Register registers a plugin +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + configv1.Resource("schedulers"): true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + configv1.GroupVersion.WithKind("Scheduler"): schedulerV1{}, + }) + }) +} + +func toSchedulerV1(uncastObj runtime.Object) (*configv1.Scheduler, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + allErrs := field.ErrorList{} + + obj, ok := uncastObj.(*configv1.Scheduler) + if !ok { + return nil, append(allErrs, + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Scheduler"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) + } + + return obj, nil +} + +type schedulerV1 struct { +} + +func validateSchedulerSpec(spec configv1.SchedulerSpec) field.ErrorList { + allErrs := field.ErrorList{} + + if name := spec.Policy.Name; len(name) 
> 0 {
+		for _, msg := range validation.NameIsDNSSubdomain(spec.Policy.Name, false) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec.policy.name"), name, msg))
+		}
+	}
+
+	return allErrs
+}
+
+func (schedulerV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList {
+	obj, allErrs := toSchedulerV1(uncastObj)
+	if len(allErrs) > 0 {
+		return allErrs
+	}
+
+	allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...)
+	allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...)
+
+	return allErrs
+}
+
+func (schedulerV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList {
+	obj, allErrs := toSchedulerV1(uncastObj)
+	if len(allErrs) > 0 {
+		return allErrs
+	}
+	oldObj, allErrs := toSchedulerV1(uncastOldObj)
+	if len(allErrs) > 0 {
+		return allErrs
+	}
+
+	allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...)
+	allErrs = append(allErrs, validateSchedulerSpec(obj.Spec)...)
+
+	return allErrs
+}
+
+func (schedulerV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList {
+	obj, errs := toSchedulerV1(uncastObj)
+	if len(errs) > 0 {
+		return errs
+	}
+	oldObj, errs := toSchedulerV1(uncastOldObj)
+	if len(errs) > 0 {
+		return errs
+	}
+
+	// TODO validate the obj. remember that status validation should *never* fail on spec validation errors.
+	errs = append(errs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...)
+
+	return errs
+}
diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go
new file mode 100644
index 0000000000000..1a7193eff7c75
--- /dev/null
+++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc.go
@@ -0,0 +1,93 @@
+package securitycontextconstraints
+
+import (
+	"bytes"
+	"context"
+	"io"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/admission"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+const DefaultingPluginName = "security.openshift.io/DefaultSecurityContextConstraints"
+
+func RegisterDefaulting(plugins *admission.Plugins) {
+	plugins.Register(DefaultingPluginName, func(config io.Reader) (admission.Interface, error) {
+		return NewDefaulter(), nil
+	})
+}
+
+type defaultSCC struct {
+	*admission.Handler
+
+	scheme       *runtime.Scheme
+	codecFactory runtimeserializer.CodecFactory
+}
+
+var _ admission.MutationInterface = &defaultSCC{}
+
+func NewDefaulter() admission.Interface {
+	scheme := runtime.NewScheme()
+	codecFactory := runtimeserializer.NewCodecFactory(scheme)
+	utilruntime.Must(securityv1.Install(scheme))
+
+	return &defaultSCC{
+		Handler:      admission.NewHandler(admission.Create, admission.Update),
+		scheme:       scheme,
+		codecFactory: codecFactory,
+	}
+}
+
+// Admit defaults an SCC by round-tripping it unstructured > typed external
+// (securityv1) > defaulted external > unstructured
+func (a *defaultSCC) Admit(ctx context.Context, attributes admission.Attributes, o admission.ObjectInterfaces) error {
+	if a.shouldIgnore(attributes) {
return nil + } + + unstructuredOrig, ok := attributes.GetObject().(*unstructured.Unstructured) + if !ok { + return nil + } + buf := &bytes.Buffer{} + if err := unstructured.UnstructuredJSONScheme.Encode(unstructuredOrig, buf); err != nil { + return err + } + + uncastObj, err := runtime.Decode(a.codecFactory.UniversalDeserializer(), buf.Bytes()) + if err != nil { + return err + } + + outSCCExternal := uncastObj.(*securityv1.SecurityContextConstraints) + SetDefaults_SCC(outSCCExternal) + defaultedBytes, err := runtime.Encode(a.codecFactory.LegacyCodec(securityv1.GroupVersion), outSCCExternal) + if err != nil { + return err + } + outUnstructured := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode(defaultedBytes, nil, outUnstructured); err != nil { + return err + } + + unstructuredOrig.Object = outUnstructured.Object + + return nil +} + +func (a *defaultSCC) shouldIgnore(attributes admission.Attributes) bool { + if attributes.GetResource().GroupResource() != (schema.GroupResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}) { + return true + } + // if a subresource is specified, skip it + if len(attributes.GetSubresource()) > 0 { + return true + } + + return false +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go new file mode 100644 index 0000000000000..16c6d56af2e2f --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaulting_scc_test.go @@ -0,0 +1,274 @@ +package securitycontextconstraints + +import ( + "bytes" + "context" + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +func TestDefaultingHappens(t *testing.T) { + inputSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + "apiVersion": "security.openshift.io/v1", + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter" + }, + "readOnlyRootFilesystem": false, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "users": [] +}` + + inputUnstructured := &unstructured.Unstructured{} + _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(inputSCC), nil, inputUnstructured) + if err != nil { + t.Fatal(err) + } + + attributes := admission.NewAttributesRecord(inputUnstructured, nil, schema.GroupVersionKind{}, "", "", schema.GroupVersionResource{Group: "security.openshift.io", Resource: "securitycontextconstraints"}, "", admission.Create, nil, false, nil) + defaulter := NewDefaulter() + if err := defaulter.(*defaultSCC).Admit(context.TODO(), attributes, nil); err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + if err := unstructured.UnstructuredJSONScheme.Encode(inputUnstructured, buf); err != nil { + t.Fatal(err) + } + + expectedSCC := `{ + "allowHostDirVolumePlugin": true, + "allowHostIPC": false, + "allowHostNetwork": true, + "allowHostPID": true, + "allowHostPorts": true, + 
"allowPrivilegeEscalation": true, + "allowPrivilegedContainer": false, + "allowedCapabilities": null, + "apiVersion": "security.openshift.io/v1", + "defaultAddCapabilities": null, + "fsGroup": { + "type": "RunAsAny" + }, + "groups": [], + "kind": "SecurityContextConstraints", + "metadata": { + "annotations": { + "kubernetes.io/description": "node-exporter scc is used for the Prometheus node exporter" + }, + "name": "node-exporter", + "creationTimestamp":null + }, + "priority": null, + "readOnlyRootFilesystem": false, + "requiredDropCapabilities": null, + "runAsUser": { + "type": "RunAsAny" + }, + "seLinuxContext": { + "type": "RunAsAny" + }, + "supplementalGroups": { + "type": "RunAsAny" + }, + "users": [], + "volumes": [ + "*" + ] +}` + expectedUnstructured := &unstructured.Unstructured{} + if _, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(expectedSCC), nil, expectedUnstructured); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expectedUnstructured.Object, inputUnstructured.Object) { + t.Fatal(diff.ObjectDiff(expectedUnstructured.Object, inputUnstructured.Object)) + } +} + +func TestDefaultSecurityContextConstraints(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedFSGroup securityv1.FSGroupStrategyType + expectedSupGroup securityv1.SupplementalGroupsStrategyType + }{ + "shouldn't default": { + scc: &securityv1.SecurityContextConstraints{ + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default fsgroup runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + "default fsgroup runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyRunAsAny, + expectedSupGroup: securityv1.SupplementalGroupsStrategyMustRunAs, + }, + "default sup group runAsAny with mustRunAs UID strategy": { + scc: &securityv1.SecurityContextConstraints{ + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyMustRunAsRange, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyMustRunAs, + }, + }, + expectedFSGroup: securityv1.FSGroupStrategyMustRunAs, + expectedSupGroup: 
securityv1.SupplementalGroupsStrategyRunAsAny, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + if v.scc.FSGroup.Type != v.expectedFSGroup { + t.Errorf("%s has invalid fsgroup. Expected: %v got: %v", k, v.expectedFSGroup, v.scc.FSGroup.Type) + } + if v.scc.SupplementalGroups.Type != v.expectedSupGroup { + t.Errorf("%s has invalid supplemental group. Expected: %v got: %v", k, v.expectedSupGroup, v.scc.SupplementalGroups.Type) + } + } +} + +func TestDefaultSCCVolumes(t *testing.T) { + tests := map[string]struct { + scc *securityv1.SecurityContextConstraints + expectedVolumes []securityv1.FSType + expectedHostDir bool + }{ + // this expects the volumes to default to all for an empty volume slice + // but since the host dir setting is false it should be all - host dir + "old client - default allow* fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{}, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + // this expects the volumes to default to all for an empty volume slice + "old client - set allowHostDir true fields, no volumes slice": { + scc: &securityv1.SecurityContextConstraints{ + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeAll}, + expectedHostDir: true, + }, + "new client - allow* fields set with matching volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - allow* fields set with mismatch host dir volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - allow* fields set with mismatch FSTypeAll volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeAll}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: StringSetToFSType(sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath))), + expectedHostDir: false, + }, + "new client - allow* fields unset with volume slice": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir}, + expectedHostDir: false, + }, + "new client - extra volume params retained": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeHostPath, securityv1.FSTypeGitRepo}, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeEmptyDir, securityv1.FSTypeGitRepo}, + expectedHostDir: false, + }, + "new client - empty volume slice, host dir true": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: true, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeHostPath}, + expectedHostDir: true, + }, + "new client - empty volume slice, host dir false": { + scc: &securityv1.SecurityContextConstraints{ + Volumes: []securityv1.FSType{}, + AllowHostDirVolumePlugin: false, + }, + expectedVolumes: []securityv1.FSType{securityv1.FSTypeNone}, + 
expectedHostDir: false, + }, + } + for k, v := range tests { + SetDefaults_SCC(v.scc) + + if !reflect.DeepEqual(v.scc.Volumes, v.expectedVolumes) { + t.Errorf("%s has invalid volumes. Expected: %v got: %v", k, v.expectedVolumes, v.scc.Volumes) + } + + if v.scc.AllowHostDirVolumePlugin != v.expectedHostDir { + t.Errorf("%s has invalid host dir. Expected: %v got: %v", k, v.expectedHostDir, v.scc.AllowHostDirVolumePlugin) + } + } +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go new file mode 100644 index 0000000000000..e6e4b5ff44fc7 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/defaults.go @@ -0,0 +1,100 @@ +package securitycontextconstraints + +import ( + "k8s.io/apimachinery/pkg/util/sets" + + securityv1 "github.com/openshift/api/security/v1" + sccutil "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/util" +) + +// Default SCCs for new fields. FSGroup and SupplementalGroups are +// set to the RunAsAny strategy if they are unset on the scc. +func SetDefaults_SCC(scc *securityv1.SecurityContextConstraints) { + if len(scc.FSGroup.Type) == 0 { + scc.FSGroup.Type = securityv1.FSGroupStrategyRunAsAny + } + if len(scc.SupplementalGroups.Type) == 0 { + scc.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyRunAsAny + } + + if scc.Users == nil { + scc.Users = []string{} + } + if scc.Groups == nil { + scc.Groups = []string{} + } + + var defaultAllowedVolumes sets.String + switch { + case scc.Volumes == nil: + // assume a nil volume slice is allowing everything for backwards compatibility + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeAll)) + + case len(scc.Volumes) == 0 && scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but the boolean fields will always take precedence. + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeHostPath)) + + case len(scc.Volumes) == 0 && !scc.AllowHostDirVolumePlugin: + // an empty volume slice means "allow no volumes", but cannot be persisted in protobuf. + // convert this to volumes:["none"] + defaultAllowedVolumes = sets.NewString(string(securityv1.FSTypeNone)) + + default: + // defaults the volume slice of the SCC. + // In order to support old clients the boolean fields will always take precedence. + defaultAllowedVolumes = fsTypeToStringSet(scc.Volumes) + } + + if scc.AllowHostDirVolumePlugin { + // if already allowing all then there is no reason to add + if !defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) { + defaultAllowedVolumes.Insert(string(securityv1.FSTypeHostPath)) + } + } else { + // we should only default all volumes if the SCC came in with FSTypeAll or we defaulted it + // otherwise we should only change the volumes slice to ensure that it does not conflict with + // the AllowHostDirVolumePlugin setting + shouldDefaultAllVolumes := defaultAllowedVolumes.Has(string(securityv1.FSTypeAll)) + + // remove anything from volumes that conflicts with AllowHostDirVolumePlugin = false + defaultAllowedVolumes.Delete(string(securityv1.FSTypeAll)) + defaultAllowedVolumes.Delete(string(securityv1.FSTypeHostPath)) + + if shouldDefaultAllVolumes { + allVolumes := sccutil.GetAllFSTypesExcept(string(securityv1.FSTypeHostPath)) + defaultAllowedVolumes.Insert(allVolumes.List()...) 
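+
+			// Editor's note: this branch is what turns an incoming
+			// volumes:["*"] with allowHostDirVolumePlugin:false into every
+			// FSType except "hostPath" (compare the "mismatch FSTypeAll
+			// volume slice" case in defaulting_scc_test.go).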
+ } + } + + scc.Volumes = StringSetToFSType(defaultAllowedVolumes) + + // Constraints that do not include this field must remain as permissive as + // they were prior to the introduction of this field. + if scc.AllowPrivilegeEscalation == nil { + t := true + scc.AllowPrivilegeEscalation = &t + } + +} + +func StringSetToFSType(set sets.String) []securityv1.FSType { + if set == nil { + return nil + } + volumes := []securityv1.FSType{} + for _, v := range set.List() { + volumes = append(volumes, securityv1.FSType(v)) + } + return volumes +} + +func fsTypeToStringSet(volumes []securityv1.FSType) sets.String { + if volumes == nil { + return nil + } + set := sets.NewString() + for _, v := range volumes { + set.Insert(string(v)) + } + return set +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go new file mode 100644 index 0000000000000..79ae37f27ccdb --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validate_scc.go @@ -0,0 +1,79 @@ +package securitycontextconstraints + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + + securityv1 "github.com/openshift/api/security/v1" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation" + sccvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation" +) + +const PluginName = "security.openshift.io/ValidateSecurityContextConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { + return customresourcevalidation.NewValidator( + map[schema.GroupResource]bool{ + {Group: securityv1.GroupName, Resource: "securitycontextconstraints"}: true, + }, + map[schema.GroupVersionKind]customresourcevalidation.ObjectValidator{ + securityv1.GroupVersion.WithKind("SecurityContextConstraints"): securityContextConstraintsV1{}, + }) + }) +} + +func toSecurityContextConstraints(uncastObj runtime.Object) (*securityv1.SecurityContextConstraints, field.ErrorList) { + if uncastObj == nil { + return nil, nil + } + + obj, ok := uncastObj.(*securityv1.SecurityContextConstraints) + if !ok { + return nil, field.ErrorList{ + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"SecurityContextConstraints"}), + field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{securityv1.GroupVersion.String()}), + } + } + + return obj, nil +} + +type securityContextConstraintsV1 struct { +} + +func (securityContextConstraintsV1) ValidateCreate(obj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraints(securityContextConstraintsObj)...) 
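+	// Editor's note: no separate ValidateObjectMeta call is needed here;
+	// ValidateSecurityContextConstraints begins with ObjectMeta validation.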
+ + return errs +} + +func (securityContextConstraintsV1) ValidateUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + securityContextConstraintsObj, errs := toSecurityContextConstraints(obj) + if len(errs) > 0 { + return errs + } + securityContextConstraintsOldObj, errs := toSecurityContextConstraints(oldObj) + if len(errs) > 0 { + return errs + } + + errs = append(errs, sccvalidation.ValidateSecurityContextConstraintsUpdate(securityContextConstraintsObj, securityContextConstraintsOldObj)...) + + return errs +} + +func (c securityContextConstraintsV1) ValidateStatusUpdate(obj runtime.Object, oldObj runtime.Object) field.ErrorList { + return c.ValidateUpdate(obj, oldObj) +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go new file mode 100644 index 0000000000000..493339867b8c5 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation.go @@ -0,0 +1,275 @@ +package validation + +import ( + "fmt" + "regexp" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/validation" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + kapivalidation "k8s.io/kubernetes/pkg/apis/core/validation" + + securityv1 "github.com/openshift/api/security/v1" +) + +// ValidateSecurityContextConstraintsName can be used to check whether the given +// security context constraint name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecurityContextConstraintsName = apimachineryvalidation.NameIsDNSSubdomain + +func ValidateSecurityContextConstraints(scc *securityv1.SecurityContextConstraints) field.ErrorList { + allErrs := validation.ValidateObjectMeta(&scc.ObjectMeta, false, ValidateSecurityContextConstraintsName, field.NewPath("metadata")) + + if scc.Priority != nil { + if *scc.Priority < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("priority"), *scc.Priority, "priority cannot be negative")) + } + } + + // ensure the user strategy has a valid type + runAsUserPath := field.NewPath("runAsUser") + switch scc.RunAsUser.Type { + case securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyRunAsAny, securityv1.RunAsUserStrategyMustRunAsRange: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. Valid values are %s, %s, %s, %s", securityv1.RunAsUserStrategyMustRunAs, securityv1.RunAsUserStrategyMustRunAsNonRoot, securityv1.RunAsUserStrategyMustRunAsRange, securityv1.RunAsUserStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("type"), scc.RunAsUser.Type, msg)) + } + + // if specified, uid cannot be negative + if scc.RunAsUser.UID != nil { + if *scc.RunAsUser.UID < 0 { + allErrs = append(allErrs, field.Invalid(runAsUserPath.Child("uid"), *scc.RunAsUser.UID, "uid cannot be negative")) + } + } + + // ensure the selinux strategy has a valid type + seLinuxContextPath := field.NewPath("seLinuxContext") + switch scc.SELinuxContext.Type { + case securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny: + //good types + default: + msg := fmt.Sprintf("invalid strategy type. 
Valid values are %s, %s", securityv1.SELinuxStrategyMustRunAs, securityv1.SELinuxStrategyRunAsAny) + allErrs = append(allErrs, field.Invalid(seLinuxContextPath.Child("type"), scc.SELinuxContext.Type, msg)) + } + + // ensure the fsgroup strategy has a valid type + if scc.FSGroup.Type != securityv1.FSGroupStrategyMustRunAs && scc.FSGroup.Type != securityv1.FSGroupStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("fsGroup", "type"), scc.FSGroup.Type, + []string{string(securityv1.FSGroupStrategyMustRunAs), string(securityv1.FSGroupStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.FSGroup.Ranges, field.NewPath("fsGroup"))...) + + if scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyMustRunAs && + scc.SupplementalGroups.Type != securityv1.SupplementalGroupsStrategyRunAsAny { + allErrs = append(allErrs, field.NotSupported(field.NewPath("supplementalGroups", "type"), scc.SupplementalGroups.Type, + []string{string(securityv1.SupplementalGroupsStrategyMustRunAs), string(securityv1.SupplementalGroupsStrategyRunAsAny)})) + } + allErrs = append(allErrs, validateIDRanges(scc.SupplementalGroups.Ranges, field.NewPath("supplementalGroups"))...) + + // validate capabilities + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) + allErrs = append(allErrs, validateSCCCapsAgainstDrops(scc.RequiredDropCapabilities, scc.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) + + if hasCap(securityv1.AllowAllCapabilities, scc.AllowedCapabilities) && len(scc.RequiredDropCapabilities) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("requiredDropCapabilities"), scc.RequiredDropCapabilities, + "required capabilities must be empty when all capabilities are allowed by a wildcard")) + } + + allErrs = append(allErrs, validateSCCDefaultAllowPrivilegeEscalation(field.NewPath("defaultAllowPrivilegeEscalation"), scc.DefaultAllowPrivilegeEscalation, scc.AllowPrivilegeEscalation)...) + + allowsFlexVolumes := false + hasNoneVolume := false + + if len(scc.Volumes) > 0 { + for _, fsType := range scc.Volumes { + if fsType == securityv1.FSTypeNone { + hasNoneVolume = true + + } else if fsType == securityv1.FSTypeFlexVolume || fsType == securityv1.FSTypeAll { + allowsFlexVolumes = true + } + } + } + + if hasNoneVolume && len(scc.Volumes) > 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("volumes"), scc.Volumes, + "if 'none' is specified, no other values are allowed")) + } + + if len(scc.AllowedFlexVolumes) > 0 { + if allowsFlexVolumes { + for idx, allowedFlexVolume := range scc.AllowedFlexVolumes { + if len(allowedFlexVolume.Driver) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("allowedFlexVolumes").Index(idx).Child("driver"), + "must specify a driver")) + } + } + } else { + allErrs = append(allErrs, field.Invalid(field.NewPath("allowedFlexVolumes"), scc.AllowedFlexVolumes, + "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed")) + } + } + + allowedUnsafeSysctlsPath := field.NewPath("allowedUnsafeSysctls") + forbiddenSysctlsPath := field.NewPath("forbiddenSysctls") + allErrs = append(allErrs, validateSCCSysctls(allowedUnsafeSysctlsPath, scc.AllowedUnsafeSysctls)...) + allErrs = append(allErrs, validateSCCSysctls(forbiddenSysctlsPath, scc.ForbiddenSysctls)...) 
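+
+	// Illustrative example (editor's note, not validation logic): allowing
+	// "kernel.shm*" while forbidding "kernel.*" is rejected by the overlap
+	// check below, because the allowed pattern falls entirely inside the
+	// forbidden prefix.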
+	allErrs = append(allErrs, validatePodSecurityPolicySysctlListsDoNotOverlap(allowedUnsafeSysctlsPath, forbiddenSysctlsPath, scc.AllowedUnsafeSysctls, scc.ForbiddenSysctls)...)
+
+	return allErrs
+}
+
+const sysctlPatternSegmentFmt string = "([a-z0-9][-_a-z0-9]*)?[a-z0-9*]"
+const sysctlPatternFmt string = "(" + kapivalidation.SysctlSegmentFmt + "\\.)*" + sysctlPatternSegmentFmt
+
+var sysctlPatternRegexp = regexp.MustCompile("^" + sysctlPatternFmt + "$")
+
+func IsValidSysctlPattern(name string) bool {
+	if len(name) > kapivalidation.SysctlMaxLength {
+		return false
+	}
+	return sysctlPatternRegexp.MatchString(name)
+}
+
+// validatePodSecurityPolicySysctlListsDoNotOverlap validates that the values in the allowedUnsafeSysctls and forbiddenSysctls fields do not overlap, treating a trailing '*' as a prefix pattern.
+func validatePodSecurityPolicySysctlListsDoNotOverlap(allowedSysctlsFldPath, forbiddenSysctlsFldPath *field.Path, allowedUnsafeSysctls, forbiddenSysctls []string) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for i, allowedSysctl := range allowedUnsafeSysctls {
+		isAllowedSysctlPattern := false
+		allowedSysctlPrefix := ""
+		if strings.HasSuffix(allowedSysctl, "*") {
+			isAllowedSysctlPattern = true
+			allowedSysctlPrefix = strings.TrimSuffix(allowedSysctl, "*")
+		}
+		for j, forbiddenSysctl := range forbiddenSysctls {
+			isForbiddenSysctlPattern := false
+			forbiddenSysctlPrefix := ""
+			if strings.HasSuffix(forbiddenSysctl, "*") {
+				isForbiddenSysctlPattern = true
+				forbiddenSysctlPrefix = strings.TrimSuffix(forbiddenSysctl, "*")
+			}
+			switch {
+			case isAllowedSysctlPattern && isForbiddenSysctlPattern:
+				if strings.HasPrefix(allowedSysctlPrefix, forbiddenSysctlPrefix) {
+					allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
+				} else if strings.HasPrefix(forbiddenSysctlPrefix, allowedSysctlPrefix) {
+					allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl)))
+				}
+			case isAllowedSysctlPattern:
+				if strings.HasPrefix(forbiddenSysctl, allowedSysctlPrefix) {
+					allErrs = append(allErrs, field.Invalid(forbiddenSysctlsFldPath.Index(j), forbiddenSysctls[j], fmt.Sprintf("sysctl overlaps with %v", allowedSysctl)))
+				}
+			case isForbiddenSysctlPattern:
+				if strings.HasPrefix(allowedSysctl, forbiddenSysctlPrefix) {
+					allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
+				}
+			default:
+				if allowedSysctl == forbiddenSysctl {
+					allErrs = append(allErrs, field.Invalid(allowedSysctlsFldPath.Index(i), allowedUnsafeSysctls[i], fmt.Sprintf("sysctl overlaps with %v", forbiddenSysctl)))
+				}
+			}
+		}
+	}
+	return allErrs
+}
+
+// validateSCCSysctls validates the sysctls fields of SecurityContextConstraints.
+func validateSCCSysctls(fldPath *field.Path, sysctls []string) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(sysctls) == 0 {
+		return allErrs
+	}
+
+	coversAll := false
+	for i, s := range sysctls {
+		if len(s) == 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Index(i), sysctls[i], "empty sysctl not allowed"))
+		} else if !IsValidSysctlPattern(s) {
+			allErrs = append(
+				allErrs,
+				field.Invalid(fldPath.Index(i), sysctls[i], fmt.Sprintf("must have at most %d characters and match regex %s",
+					kapivalidation.SysctlMaxLength,
+					sysctlPatternFmt,
+				)),
+			)
+		} else if s[0] == '*' {
+			coversAll = true
+		}
+	}
+
+	if coversAll && len(sysctls) > 1 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("items"), "if '*' is present, must not specify other sysctls"))
+	}
+
+	return allErrs
+}
+
+// validateSCCCapsAgainstDrops ensures an allowed cap is not listed in the required drops.
+func validateSCCCapsAgainstDrops(requiredDrops []corev1.Capability, capsToCheck []corev1.Capability, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if requiredDrops == nil {
+		return allErrs
+	}
+	for _, cap := range capsToCheck {
+		if hasCap(cap, requiredDrops) {
+			allErrs = append(allErrs, field.Invalid(fldPath, cap,
+				fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String())))
+		}
+	}
+	return allErrs
+}
+
+// validateSCCDefaultAllowPrivilegeEscalation validates the DefaultAllowPrivilegeEscalation field against the AllowPrivilegeEscalation field of a SecurityContextConstraints.
+func validateSCCDefaultAllowPrivilegeEscalation(fldPath *field.Path, defaultAllowPrivilegeEscalation, allowPrivilegeEscalation *bool) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if defaultAllowPrivilegeEscalation != nil && allowPrivilegeEscalation != nil && *defaultAllowPrivilegeEscalation && !*allowPrivilegeEscalation {
+		allErrs = append(allErrs, field.Invalid(fldPath, defaultAllowPrivilegeEscalation, "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true"))
+	}
+
+	return allErrs
+}
+
+// hasCap checks for needle in haystack.
+func hasCap(needle corev1.Capability, haystack []corev1.Capability) bool {
+	for _, c := range haystack {
+		if needle == c {
+			return true
+		}
+	}
+	return false
+}
+
+// validateIDRanges ensures the range is valid.
+func validateIDRanges(rng []securityv1.IDRange, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	for i, r := range rng {
+		// if 0 <= Min <= Max then we do not need to validate max. It is always greater than or
+		// equal to 0 and Min.
+		minPath := fldPath.Child("ranges").Index(i).Child("min")
+		maxPath := fldPath.Child("ranges").Index(i).Child("max")
+
+		if r.Min < 0 {
+			allErrs = append(allErrs, field.Invalid(minPath, r.Min, "min cannot be negative"))
+		}
+		if r.Max < 0 {
+			allErrs = append(allErrs, field.Invalid(maxPath, r.Max, "max cannot be negative"))
+		}
+		if r.Min > r.Max {
+			allErrs = append(allErrs, field.Invalid(minPath, r, "min cannot be greater than max"))
+		}
+	}
+
+	return allErrs
+}
+
+func ValidateSecurityContextConstraintsUpdate(newScc, oldScc *securityv1.SecurityContextConstraints) field.ErrorList {
+	allErrs := validation.ValidateObjectMetaUpdate(&newScc.ObjectMeta, &oldScc.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateSecurityContextConstraints(newScc)...)
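+	// Editor's note: update validation re-runs the full create-time validation
+	// on the new object, so any create-time error can also surface on update.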
+ return allErrs +} diff --git a/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go new file mode 100644 index 0000000000000..01c4d472c0a22 --- /dev/null +++ b/openshift-kube-apiserver/admission/customresourcevalidation/securitycontextconstraints/validation/validation_test.go @@ -0,0 +1,343 @@ +package validation + +import ( + "fmt" + "testing" + + kcorev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + securityv1 "github.com/openshift/api/security/v1" +) + +func TestValidateSecurityContextConstraints(t *testing.T) { + var invalidUID int64 = -1 + var invalidPriority int32 = -1 + var validPriority int32 = 1 + yes := true + no := false + + validSCC := func() *securityv1.SecurityContextConstraints { + return &securityv1.SecurityContextConstraints{ + ObjectMeta: metav1.ObjectMeta{Name: "foo"}, + SELinuxContext: securityv1.SELinuxContextStrategyOptions{ + Type: securityv1.SELinuxStrategyRunAsAny, + }, + RunAsUser: securityv1.RunAsUserStrategyOptions{ + Type: securityv1.RunAsUserStrategyRunAsAny, + }, + FSGroup: securityv1.FSGroupStrategyOptions{ + Type: securityv1.FSGroupStrategyRunAsAny, + }, + SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{ + Type: securityv1.SupplementalGroupsStrategyRunAsAny, + }, + Priority: &validPriority, + } + } + + noUserOptions := validSCC() + noUserOptions.RunAsUser.Type = "" + + noSELinuxOptions := validSCC() + noSELinuxOptions.SELinuxContext.Type = "" + + invalidUserStratType := validSCC() + invalidUserStratType.RunAsUser.Type = "invalid" + + invalidSELinuxStratType := validSCC() + invalidSELinuxStratType.SELinuxContext.Type = "invalid" + + invalidUIDSCC := validSCC() + invalidUIDSCC.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + invalidUIDSCC.RunAsUser.UID = &invalidUID + + missingObjectMetaName := validSCC() + missingObjectMetaName.ObjectMeta.Name = "" + + noFSGroupOptions := validSCC() + noFSGroupOptions.FSGroup.Type = "" + + invalidFSGroupStratType := validSCC() + invalidFSGroupStratType.FSGroup.Type = "invalid" + + noSupplementalGroupsOptions := validSCC() + noSupplementalGroupsOptions.SupplementalGroups.Type = "" + + invalidSupGroupStratType := validSCC() + invalidSupGroupStratType.SupplementalGroups.Type = "invalid" + + invalidRangeMinGreaterThanMax := validSCC() + invalidRangeMinGreaterThanMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 2, Max: 1}, + } + + invalidRangeNegativeMin := validSCC() + invalidRangeNegativeMin.FSGroup.Ranges = []securityv1.IDRange{ + {Min: -1, Max: 10}, + } + + invalidRangeNegativeMax := validSCC() + invalidRangeNegativeMax.FSGroup.Ranges = []securityv1.IDRange{ + {Min: 1, Max: -10}, + } + + negativePriority := validSCC() + negativePriority.Priority = &invalidPriority + + requiredCapAddAndDrop := validSCC() + requiredCapAddAndDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + requiredCapAddAndDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + + allowedCapListedInRequiredDrop := validSCC() + allowedCapListedInRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + allowedCapListedInRequiredDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + wildcardAllowedCapAndRequiredDrop := validSCC() + wildcardAllowedCapAndRequiredDrop.RequiredDropCapabilities = []kcorev1.Capability{"foo"} + 
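+	// securityv1.AllowAllCapabilities is the "*" wildcard; allowing it alongside a non-empty
+	// requiredDropCapabilities is expected to fail validation (asserted in the error cases below).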
wildcardAllowedCapAndRequiredDrop.AllowedCapabilities = []kcorev1.Capability{securityv1.AllowAllCapabilities} + + emptyFlexDriver := validSCC() + emptyFlexDriver.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + emptyFlexDriver.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{}} + + nonEmptyFlexVolumes := validSCC() + nonEmptyFlexVolumes.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{{Driver: "example/driver"}} + + invalidDefaultAllowPrivilegeEscalation := validSCC() + invalidDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + invalidDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &no + + invalidAllowedUnsafeSysctlPattern := validSCC() + invalidAllowedUnsafeSysctlPattern.AllowedUnsafeSysctls = []string{"a.*.b"} + + invalidForbiddenSysctlPattern := validSCC() + invalidForbiddenSysctlPattern.ForbiddenSysctls = []string{"a.*.b"} + + invalidOverlappingSysctls := validSCC() + invalidOverlappingSysctls.ForbiddenSysctls = []string{"kernel.*", "net.ipv4.ip_local_port_range"} + invalidOverlappingSysctls.AllowedUnsafeSysctls = []string{"kernel.shmmax", "net.ipv4.ip_local_port_range"} + + invalidDuplicatedSysctls := validSCC() + invalidDuplicatedSysctls.ForbiddenSysctls = []string{"net.ipv4.ip_local_port_range"} + invalidDuplicatedSysctls.AllowedUnsafeSysctls = []string{"net.ipv4.ip_local_port_range"} + + errorCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + errorType field.ErrorType + errorDetail string + }{ + "no user options": { + scc: noUserOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "no selinux options": { + scc: noSELinuxOptions, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, RunAsAny", + }, + "no fsgroup options": { + scc: noFSGroupOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "no sup group options": { + scc: noSupplementalGroupsOptions, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid user strategy type": { + scc: invalidUserStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. Valid values are MustRunAs, MustRunAsNonRoot, MustRunAsRange, RunAsAny", + }, + "invalid selinux strategy type": { + scc: invalidSELinuxStratType, + errorType: field.ErrorTypeInvalid, + errorDetail: "invalid strategy type. 
Valid values are MustRunAs, RunAsAny", + }, + "invalid sup group strategy type": { + scc: invalidSupGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid fs group strategy type": { + scc: invalidFSGroupStratType, + errorType: field.ErrorTypeNotSupported, + errorDetail: "supported values: \"MustRunAs\", \"RunAsAny\"", + }, + "invalid uid": { + scc: invalidUIDSCC, + errorType: field.ErrorTypeInvalid, + errorDetail: "uid cannot be negative", + }, + "missing object meta name": { + scc: missingObjectMetaName, + errorType: field.ErrorTypeRequired, + errorDetail: "name or generateName is required", + }, + "invalid range min greater than max": { + scc: invalidRangeMinGreaterThanMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be greater than max", + }, + "invalid range negative min": { + scc: invalidRangeNegativeMin, + errorType: field.ErrorTypeInvalid, + errorDetail: "min cannot be negative", + }, + "invalid range negative max": { + scc: invalidRangeNegativeMax, + errorType: field.ErrorTypeInvalid, + errorDetail: "max cannot be negative", + }, + "negative priority": { + scc: negativePriority, + errorType: field.ErrorTypeInvalid, + errorDetail: "priority cannot be negative", + }, + "invalid required caps": { + scc: requiredCapAddAndDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in defaultAddCapabilities and requiredDropCapabilities", + }, + "allowed cap listed in required drops": { + scc: allowedCapListedInRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "capability is listed in allowedCapabilities and requiredDropCapabilities", + }, + "all caps allowed by a wildcard and required drops is not empty": { + scc: wildcardAllowedCapAndRequiredDrop, + errorType: field.ErrorTypeInvalid, + errorDetail: "required capabilities must be empty when all capabilities are allowed by a wildcard", + }, + "empty flex volume driver": { + scc: emptyFlexDriver, + errorType: field.ErrorTypeRequired, + errorDetail: "must specify a driver", + }, + "non-empty allowed flex volumes": { + scc: nonEmptyFlexVolumes, + errorType: field.ErrorTypeInvalid, + errorDetail: "volumes does not include 'flexVolume' or '*', so no flex volumes are allowed", + }, + "invalid defaultAllowPrivilegeEscalation": { + scc: invalidDefaultAllowPrivilegeEscalation, + errorType: field.ErrorTypeInvalid, + errorDetail: "Cannot set DefaultAllowPrivilegeEscalation to true without also setting AllowPrivilegeEscalation to true", + }, + "invalid allowed unsafe sysctl pattern": { + scc: invalidAllowedUnsafeSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid forbidden sysctl pattern": { + scc: invalidForbiddenSysctlPattern, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("must have at most 253 characters and match regex %s", sysctlPatternFmt), + }, + "invalid overlapping sysctl pattern": { + scc: invalidOverlappingSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidOverlappingSysctls.ForbiddenSysctls[0]), + }, + "invalid duplicated sysctls": { + scc: invalidDuplicatedSysctls, + errorType: field.ErrorTypeInvalid, + errorDetail: fmt.Sprintf("sysctl overlaps with %s", invalidDuplicatedSysctls.AllowedUnsafeSysctls[0]), + }, + } + + for k, v := range errorCases { + t.Run(k, func(t *testing.T) { + if errs := 
ValidateSecurityContextConstraints(v.scc); len(errs) == 0 || errs[0].Type != v.errorType || errs[0].Detail != v.errorDetail { + t.Errorf("Expected error type %q with detail %q, got %v", v.errorType, v.errorDetail, errs) + } + }) + } + + var validUID int64 = 1 + + mustRunAs := validSCC() + mustRunAs.FSGroup.Type = securityv1.FSGroupStrategyMustRunAs + mustRunAs.SupplementalGroups.Type = securityv1.SupplementalGroupsStrategyMustRunAs + mustRunAs.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAs + mustRunAs.RunAsUser.UID = &validUID + mustRunAs.SELinuxContext.Type = securityv1.SELinuxStrategyMustRunAs + + runAsNonRoot := validSCC() + runAsNonRoot.RunAsUser.Type = securityv1.RunAsUserStrategyMustRunAsNonRoot + + caseInsensitiveAddDrop := validSCC() + caseInsensitiveAddDrop.DefaultAddCapabilities = []kcorev1.Capability{"foo"} + caseInsensitiveAddDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + + caseInsensitiveAllowedDrop := validSCC() + caseInsensitiveAllowedDrop.RequiredDropCapabilities = []kcorev1.Capability{"FOO"} + caseInsensitiveAllowedDrop.AllowedCapabilities = []kcorev1.Capability{"foo"} + + flexvolumeWhenFlexVolumesAllowed := validSCC() + flexvolumeWhenFlexVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeFlexVolume} + flexvolumeWhenFlexVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver1"}, + } + + flexvolumeWhenAllVolumesAllowed := validSCC() + flexvolumeWhenAllVolumesAllowed.Volumes = []securityv1.FSType{securityv1.FSTypeAll} + flexvolumeWhenAllVolumesAllowed.AllowedFlexVolumes = []securityv1.AllowedFlexVolume{ + {Driver: "example/driver2"}, + } + + validDefaultAllowPrivilegeEscalation := validSCC() + validDefaultAllowPrivilegeEscalation.DefaultAllowPrivilegeEscalation = &yes + validDefaultAllowPrivilegeEscalation.AllowPrivilegeEscalation = &yes + + withForbiddenSysctl := validSCC() + withForbiddenSysctl.ForbiddenSysctls = []string{"net.*"} + + withAllowedUnsafeSysctl := validSCC() + withAllowedUnsafeSysctl.AllowedUnsafeSysctls = []string{"net.ipv4.tcp_max_syn_backlog"} + + successCases := map[string]struct { + scc *securityv1.SecurityContextConstraints + }{ + "must run as": { + scc: mustRunAs, + }, + "run as any": { + scc: validSCC(), + }, + "run as non-root (user only)": { + scc: runAsNonRoot, + }, + "comparison for add -> drop is case sensitive": { + scc: caseInsensitiveAddDrop, + }, + "comparison for allowed -> drop is case sensitive": { + scc: caseInsensitiveAllowedDrop, + }, + "allow white-listed flexVolume when flex volumes are allowed": { + scc: flexvolumeWhenFlexVolumesAllowed, + }, + "allow white-listed flexVolume when all volumes are allowed": { + scc: flexvolumeWhenAllVolumesAllowed, + }, + "valid defaultAllowPrivilegeEscalation as true": { + scc: validDefaultAllowPrivilegeEscalation, + }, + "with network sysctls forbidden": { + scc: withForbiddenSysctl, + }, + "with unsafe net.ipv4.tcp_max_syn_backlog sysctl allowed": { + scc: withAllowedUnsafeSysctl, + }, + } + + for k, v := range successCases { + if errs := ValidateSecurityContextConstraints(v.scc); len(errs) != 0 { + t.Errorf("Expected success for %q, got %v", k, errs) + } + } +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/decorator.go b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go new file mode 100644 index 0000000000000..5cfd9dae5ba6e --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/decorator.go @@ -0,0 +1,60 @@ +package namespaceconditions + +import ( + 
"k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +// this is a list of namespaces with special meaning. The kube ones are here in particular because +// we don't control their creation or labeling on their creation +var runLevelZeroNamespaces = sets.NewString("default", "kube-system", "kube-public") +var runLevelOneNamespaces = sets.NewString("openshift-node", "openshift-infra", "openshift") + +func init() { + runLevelOneNamespaces.Insert(runLevelZeroNamespaces.List()...) +} + +// NamespaceLabelConditions provides a decorator that can delegate and conditionally add label conditions +type NamespaceLabelConditions struct { + NamespaceClient corev1client.NamespacesGetter + NamespaceLister corev1lister.NamespaceLister + + SkipLevelZeroNames sets.String + SkipLevelOneNames sets.String +} + +func (d *NamespaceLabelConditions) WithNamespaceLabelConditions(admissionPlugin admission.Interface, name string) admission.Interface { + switch { + case d.SkipLevelOneNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 and 1 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelOneSelector, + }, + namespacesToExclude: runLevelOneNamespaces, + } + + case d.SkipLevelZeroNames.Has(name): + // return a decorated admission plugin that skips runlevel 0 namespaces based on name (for known values) and + // label. + return &pluginHandlerWithNamespaceNameConditions{ + admissionPlugin: &pluginHandlerWithNamespaceLabelConditions{ + admissionPlugin: admissionPlugin, + namespaceClient: d.NamespaceClient, + namespaceLister: d.NamespaceLister, + namespaceSelector: skipRunLevelZeroSelector, + }, + namespacesToExclude: runLevelZeroNamespaces, + } + + default: + return admissionPlugin + } + +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go new file mode 100644 index 0000000000000..c3ebaf5895306 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition.go @@ -0,0 +1,125 @@ +package namespaceconditions + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apiserver/pkg/admission" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" +) + +const runLevelLabel = "openshift.io/run-level" + +var ( + skipRunLevelZeroSelector labels.Selector + skipRunLevelOneSelector labels.Selector +) + +func init() { + var err error + skipRunLevelZeroSelector, err = labels.Parse(runLevelLabel + " notin ( 0 )") + if err != nil { + panic(err) + } + skipRunLevelOneSelector, err = labels.Parse(runLevelLabel + " notin ( 0,1 )") + if err != nil { + panic(err) + } +} + +// pluginHandlerWithNamespaceLabelConditions wraps an admission plugin in a conditional skip based on namespace labels +type pluginHandlerWithNamespaceLabelConditions struct { + admissionPlugin admission.Interface + namespaceClient corev1client.NamespacesGetter + namespaceLister corev1lister.NamespaceLister + 
+	namespaceSelector labels.Selector
+}
+
+var _ admission.ValidationInterface = &pluginHandlerWithNamespaceLabelConditions{}
+var _ admission.MutationInterface = &pluginHandlerWithNamespaceLabelConditions{}
+
+func (p pluginHandlerWithNamespaceLabelConditions) Handles(operation admission.Operation) bool {
+	return p.admissionPlugin.Handles(operation)
+}
+
+// Admit performs a mutating admission control check and emits metrics.
+func (p pluginHandlerWithNamespaceLabelConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	if !p.shouldRunAdmission(a) {
+		return nil
+	}
+
+	mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface)
+	if !ok {
+		return nil
+	}
+	return mutatingHandler.Admit(ctx, a, o)
+}
+
+// Validate performs a non-mutating admission control check and emits metrics.
+func (p pluginHandlerWithNamespaceLabelConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	if !p.shouldRunAdmission(a) {
+		return nil
+	}
+
+	validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface)
+	if !ok {
+		return nil
+	}
+	return validatingHandler.Validate(ctx, a, o)
+}
+
+// shouldRunAdmission decides whether the request matches the namespaceSelector. Only when it
+// matches is the wrapped admission plugin run.
+func (p pluginHandlerWithNamespaceLabelConditions) shouldRunAdmission(attr admission.Attributes) bool {
+	namespaceName := attr.GetNamespace()
+	if len(namespaceName) == 0 && attr.GetResource().Resource != "namespaces" {
+		// cluster scoped resources always run admission
+		return true
+	}
+	namespaceLabels, err := p.getNamespaceLabels(attr)
+	if err != nil {
+		// default to running the hook so we don't leak namespace existence information
+		return true
+	}
+	// TODO: add an LRU cache for the match decision
+	return p.namespaceSelector.Matches(labels.Set(namespaceLabels))
+}
+
+// getNamespaceLabels gets the labels of the namespace related to the attr.
+func (p pluginHandlerWithNamespaceLabelConditions) getNamespaceLabels(attr admission.Attributes) (map[string]string, error) {
+	// If the request itself is creating or updating a namespace, then get the
+	// labels from attr.Object, because namespaceLister doesn't have the latest
+	// namespace yet.
+	//
+	// However, if the request is deleting a namespace, then get the label from
+	// the namespace in the namespaceLister, because a delete request is not
+	// going to change the object, and attr.Object will be a DeleteOptions
+	// rather than a namespace object.
+ if attr.GetResource().Resource == "namespaces" && + len(attr.GetSubresource()) == 0 && + (attr.GetOperation() == admission.Create || attr.GetOperation() == admission.Update) { + accessor, err := meta.Accessor(attr.GetObject()) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil + } + + namespaceName := attr.GetNamespace() + namespace, err := p.namespaceLister.Get(namespaceName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if apierrors.IsNotFound(err) { + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + namespace, err = p.namespaceClient.Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + } + return namespace.Labels, nil +} diff --git a/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go new file mode 100644 index 0000000000000..31474a4b7ee93 --- /dev/null +++ b/openshift-kube-apiserver/admission/namespaceconditions/labelcondition_test.go @@ -0,0 +1,97 @@ +package namespaceconditions + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/admission" +) + +type fakeNamespaceLister struct { + namespaces map[string]*corev1.Namespace +} + +func (f fakeNamespaceLister) List(selector labels.Selector) (ret []*corev1.Namespace, err error) { + return nil, nil +} +func (f fakeNamespaceLister) Get(name string) (*corev1.Namespace, error) { + ns, ok := f.namespaces[name] + if ok { + return ns, nil + } + return nil, errors.NewNotFound(corev1.Resource("namespaces"), name) +} + +func TestGetNamespaceLabels(t *testing.T) { + namespace1Labels := map[string]string{ + "runlevel": "1", + } + namespace1 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "1", + Labels: namespace1Labels, + }, + } + namespace2Labels := map[string]string{ + "runlevel": "2", + } + namespace2 := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "2", + Labels: namespace2Labels, + }, + } + namespaceLister := fakeNamespaceLister{map[string]*corev1.Namespace{ + "1": &namespace1, + }, + } + + tests := []struct { + name string + attr admission.Attributes + expectedLabels map[string]string + }{ + { + name: "request is for creating namespace, the labels should be from the object itself", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, "", namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Create, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for updating namespace, the labels should be from the new object", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace2.Name, namespace2.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Update, nil, false, nil), + expectedLabels: namespace2Labels, + }, + { + name: "request is for deleting namespace, the labels should be from the cache", + attr: admission.NewAttributesRecord(&namespace2, nil, schema.GroupVersionKind{}, namespace1.Name, namespace1.Name, schema.GroupVersionResource{Resource: "namespaces"}, "", admission.Delete, nil, false, nil), + expectedLabels: namespace1Labels, + }, + { + name: "request is for namespace/finalizer", + attr: 
admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "namespaces"}, "finalizers", admission.Create, nil, false, nil),
+			expectedLabels: namespace1Labels,
+		},
+		{
+			name:           "request is for pod",
+			attr:           admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, namespace1.Name, "mock-name", schema.GroupVersionResource{Resource: "pods"}, "", admission.Create, nil, false, nil),
+			expectedLabels: namespace1Labels,
+		},
+	}
+	matcher := pluginHandlerWithNamespaceLabelConditions{
+		namespaceLister: namespaceLister,
+	}
+	for _, tt := range tests {
+		actualLabels, err := matcher.getNamespaceLabels(tt.attr)
+		if err != nil {
+			t.Error(err)
+		}
+		if !reflect.DeepEqual(actualLabels, tt.expectedLabels) {
+			t.Errorf("expected labels to be %#v, got %#v", tt.expectedLabels, actualLabels)
+		}
+	}
+}
diff --git a/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go
new file mode 100644
index 0000000000000..848cef4d13ac4
--- /dev/null
+++ b/openshift-kube-apiserver/admission/namespaceconditions/namecondition.go
@@ -0,0 +1,60 @@
+package namespaceconditions
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/admission"
+)
+
+// pluginHandlerWithNamespaceNameConditions skips running the wrapped admission plugin for any
+// namespace in the namespacesToExclude list
+type pluginHandlerWithNamespaceNameConditions struct {
+	admissionPlugin     admission.Interface
+	namespacesToExclude sets.String
+}
+
+var _ admission.ValidationInterface = &pluginHandlerWithNamespaceNameConditions{}
+var _ admission.MutationInterface = &pluginHandlerWithNamespaceNameConditions{}
+
+func (p pluginHandlerWithNamespaceNameConditions) Handles(operation admission.Operation) bool {
+	return p.admissionPlugin.Handles(operation)
+}
+
+// Admit performs a mutating admission control check and emits metrics.
+func (p pluginHandlerWithNamespaceNameConditions) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	if !p.shouldRunAdmission(a) {
+		return nil
+	}
+
+	mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface)
+	if !ok {
+		return nil
+	}
+	return mutatingHandler.Admit(ctx, a, o)
+}
+
+// Validate performs a non-mutating admission control check and emits metrics.
+func (p pluginHandlerWithNamespaceNameConditions) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
+	if !p.shouldRunAdmission(a) {
+		return nil
+	}
+
+	validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface)
+	if !ok {
+		return nil
+	}
+	return validatingHandler.Validate(ctx, a, o)
+}
+
+func (p pluginHandlerWithNamespaceNameConditions) shouldRunAdmission(attr admission.Attributes) bool {
+	namespaceName := attr.GetNamespace()
+	if p.namespacesToExclude.Has(namespaceName) {
+		return false
+	}
+	if (attr.GetResource().GroupResource() == schema.GroupResource{Resource: "namespaces"}) && p.namespacesToExclude.Has(attr.GetName()) {
+		return false
+	}
+
+	return true
+}
diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go
new file mode 100644
index 0000000000000..4ef9330be1224
--- /dev/null
+++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package externalipranger is the internal version of the API.
+package externalipranger
diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go
new file mode 100644
index 0000000000000..fe92abf523c1e
--- /dev/null
+++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/register.go
@@ -0,0 +1,20 @@
+package externalipranger
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal}
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	Install       = schemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&ExternalIPRangerAdmissionConfig{},
+	)
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go
new file mode 100644
index 0000000000000..f127ca27aadcb
--- /dev/null
+++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/types.go
@@ -0,0 +1,20 @@
+package externalipranger
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExternalIPRangerAdmissionConfig is the configuration for which CIDRs services can't manage
+type ExternalIPRangerAdmissionConfig struct {
+	metav1.TypeMeta
+
+	// ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP
+	// may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that
+	// CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You
+	// should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.
+ ExternalIPNetworkCIDRs []string + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go new file mode 100644 index 0000000000000..79476f394930a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go new file mode 100644 index 0000000000000..f55b5a5b494df --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + restrictedendpoints.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ExternalIPRangerAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go new file mode 100644 index 0000000000000..0fb8ea4ca830a --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExternalIPRangerAdmissionConfig is the configuration for which CIDRs services can't manage +type ExternalIPRangerAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. + ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` + // AllowIngressIP indicates that ingress IPs should be allowed + AllowIngressIP bool `json:"allowIngressIP"` +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..34eaa7cff663f --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/v1/zz_generated.deepcopy.go @@ -0,0 +1,55 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyInto(out *ExternalIPRangerAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPRangerAdmissionConfig. +func (in *ExternalIPRangerAdmissionConfig) DeepCopy() *ExternalIPRangerAdmissionConfig { + if in == nil { + return nil + } + out := new(ExternalIPRangerAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..8437974962071 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/externalipranger/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package externalipranger + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPRangerAdmissionConfig) DeepCopyInto(out *ExternalIPRangerAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPRangerAdmissionConfig. +func (in *ExternalIPRangerAdmissionConfig) DeepCopy() *ExternalIPRangerAdmissionConfig { + if in == nil { + return nil + } + out := new(ExternalIPRangerAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExternalIPRangerAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go new file mode 100644 index 0000000000000..ff46fb9f13d76 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package restrictedendpoints is the internal version of the API. +package restrictedendpoints diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go new file mode 100644 index 0000000000000..171a4b1be5182 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/register.go @@ -0,0 +1,20 @@ +package restrictedendpoints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: runtime.APIVersionInternal} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go new file mode 100644 index 0000000000000..e205762215ba1 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/types.go @@ -0,0 +1,15 @@ +package restrictedendpoints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. + RestrictedCIDRs []string +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go new file mode 100644 index 0000000000000..0dac22208df49 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints + +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go new file mode 100644 index 0000000000000..f924353fe24d3 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/register.go @@ -0,0 +1,24 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" +) + +var GroupVersion = schema.GroupVersion{Group: "network.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + restrictedendpoints.Install, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &RestrictedEndpointsAdmissionConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go new file mode 100644 index 0000000000000..f665aa1e73c2f --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/types.go @@ -0,0 +1,15 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestrictedEndpointsAdmissionConfig is the configuration for which CIDRs services can't manage +type RestrictedEndpointsAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // RestrictedCIDRs indicates what CIDRs will be disallowed for services. + RestrictedCIDRs []string `json:"restrictedCIDRs"` +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e9597be6b05da --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyInto(out *RestrictedEndpointsAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.RestrictedCIDRs != nil { + in, out := &in.RestrictedCIDRs, &out.RestrictedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictedEndpointsAdmissionConfig. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopy() *RestrictedEndpointsAdmissionConfig { + if in == nil { + return nil + } + out := new(RestrictedEndpointsAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RestrictedEndpointsAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..0a00cc4bc8da4 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package restrictedendpoints + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyInto(out *RestrictedEndpointsAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.RestrictedCIDRs != nil { + in, out := &in.RestrictedCIDRs, &out.RestrictedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestrictedEndpointsAdmissionConfig. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopy() *RestrictedEndpointsAdmissionConfig { + if in == nil { + return nil + } + out := new(RestrictedEndpointsAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestrictedEndpointsAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go new file mode 100644 index 0000000000000..79a1be9cee7eb --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission.go @@ -0,0 +1,208 @@ +package externalipranger + +import ( + "context" + "fmt" + "io" + "net" + "strings" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/externalipranger/v1" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +const ExternalIPPluginName = "network.openshift.io/ExternalIPRanger" + +func RegisterExternalIP(plugins *admission.Plugins) { + plugins.Register("network.openshift.io/ExternalIPRanger", + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", ExternalIPPluginName) + return nil, nil + } + + // this needs to be moved upstream to be part of core config + reject, admit, err := ParseRejectAdmitCIDRRules(pluginConfig.ExternalIPNetworkCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewExternalIPRanger(reject, admit, pluginConfig.AllowIngressIP), nil + }) +} + +func readConfig(reader io.Reader) (*externalipranger.ExternalIPRangerAdmissionConfig, 
error) {
+	obj, err := helpers.ReadYAMLToInternal(reader, externalipranger.Install, v1.Install)
+	if err != nil {
+		return nil, err
+	}
+	if obj == nil {
+		return nil, nil
+	}
+	config, ok := obj.(*externalipranger.ExternalIPRangerAdmissionConfig)
+	if !ok {
+		return nil, fmt.Errorf("unexpected config object: %#v", obj)
+	}
+	// No validation needed since config is just a list of strings
+	return config, nil
+}
+
+type externalIPRanger struct {
+	*admission.Handler
+	reject         []*net.IPNet
+	admit          []*net.IPNet
+	authorizer     authorizer.Authorizer
+	allowIngressIP bool
+}
+
+var _ admission.Interface = &externalIPRanger{}
+var _ admission.ValidationInterface = &externalIPRanger{}
+var _ = initializer.WantsAuthorizer(&externalIPRanger{})
+
+// ParseRejectAdmitCIDRRules calculates a blacklist and whitelist from a list of string CIDR rules (treating
+// a leading ! as a negation). Returns an error if any rule is invalid.
+func ParseRejectAdmitCIDRRules(rules []string) (reject, admit []*net.IPNet, err error) {
+	for _, s := range rules {
+		negate := false
+		if strings.HasPrefix(s, "!") {
+			negate = true
+			s = s[1:]
+		}
+		_, cidr, err := net.ParseCIDR(s)
+		if err != nil {
+			return nil, nil, err
+		}
+		if negate {
+			reject = append(reject, cidr)
+		} else {
+			admit = append(admit, cidr)
+		}
+	}
+	return reject, admit, nil
+}
+
+// NewExternalIPRanger creates a new external IP ranger admission plugin.
+func NewExternalIPRanger(reject, admit []*net.IPNet, allowIngressIP bool) *externalIPRanger {
+	return &externalIPRanger{
+		Handler:        admission.NewHandler(admission.Create, admission.Update),
+		reject:         reject,
+		admit:          admit,
+		allowIngressIP: allowIngressIP,
+	}
+}
+
+func (r *externalIPRanger) SetAuthorizer(a authorizer.Authorizer) {
+	r.authorizer = a
+}
+
+func (r *externalIPRanger) ValidateInitialization() error {
+	if r.authorizer == nil {
+		return fmt.Errorf("missing authorizer")
+	}
+	return nil
+}
+
+// NetworkSlice is a helper for checking whether an IP is contained in a range
+// of networks.
+type NetworkSlice []*net.IPNet
+
+func (s NetworkSlice) Contains(ip net.IP) bool {
+	for _, cidr := range s {
+		if cidr.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}
+
+// Validate determines if the service should be admitted based on the configured network CIDRs.
+func (r *externalIPRanger) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error {
+	if a.GetResource().GroupResource() != kapi.Resource("services") {
+		return nil
+	}
+
+	svc, ok := a.GetObject().(*kapi.Service)
+	// if we can't convert then we don't handle this object so just return
+	if !ok {
+		return nil
+	}
+
+	// Determine if an ingress ip address should be allowed as an
+	// external ip by checking the loadbalancer status of the previous
+	// object state. Only updates need to be validated against the
+	// ingress ip since the loadbalancer status cannot be set on
+	// create.
+ ingressIP := "" + retrieveIngressIP := a.GetOperation() == admission.Update && + r.allowIngressIP && svc.Spec.Type == kapi.ServiceTypeLoadBalancer + if retrieveIngressIP { + old, ok := a.GetOldObject().(*kapi.Service) + ipPresent := ok && old != nil && len(old.Status.LoadBalancer.Ingress) > 0 + if ipPresent { + ingressIP = old.Status.LoadBalancer.Ingress[0].IP + } + } + + var errs field.ErrorList + switch { + // administrator disabled externalIPs + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) == 0: + onlyIngressIP := len(svc.Spec.ExternalIPs) == 1 && svc.Spec.ExternalIPs[0] == ingressIP + if !onlyIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs"), "externalIPs have been disabled")) + } + // administrator has limited the range + case len(svc.Spec.ExternalIPs) > 0 && len(r.admit) > 0: + for i, s := range svc.Spec.ExternalIPs { + ip := net.ParseIP(s) + if ip == nil { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIPs must be a valid address")) + continue + } + notIngressIP := s != ingressIP + if (NetworkSlice(r.reject).Contains(ip) || !NetworkSlice(r.admit).Contains(ip)) && notIngressIP { + errs = append(errs, field.Forbidden(field.NewPath("spec", "externalIPs").Index(i), "externalIP is not allowed")) + continue + } + } + } + + if len(errs) > 0 { + //if there are errors reported, resort to RBAC check to see + //if this is an admin user who can over-ride the check + allow, err := r.checkAccess(ctx, a) + if err != nil { + return err + } + if !allow { + return admission.NewForbidden(a, errs.ToAggregate()) + } + } + + return nil +} + +func (r *externalIPRanger) checkAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Resource: "service", + Subresource: "externalips", + APIGroup: "network.openshift.io", + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} diff --git a/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go new file mode 100644 index 0000000000000..7c33d28b8c517 --- /dev/null +++ b/openshift-kube-apiserver/admission/network/externalipranger/externalip_admission_test.go @@ -0,0 +1,321 @@ +package externalipranger + +import ( + "context" + "fmt" + "net" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + + "k8s.io/apiserver/pkg/authentication/user" + + "k8s.io/apiserver/pkg/authentication/serviceaccount" + kapi "k8s.io/kubernetes/pkg/apis/core" +) + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // system:serviceaccount:test:admin user aka admin user is allowed to set + // external IPs + if ui.GetName() == "system:serviceaccount:test:admin" { + return authorizer.DecisionAllow, "", nil + } + // Non test:admin user aka without admin privileges: + return authorizer.DecisionDeny, "", nil +} 
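The fake authorizer above stands in for RBAC: only system:serviceaccount:test:admin may override a failed CIDR check. For orientation, a minimal sketch of driving the plugin directly (the functions are from this patch; the CIDR values are illustrative):

	// Reject the 172.0.1.0/24 subset while admitting the rest of 172.0.0.0/16;
	// rejections are applied before the admit list is consulted.
	reject, admit, err := ParseRejectAdmitCIDRRules([]string{"!172.0.1.0/24", "172.0.0.0/16"})
	if err != nil {
		// only reachable for malformed CIDR strings
	}
	ranger := NewExternalIPRanger(reject, admit, false)
	// Validate would admit a service with externalIPs ["172.0.0.5"] and forbid
	// ["172.0.1.5"], unless the user passes the externalips subresource RBAC check.
	_ = ranger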
+
+// TestAdmission exercises the external IP ranger admission plugin across service create and
+// update scenarios for ordinary and privileged users
+func TestAdmission(t *testing.T) {
+	svc := &kapi.Service{
+		ObjectMeta: metav1.ObjectMeta{Name: "test"},
+	}
+	var oldSvc *kapi.Service
+
+	_, ipv4, err := net.ParseCIDR("172.0.0.0/16")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, ipv4subset, err := net.ParseCIDR("172.0.1.0/24")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, ipv4offset, err := net.ParseCIDR("172.200.0.0/24")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, none, err := net.ParseCIDR("0.0.0.0/32")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, all, err := net.ParseCIDR("0.0.0.0/0")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tests := []struct {
+		testName        string
+		rejects, admits []*net.IPNet
+		op              admission.Operation
+		externalIPs     []string
+		admit           bool
+		errFn           func(err error) bool
+		loadBalancer    bool
+		ingressIP       string
+		userinfo        user.Info
+	}{
+		{
+			admit:    true,
+			op:       admission.Create,
+			testName: "No external IPs on create for test:ordinary-user user",
+			userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:    true,
+			op:       admission.Update,
+			testName: "No external IPs on update for test:ordinary-user user",
+			userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:       false,
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Create,
+			testName:    "No external IPs allowed on create for test:ordinary-user user",
+			errFn:       func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") },
+			userinfo:    serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:       true,
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Create,
+			testName:    "External IPs allowed on create for test:admin user",
+			userinfo:    serviceaccount.UserInfo("test", "admin", ""),
+		},
+		{
+			admit:       false,
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Update,
+			testName:    "No external IPs allowed on update",
+			errFn:       func(err error) bool { return strings.Contains(err.Error(), "externalIPs have been disabled") },
+			userinfo:    serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:       true,
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Update,
+			testName:    "External IPs allowed on update for test:admin user",
+			userinfo:    serviceaccount.UserInfo("test", "admin", ""),
+		},
+		{
+			admit:       false,
+			admits:      []*net.IPNet{ipv4},
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Create,
+			testName:    "IP out of range on create",
+			errFn: func(err error) bool {
+				return strings.Contains(err.Error(), "externalIP is not allowed") &&
+					strings.Contains(err.Error(), "spec.externalIPs[0]")
+			},
+			userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:       false,
+			admits:      []*net.IPNet{ipv4},
+			externalIPs: []string{"1.2.3.4"},
+			op:          admission.Update,
+			testName:    "IP out of range on update",
+			errFn: func(err error) bool {
+				return strings.Contains(err.Error(), "externalIP is not allowed") &&
+					strings.Contains(err.Error(), "spec.externalIPs[0]")
+			},
+			userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:       false,
+			admits:      []*net.IPNet{ipv4},
+			rejects:     []*net.IPNet{ipv4subset},
+			externalIPs: []string{"172.0.1.1"},
+			op:          admission.Update,
+			testName:    "IP out of range due to blacklist",
+			errFn: func(err error) bool {
+				return strings.Contains(err.Error(), "externalIP is not allowed") &&
+					strings.Contains(err.Error(), "spec.externalIPs[0]")
+			},
+			userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""),
+		},
+		{
+			admit:  false,
+			admits:
[]*net.IPNet{ipv4}, + rejects: []*net.IPNet{ipv4offset}, + externalIPs: []string{"172.199.1.1"}, + op: admission.Update, + testName: "IP not in reject or admit", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIP is not allowed") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Create, + testName: "IP in range on create for test:ordinary-user user", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"172.0.0.1"}, + op: admission.Update, + testName: "IP in range on update for test:admin user", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + // other checks + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"abcd"}, + op: admission.Create, + testName: "IP unparseable on create", + errFn: func(err error) bool { + return strings.Contains(err.Error(), "externalIPs must be a valid address") && + strings.Contains(err.Error(), "spec.externalIPs[0]") + }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: false, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty for test:ordinary-user user", + errFn: func(err error) bool { return strings.Contains(err.Error(), "externalIP is not allowed") }, + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{none}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "IP range is empty, but test:admin user allowed", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + rejects: []*net.IPNet{all}, + admits: []*net.IPNet{all}, + externalIPs: []string{"1.2.3.4"}, + op: admission.Create, + testName: "rejections can cover the entire range", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + // Ingress IP checks + { + admit: true, + externalIPs: []string{"1.2.3.4"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are disabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + { + admit: true, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip allowed when external ips are enabled", + loadBalancer: true, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "admin", ""), + }, + { + admit: false, + admits: []*net.IPNet{ipv4}, + externalIPs: []string{"1.2.3.4", "172.0.0.1"}, + op: admission.Update, + testName: "Ingress ip not allowed for non-lb service", + loadBalancer: false, + ingressIP: "1.2.3.4", + userinfo: serviceaccount.UserInfo("test", "ordinary-user", ""), + }, + } + for _, test := range tests { + svc.Spec.ExternalIPs = test.externalIPs + allowIngressIP := len(test.ingressIP) > 0 || test.loadBalancer + handler := NewExternalIPRanger(test.rejects, test.admits, allowIngressIP) + handler.SetAuthorizer(fakeAuthorizer(t)) + err := handler.ValidateInitialization() + if err != nil { + t.Errorf("%s: Got an error %s", test.testName, err) + continue + } + if test.loadBalancer { + svc.Spec.Type = kapi.ServiceTypeLoadBalancer + } else { + svc.Spec.Type = kapi.ServiceTypeClusterIP + } + + if len(test.ingressIP) > 0 { 
+ // Provide an ingress ip via the previous object state + oldSvc = &kapi.Service{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Status: kapi.ServiceStatus{ + LoadBalancer: kapi.LoadBalancerStatus{ + Ingress: []kapi.LoadBalancerIngress{ + { + IP: test.ingressIP, + }, + }, + }, + }, + } + + } else { + oldSvc = nil + } + + err = handler.Validate(context.TODO(), admission.NewAttributesRecord(svc, oldSvc, kapi.Kind("Service").WithVersion("version"), "namespace", svc.ObjectMeta.Name, kapi.Resource("services").WithVersion("version"), "", test.op, nil, false, test.userinfo), nil) + + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + if test.errFn != nil && !test.errFn(err) { + t.Errorf("%s: unexpected error: %v", test.testName, err) + } + } +} + +func TestHandles(t *testing.T) { + for op, shouldHandle := range map[admission.Operation]bool{ + admission.Create: true, + admission.Update: true, + admission.Connect: false, + admission.Delete: false, + } { + ranger := NewExternalIPRanger(nil, nil, false) + if e, a := shouldHandle, ranger.Handles(op); e != a { + t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a) + } + } +} diff --git a/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go new file mode 100644 index 0000000000000..09566962e713b --- /dev/null +++ b/openshift-kube-apiserver/admission/network/restrictedendpoints/endpoint_admission.go @@ -0,0 +1,191 @@ +package restrictedendpoints + +import ( + "context" + "fmt" + "io" + "net" + "reflect" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + kapi "k8s.io/kubernetes/pkg/apis/core" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/network/apis/restrictedendpoints/v1" +) + +const RestrictedEndpointsPluginName = "network.openshift.io/RestrictedEndpointsAdmission" + +func RegisterRestrictedEndpoints(plugins *admission.Plugins) { + plugins.Register(RestrictedEndpointsPluginName, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", RestrictedEndpointsPluginName) + return nil, nil + } + restrictedNetworks, err := ParseSimpleCIDRRules(pluginConfig.RestrictedCIDRs) + if err != nil { + // should have been caught with validation + return nil, err + } + + return NewRestrictedEndpointsAdmission(restrictedNetworks), nil + }) +} + +func readConfig(reader io.Reader) (*restrictedendpoints.RestrictedEndpointsAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, restrictedendpoints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*restrictedendpoints.RestrictedEndpointsAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +type restrictedEndpointsAdmission struct { + *admission.Handler + + authorizer authorizer.Authorizer + 
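The plugin defined here follows the same two-step shape as the ExternalIPRanger above: first compute a policy violation, then let a dedicated authorizer check (the virtual `endpoints/restricted` subresource) act as an escape hatch. A compressed sketch of that flow; `validateWithEscapeHatch` is a made-up name for illustration, not code from this patch:

```go
import (
	"context"

	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/authorization/authorizer"
)

// validateWithEscapeHatch reduces the Validate flow below to its core:
// a policy error is fatal only when the authorizer denies the
// escape-hatch permission. Illustrative only.
func validateWithEscapeHatch(ctx context.Context, a admission.Attributes,
	az authorizer.Authorizer, policyErr error) error {
	if policyErr == nil {
		return nil // nothing restricted was touched
	}
	decision, _, err := az.Authorize(ctx, authorizer.AttributesRecord{
		User:            a.GetUserInfo(),
		Verb:            "create",
		Namespace:       a.GetNamespace(),
		Resource:        "endpoints",
		Subresource:     "restricted", // the escape-hatch subresource
		ResourceRequest: true,
	})
	if err != nil {
		return err
	}
	if decision != authorizer.DecisionAllow {
		return admission.NewForbidden(a, policyErr)
	}
	return nil
}
```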
restrictedNetworks []*net.IPNet +} + +var _ = initializer.WantsAuthorizer(&restrictedEndpointsAdmission{}) +var _ = admission.ValidationInterface(&restrictedEndpointsAdmission{}) + +// ParseSimpleCIDRRules parses a list of CIDR strings +func ParseSimpleCIDRRules(rules []string) (networks []*net.IPNet, err error) { + for _, s := range rules { + _, cidr, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + networks = append(networks, cidr) + } + return networks, nil +} + +// NewRestrictedEndpointsAdmission creates a new endpoints admission plugin. +func NewRestrictedEndpointsAdmission(restrictedNetworks []*net.IPNet) *restrictedEndpointsAdmission { + return &restrictedEndpointsAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + restrictedNetworks: restrictedNetworks, + } +} + +func (r *restrictedEndpointsAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *restrictedEndpointsAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("missing authorizer") + } + return nil +} + +var ( + defaultRestrictedPorts = []kapi.EndpointPort{ + // MCS ports + {Protocol: kapi.ProtocolTCP, Port: 22623}, + {Protocol: kapi.ProtocolTCP, Port: 22624}, + } + defaultRestrictedNetworks = []*net.IPNet{ + // IPv4 link-local range 169.254.0.0/16 (including cloud metadata IP) + {IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}, + } +) + +func (r *restrictedEndpointsAdmission) findRestrictedIP(ep *kapi.Endpoints, restricted []*net.IPNet) error { + for _, subset := range ep.Subsets { + for _, addr := range subset.Addresses { + ip := net.ParseIP(addr.IP) + if ip == nil { + continue + } + for _, net := range restricted { + if net.Contains(ip) { + return fmt.Errorf("endpoint address %s is not allowed", addr.IP) + } + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) findRestrictedPort(ep *kapi.Endpoints, restricted []kapi.EndpointPort) error { + for _, subset := range ep.Subsets { + for _, port := range subset.Ports { + for _, restricted := range restricted { + if port.Protocol == restricted.Protocol && port.Port == restricted.Port { + return fmt.Errorf("endpoint port %s:%d is not allowed", string(port.Protocol), port.Port) + } + } + } + } + return nil +} + +func (r *restrictedEndpointsAdmission) checkAccess(ctx context.Context, attr admission.Attributes) (bool, error) { + authzAttr := authorizer.AttributesRecord{ + User: attr.GetUserInfo(), + Verb: "create", + Namespace: attr.GetNamespace(), + Resource: "endpoints", + Subresource: "restricted", + APIGroup: kapi.GroupName, + Name: attr.GetName(), + ResourceRequest: true, + } + authorized, _, err := r.authorizer.Authorize(ctx, authzAttr) + return authorized == authorizer.DecisionAllow, err +} + +// Admit determines if the endpoints object should be admitted +func (r *restrictedEndpointsAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() != kapi.Resource("endpoints") { + return nil + } + ep, ok := a.GetObject().(*kapi.Endpoints) + if !ok { + return nil + } + old, ok := a.GetOldObject().(*kapi.Endpoints) + if ok && reflect.DeepEqual(ep.Subsets, old.Subsets) { + return nil + } + + restrictedErr := r.findRestrictedIP(ep, r.restrictedNetworks) + if restrictedErr == nil { + restrictedErr = r.findRestrictedIP(ep, defaultRestrictedNetworks) + } + if restrictedErr == nil { + restrictedErr = r.findRestrictedPort(ep, defaultRestrictedPorts) + } + if restrictedErr 
== nil {
+		return nil
+	}
+
+	allow, err := r.checkAccess(ctx, a)
+	if err != nil {
+		return err
+	}
+	if !allow {
+		return admission.NewForbidden(a, restrictedErr)
+	}
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go
new file mode 100644
index 0000000000000..04727861a1ea1
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/doc.go
@@ -0,0 +1,4 @@
+// +k8s:deepcopy-gen=package,register
+
+// Package ingressadmission is the internal version of the API.
+package ingressadmission
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go
new file mode 100644
index 0000000000000..e0e84492781a6
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/register.go
@@ -0,0 +1,33 @@
+package ingressadmission
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupVersion is the group version used to register these objects
+var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return GroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return GroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	Install       = schemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&IngressAdmissionConfig{},
+	)
+	return nil
+}
+
+func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta }
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go
new file mode 100644
index 0000000000000..bc1356398663c
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/types.go
@@ -0,0 +1,22 @@
+package ingressadmission
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
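These internal/v1 config types exist so that a small YAML document can configure the plugin at startup. A rough sketch of the decode path, assuming `helpers.ReadYAMLToInternal` accepts the usual apiVersion/kind header (the field name follows the v1 JSON tag shown later in this patch):

```go
// Illustrative only: decode a plugin config the way readConfig below does.
cfg := strings.NewReader(`apiVersion: route.openshift.io/v1
kind: IngressAdmissionConfig
allowHostnameChanges: true
`)
obj, err := helpers.ReadYAMLToInternal(cfg, ingressadmission.Install, v1.Install)
if err != nil {
	panic(err) // example only; real callers propagate the error
}
config := obj.(*ingressadmission.IngressAdmissionConfig)
fmt.Println(config.AllowHostnameChanges) // true
```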
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressAdmissionConfig is the configuration for the ingress
+// controller limiter plugin. It changes the behavior of ingress
+// objects to work better with OpenShift routes and routers.
+// *NOTE* This has security implications in the router when handling
+// ingress objects.
+type IngressAdmissionConfig struct {
+	metav1.TypeMeta
+
+	// AllowHostnameChanges, when false or unset, means OpenShift does
+	// not allow changing or adding hostnames to ingress objects. If set
+	// to true then hostnames can be added or modified, which has
+	// security implications in the router.
+	AllowHostnameChanges bool
+}
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go
new file mode 100644
index 0000000000000..e105c48094abc
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/defaults_test.go
@@ -0,0 +1,59 @@
+package v1
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/api/apitesting"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/diff"
+)
+
+func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
+	scheme, codecs := apitesting.SchemeForOrDie(Install)
+	data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj)
+	if err != nil {
+		t.Errorf("%v\n %#v", err, obj)
+		return nil
+	}
+	obj2, err := runtime.Decode(codecs.UniversalDecoder(), data)
+	if err != nil {
+		t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
+		return nil
+	}
+	obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
+	err = scheme.Convert(obj2, obj3, nil)
+	if err != nil {
+		t.Errorf("%v\nSource: %#v", err, obj2)
+		return nil
+	}
+	return obj3
+}
+
+func TestDefaults(t *testing.T) {
+	tests := []struct {
+		original *IngressAdmissionConfig
+		expected *IngressAdmissionConfig
+	}{
+		{
+			original: &IngressAdmissionConfig{},
+			expected: &IngressAdmissionConfig{
+				AllowHostnameChanges: false,
+			},
+		},
+	}
+	for i, test := range tests {
+		t.Logf("test %d", i)
+		original := test.original
+		expected := test.expected
+		obj2 := roundTrip(t, runtime.Object(original))
+		got, ok := obj2.(*IngressAdmissionConfig)
+		if !ok {
+			t.Errorf("unexpected object: %v", got)
+			t.FailNow()
+		}
+		if !reflect.DeepEqual(got, expected) {
+			t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got))
+		}
+	}
+}
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go
new file mode 100644
index 0000000000000..65269e693b22a
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/doc.go
@@ -0,0 +1,5 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission
+
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go
new file mode 100644
index 0000000000000..aecb8a6eec279
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/register.go
@@ -0,0 +1,27 @@
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission"
+)
+
+func (obj *IngressAdmissionConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta }
+
+var GroupVersion = schema.GroupVersion{Group: "route.openshift.io", Version: "v1"}
+
+var (
+	localSchemeBuilder = runtime.NewSchemeBuilder(
+		addKnownTypes,
+		ingressadmission.Install,
+	)
+	Install = localSchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&IngressAdmissionConfig{},
+	)
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go
new file mode 100644
index 0000000000000..27266bc8b3f6d
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/swagger_doc.go
@@ -0,0 +1,15 @@
+package v1
+
+// This file contains methods that can be used by the go-restful package to generate Swagger
+// documentation for the object types found in 'types.go'. This file is automatically generated
+// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift.
+// ==== DO NOT EDIT THIS FILE MANUALLY ====
+
+var map_IngressAdmissionConfig = map[string]string{
+	"": "IngressAdmissionConfig is the configuration for the ingress controller limiter plugin. It changes the behavior of ingress objects to work better with OpenShift routes and routers. *NOTE* This has security implications in the router when handling ingress objects",
+	"allowHostnameChanges": "AllowHostnameChanges, when false or unset, means OpenShift does not allow changing or adding hostnames to ingress objects. If set to true then hostnames can be added or modified, which has security implications in the router.",
+}
+
+func (IngressAdmissionConfig) SwaggerDoc() map[string]string {
+	return map_IngressAdmissionConfig
+}
diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go
new file mode 100644
index 0000000000000..a770d0539f449
--- /dev/null
+++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/types.go
@@ -0,0 +1,22 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressAdmissionConfig is the configuration for the ingress
+// controller limiter plugin. It changes the behavior of ingress
+// objects to work better with OpenShift routes and routers.
+// *NOTE* This has security implications in the router when handling
+// ingress objects.
+type IngressAdmissionConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// AllowHostnameChanges, when false or unset, means OpenShift does
+	// not allow changing or adding hostnames to ingress objects. If set
+	// to true then hostnames can be added or modified, which has
+	// security implications in the router.
+ AllowHostnameChanges bool `json:"allowHostnameChanges"` +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..ba0f8a528c380 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1/zz_generated.deepcopy.go @@ -0,0 +1,34 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressAdmissionConfig) DeepCopyInto(out *IngressAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressAdmissionConfig. +func (in *IngressAdmissionConfig) DeepCopy() *IngressAdmissionConfig { + if in == nil { + return nil + } + out := new(IngressAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..e75a7b7a07f22 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/apis/ingressadmission/zz_generated.deepcopy.go @@ -0,0 +1,34 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package ingressadmission + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressAdmissionConfig) DeepCopyInto(out *IngressAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressAdmissionConfig. +func (in *IngressAdmissionConfig) DeepCopy() *IngressAdmissionConfig { + if in == nil { + return nil + } + out := new(IngressAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IngressAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission.go b/openshift-kube-apiserver/admission/route/ingress_admission.go new file mode 100644 index 0000000000000..f59104fe51a08 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission.go @@ -0,0 +1,162 @@ +// This plugin supplements upstream Ingress admission validation +// It takes care of current Openshift specific constraints on Ingress resources +package admission + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/networking" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission/v1" +) + +const ( + IngressAdmission = "route.openshift.io/IngressAdmission" +) + +func Register(plugins *admission.Plugins) { + plugins.Register(IngressAdmission, + func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + return NewIngressAdmission(pluginConfig), nil + }) +} + +type ingressAdmission struct { + *admission.Handler + config *ingressadmission.IngressAdmissionConfig + authorizer authorizer.Authorizer +} + +var _ = initializer.WantsAuthorizer(&ingressAdmission{}) +var _ = admission.ValidationInterface(&ingressAdmission{}) + +func NewIngressAdmission(config *ingressadmission.IngressAdmissionConfig) *ingressAdmission { + return &ingressAdmission{ + Handler: admission.NewHandler(admission.Create, admission.Update), + config: config, + } +} + +func readConfig(reader io.Reader) (*ingressadmission.IngressAdmissionConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, ingressadmission.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*ingressadmission.IngressAdmissionConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +func (r *ingressAdmission) SetAuthorizer(a authorizer.Authorizer) { + r.authorizer = a +} + +func (r *ingressAdmission) ValidateInitialization() error { + if r.authorizer == nil { + return fmt.Errorf("%s needs an Openshift Authorizer", IngressAdmission) + } + return nil +} + +func (r *ingressAdmission) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) error { + if a.GetResource().GroupResource() == kextensions.Resource("ingresses") { + switch a.GetOperation() { + case admission.Create: + if ingress, ok := a.GetObject().(*networking.Ingress); ok { + // if any rules have a host, check whether the user has permission to set them + for i, rule := range ingress.Spec.Rules { + if len(rule.Host) > 0 { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "create", + Namespace: a.GetNamespace(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, 
+ } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules").Index(i), err)}) + } + if authorized != authorizer.DecisionAllow { + return errors.NewInvalid(kind, ingress.Name, field.ErrorList{field.Forbidden(field.NewPath("spec", "rules").Index(i), "you do not have permission to set host fields in ingress rules")}) + } + break + } + } + } + case admission.Update: + if r.config == nil || r.config.AllowHostnameChanges == false { + oldIngress, ok := a.GetOldObject().(*networking.Ingress) + if !ok { + return nil + } + newIngress, ok := a.GetObject().(*networking.Ingress) + if !ok { + return nil + } + if !haveHostnamesChanged(oldIngress, newIngress) { + attr := authorizer.AttributesRecord{ + User: a.GetUserInfo(), + Verb: "update", + Namespace: a.GetNamespace(), + Name: a.GetName(), + Resource: "routes", + Subresource: "custom-host", + APIGroup: "route.openshift.io", + ResourceRequest: true, + } + kind := schema.GroupKind{Group: a.GetResource().Group, Kind: a.GetResource().Resource} + authorized, _, err := r.authorizer.Authorize(ctx, attr) + if err != nil { + return errors.NewInvalid(kind, newIngress.Name, field.ErrorList{field.InternalError(field.NewPath("spec", "rules"), err)}) + } + if authorized == authorizer.DecisionAllow { + return nil + } + return fmt.Errorf("cannot change hostname") + } + } + } + } + return nil +} + +func haveHostnamesChanged(oldIngress, newIngress *networking.Ingress) bool { + hostnameSet := sets.NewString() + for _, element := range oldIngress.Spec.Rules { + hostnameSet.Insert(element.Host) + } + + for _, element := range newIngress.Spec.Rules { + if present := hostnameSet.Has(element.Host); !present { + return false + } + } + + return true +} diff --git a/openshift-kube-apiserver/admission/route/ingress_admission_test.go b/openshift-kube-apiserver/admission/route/ingress_admission_test.go new file mode 100644 index 0000000000000..b1013b8346a30 --- /dev/null +++ b/openshift-kube-apiserver/admission/route/ingress_admission_test.go @@ -0,0 +1,171 @@ +package admission + +import ( + "context" + "testing" + + "k8s.io/kubernetes/pkg/apis/networking" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/authorization/authorizer" + kextensions "k8s.io/kubernetes/pkg/apis/extensions" + + "k8s.io/kubernetes/openshift-kube-apiserver/admission/route/apis/ingressadmission" +) + +type fakeAuthorizer struct { + allow authorizer.Decision + err error +} + +func (a *fakeAuthorizer) Authorize(context.Context, authorizer.Attributes) (authorizer.Decision, string, error) { + return a.allow, "", a.err +} + +func TestAdmission(t *testing.T) { + var newIngress *networking.Ingress + var oldIngress *networking.Ingress + + tests := []struct { + config *ingressadmission.IngressAdmissionConfig + testName string + oldHost, newHost string + op admission.Operation + admit bool + allow authorizer.Decision + }{ + { + admit: true, + config: emptyConfig(), + op: admission.Create, + testName: "No errors on create", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "foo.com", + testName: "keeping the host the same should pass", + }, + { + admit: true, + config: emptyConfig(), + op: admission.Update, + oldHost: "foo.com", + testName: "deleting a hostname should pass", + }, + { + 
admit: false, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "changing hostname should fail", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "changing hostname should succeed if the user has permission", + }, + { + admit: false, + config: nil, + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "unconfigured plugin should still fail", + }, + { + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + oldHost: "bar.com", + testName: "Upstream Hostname updates enabled", + }, + { + admit: true, + config: testConfigUpdateAllow(), + op: admission.Update, + newHost: "foo.com", + testName: "add new hostname with upstream rules", + }, + { + admit: false, + allow: authorizer.DecisionNoOpinion, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should require permission", + }, + { + admit: true, + allow: authorizer.DecisionAllow, + config: emptyConfig(), + op: admission.Create, + newHost: "foo.com", + testName: "setting the host should pass if user has permission", + }, + } + for _, test := range tests { + if len(test.newHost) > 0 { + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.newHost, + }, + }, + }, + } + } else { + //Used to test deleting a hostname + newIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + } + } + handler := NewIngressAdmission(test.config) + handler.SetAuthorizer(&fakeAuthorizer{allow: test.allow}) + + if len(test.oldHost) > 0 { + //Provides the previous state of an ingress object + oldIngress = &networking.Ingress{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: networking.IngressSpec{ + Rules: []networking.IngressRule{ + { + Host: test.oldHost, + }, + }, + }, + } + } else { + oldIngress = nil + } + + err := handler.Validate(context.TODO(), admission.NewAttributesRecord(newIngress, oldIngress, kextensions.Kind("ingresses").WithVersion("Version"), "namespace", newIngress.ObjectMeta.Name, kextensions.Resource("ingresses").WithVersion("version"), "", test.op, nil, false, nil), nil) + if test.admit && err != nil { + t.Errorf("%s: expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("%s: expected an error", test.testName) + } + } + +} + +func emptyConfig() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{} +} + +func testConfigUpdateAllow() *ingressadmission.IngressAdmissionConfig { + return &ingressadmission.IngressAdmissionConfig{ + AllowHostnameChanges: true, + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..ae163f472d40a --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package api is the internal version of the API. 
+package podnodeconstraints diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go new file mode 100644 index 0000000000000..5b8add00bb815 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/register.go @@ -0,0 +1,33 @@ +package podnodeconstraints + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return GroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + Install = schemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go new file mode 100644 index 0000000000000..27cebad199ed0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/types.go @@ -0,0 +1,19 @@ +package podnodeconstraints + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in. 
For accounts, serviceaccounts, +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go new file mode 100644 index 0000000000000..54d718cfc91af --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults.go @@ -0,0 +1,19 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func SetDefaults_PodNodeConstraintsConfig(obj *PodNodeConstraintsConfig) { + if obj.NodeSelectorLabelBlacklist == nil { + obj.NodeSelectorLabelBlacklist = []string{ + corev1.LabelHostname, + } + } +} + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodNodeConstraintsConfig{}, func(obj interface{}) { SetDefaults_PodNodeConstraintsConfig(obj.(*PodNodeConstraintsConfig)) }) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go new file mode 100644 index 0000000000000..513084ad95122 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/defaults_test.go @@ -0,0 +1,59 @@ +package v1 + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { + scheme, codecs := apitesting.SchemeForOrDie(Install) + data, err := runtime.Encode(codecs.LegacyCodec(GroupVersion), obj) + if err != nil { + t.Errorf("%v\n %#v", err, obj) + return nil + } + obj2, err := runtime.Decode(codecs.UniversalDecoder(), data) + if err != nil { + t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj) + return nil + } + obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object) + err = scheme.Convert(obj2, obj3, nil) + if err != nil { + t.Errorf("%v\nSource: %#v", err, obj2) + return nil + } + return obj3 +} + +func TestDefaults(t *testing.T) { + tests := []struct { + original *PodNodeConstraintsConfig + expected *PodNodeConstraintsConfig + }{ + { + original: &PodNodeConstraintsConfig{}, + expected: &PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"kubernetes.io/hostname"}, + }, + }, + } + for i, test := range tests { + t.Logf("test %d", i) + original := test.original + expected := test.expected + obj2 := roundTrip(t, runtime.Object(original)) + got, ok := obj2.(*PodNodeConstraintsConfig) + if !ok { + t.Errorf("unexpected object: %v", got) + t.FailNow() + } + if !reflect.DeepEqual(got, expected) { + t.Errorf("got different than expected:\nA:\t%#v\nB:\t%#v\n\nDiff:\n%s\n\n%s", got, expected, diff.ObjectDiff(expected, got), diff.ObjectGoPrintSideBySide(expected, got)) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go new file 
mode 100644 index 0000000000000..602ddf4d19a41 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints + +// Package v1 is the v1 version of the API. +package v1 diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go new file mode 100644 index 0000000000000..b836b750fdb3f --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/register.go @@ -0,0 +1,28 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func (obj *PodNodeConstraintsConfig) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +var GroupVersion = schema.GroupVersion{Group: "scheduling.openshift.io", Version: "v1"} + +var ( + localSchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypes, + podnodeconstraints.Install, + + addDefaultingFuncs, + ) + Install = localSchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &PodNodeConstraintsConfig{}, + ) + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go new file mode 100644 index 0000000000000..95e3d2220841c --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/swagger_doc.go @@ -0,0 +1,15 @@ +package v1 + +// This file contains methods that can be used by the go-restful package to generate Swagger +// documentation for the object types found in 'types.go' This file is automatically generated +// by hack/update-generated-swagger-descriptions.sh and should be run after a full build of OpenShift. +// ==== DO NOT EDIT THIS FILE MANUALLY ==== + +var map_PodNodeConstraintsConfig = map[string]string{ + "": "PodNodeConstraintsConfig is the configuration for the pod node name and node selector constraint plug-in. For accounts, serviceaccounts and groups which lack the \"pods/binding\" permission, Loading this plugin will prevent setting NodeName on pod specs and will prevent setting NodeSelectors whose labels appear in the blacklist field \"NodeSelectorLabelBlacklist\"", + "nodeSelectorLabelBlacklist": "NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the \"pods/binding\" permission", +} + +func (PodNodeConstraintsConfig) SwaggerDoc() map[string]string { + return map_PodNodeConstraintsConfig +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go new file mode 100644 index 0000000000000..3ffd5acdb8952 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/types.go @@ -0,0 +1,20 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNodeConstraintsConfig is the configuration for the pod node name +// and node selector constraint plug-in. 
For accounts, serviceaccounts +// and groups which lack the "pods/binding" permission, Loading this +// plugin will prevent setting NodeName on pod specs and will prevent +// setting NodeSelectors whose labels appear in the blacklist field +// "NodeSelectorLabelBlacklist" +type PodNodeConstraintsConfig struct { + metav1.TypeMeta `json:",inline"` + + // NodeSelectorLabelBlacklist specifies a list of labels which cannot be set by entities without the "pods/binding" permission + NodeSelectorLabelBlacklist []string `json:"nodeSelectorLabelBlacklist" description:"list of labels which cannot be set by entities without the 'pods/binding' permission"` +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..5d7b49dc20032 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNodeConstraintsConfig) DeepCopyInto(out *PodNodeConstraintsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.NodeSelectorLabelBlacklist != nil { + in, out := &in.NodeSelectorLabelBlacklist, &out.NodeSelectorLabelBlacklist + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNodeConstraintsConfig. +func (in *PodNodeConstraintsConfig) DeepCopy() *PodNodeConstraintsConfig { + if in == nil { + return nil + } + out := new(PodNodeConstraintsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodNodeConstraintsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go new file mode 100644 index 0000000000000..46ccf9f25bd18 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/zz_generated.deepcopy.go @@ -0,0 +1,39 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package podnodeconstraints + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNodeConstraintsConfig) DeepCopyInto(out *PodNodeConstraintsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.NodeSelectorLabelBlacklist != nil { + in, out := &in.NodeSelectorLabelBlacklist, &out.NodeSelectorLabelBlacklist + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNodeConstraintsConfig. 
+func (in *PodNodeConstraintsConfig) DeepCopy() *PodNodeConstraintsConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodNodeConstraintsConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodNodeConstraintsConfig) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go
new file mode 100644
index 0000000000000..991ba30174c35
--- /dev/null
+++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission.go
@@ -0,0 +1,148 @@
+package nodeenv
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apiserver/pkg/admission"
+	"k8s.io/apiserver/pkg/admission/initializer"
+	"k8s.io/client-go/informers"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector"
+	coreapi "k8s.io/kubernetes/pkg/apis/core"
+
+	projectv1 "github.com/openshift/api/project/v1"
+)
+
+func Register(plugins *admission.Plugins) {
+	plugins.Register("scheduling.openshift.io/OriginPodNodeEnvironment",
+		func(config io.Reader) (admission.Interface, error) {
+			return NewPodNodeEnvironment()
+		})
+}
+
+const (
+	timeToWaitForCacheSync  = 10 * time.Second
+	kubeProjectNodeSelector = "scheduler.alpha.kubernetes.io/node-selector"
+)
+
+// podNodeEnvironment is an implementation of admission.MutationInterface
+// and admission.ValidationInterface.
+type podNodeEnvironment struct {
+	*admission.Handler
+	nsLister       corev1listers.NamespaceLister
+	nsListerSynced func() bool
+	// TODO this should become a piece of config passed to the admission plugin
+	defaultNodeSelector string
+}
+
+var _ = initializer.WantsExternalKubeInformerFactory(&podNodeEnvironment{})
+var _ = WantsDefaultNodeSelector(&podNodeEnvironment{})
+var _ = admission.ValidationInterface(&podNodeEnvironment{})
+var _ = admission.MutationInterface(&podNodeEnvironment{})
+
+// admit enforces that a pod's node label selector is consistent with its
+// project's node label selector: conflicts are rejected, and when mutation
+// is allowed the project selector is merged into the pod.
+func (p *podNodeEnvironment) admit(ctx context.Context, a admission.Attributes, mutationAllowed bool) (err error) {
+	resource := a.GetResource().GroupResource()
+	if resource != corev1.Resource("pods") {
+		return nil
+	}
+	if a.GetSubresource() != "" {
+		// only run the checks below on pods proper and not subresources
+		return nil
+	}
+
+	obj := a.GetObject()
+	pod, ok := obj.(*coreapi.Pod)
+	if !ok {
+		return nil
+	}
+
+	name := pod.Name
+
+	if !p.waitForSyncedStore(time.After(timeToWaitForCacheSync)) {
+		return admission.NewForbidden(a, errors.New("scheduling.openshift.io/OriginPodNodeEnvironment: caches not synchronized"))
+	}
+	namespace, err := p.nsLister.Get(a.GetNamespace())
+	if err != nil {
+		return apierrors.NewForbidden(resource, name, err)
+	}
+
+	// If scheduler.alpha.kubernetes.io/node-selector is set on the namespace,
+	// do not process the pod further.
+ if _, ok := namespace.ObjectMeta.Annotations[kubeProjectNodeSelector]; ok { + return nil + } + + selector := p.defaultNodeSelector + if projectNodeSelector, ok := namespace.ObjectMeta.Annotations[projectv1.ProjectNodeSelector]; ok { + selector = projectNodeSelector + } + projectNodeSelector, err := labelselector.Parse(selector) + if err != nil { + return err + } + + if labelselector.Conflicts(projectNodeSelector, pod.Spec.NodeSelector) { + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector conflicts with its project node label selector")) + } + + if !mutationAllowed && len(labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector)) != len(pod.Spec.NodeSelector) { + // no conflict, different size => pod.Spec.NodeSelector does not contain projectNodeSelector + return apierrors.NewForbidden(resource, name, fmt.Errorf("pod node label selector does not extend project node label selector")) + } + + // modify pod node selector = project node selector + current pod node selector + pod.Spec.NodeSelector = labelselector.Merge(projectNodeSelector, pod.Spec.NodeSelector) + + return nil +} + +func (p *podNodeEnvironment) Admit(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, true) +} + +func (p *podNodeEnvironment) Validate(ctx context.Context, a admission.Attributes, _ admission.ObjectInterfaces) (err error) { + return p.admit(ctx, a, false) +} + +func (p *podNodeEnvironment) SetDefaultNodeSelector(in string) { + p.defaultNodeSelector = in +} + +func (p *podNodeEnvironment) SetExternalKubeInformerFactory(kubeInformers informers.SharedInformerFactory) { + p.nsLister = kubeInformers.Core().V1().Namespaces().Lister() + p.nsListerSynced = kubeInformers.Core().V1().Namespaces().Informer().HasSynced +} + +func (p *podNodeEnvironment) waitForSyncedStore(timeout <-chan time.Time) bool { + for !p.nsListerSynced() { + select { + case <-time.After(100 * time.Millisecond): + case <-timeout: + return p.nsListerSynced() + } + } + + return true +} + +func (p *podNodeEnvironment) ValidateInitialization() error { + if p.nsLister == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister") + } + if p.nsListerSynced == nil { + return fmt.Errorf("project node environment plugin needs a namespace lister synced") + } + return nil +} + +func NewPodNodeEnvironment() (admission.Interface, error) { + return &podNodeEnvironment{ + Handler: admission.NewHandler(admission.Create), + }, nil +} diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go new file mode 100644 index 0000000000000..6738a9b097dd9 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/admission_test.go @@ -0,0 +1,160 @@ +package nodeenv + +import ( + "context" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/admission" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector" + kapi "k8s.io/kubernetes/pkg/apis/core" + + projectv1 "github.com/openshift/api/project/v1" +) + +// TestPodAdmission verifies various scenarios involving pod/project/global node label selectors +func TestPodAdmission(t *testing.T) { + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testProject", + Namespace: "", + }, + } + 
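As a concrete trace of the selector rules `admit` applies above, here is how the labelselector helpers (added later in this patch) combine a project selector with a pod's node selector; the values mirror the "Project and pod node selector, no conflicts" test case below. Illustrative walk-through only, not plugin code:

```go
project, _ := labelselector.Parse("infra=true") // from the project annotation
pod := map[string]string{"env": "test"}         // pod.Spec.NodeSelector

conflicts := labelselector.Conflicts(project, pod) // false: no shared key disagrees
merged := labelselector.Merge(project, pod)        // map[env:test infra:true]
_, _ = conflicts, merged
// Mutating path (Admit): pod.Spec.NodeSelector becomes merged.
// Validating path: if len(merged) != len(pod.Spec.NodeSelector), the pod
// does not already carry the project selector and is forbidden.
```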
+ handler := &podNodeEnvironment{} + pod := &kapi.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "testPod"}, + } + + tests := []struct { + defaultNodeSelector string + projectNodeSelector string + podNodeSelector map[string]string + mergedNodeSelector map[string]string + ignoreProjectNodeSelector bool + admit bool + testName string + }{ + { + defaultNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "No node selectors", + }, + { + defaultNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + ignoreProjectNodeSelector: true, + admit: true, + testName: "Default node selector and no conflicts", + }, + { + defaultNodeSelector: "", + projectNodeSelector: "infra = false", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: true, + testName: "Project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{}, + admit: true, + testName: "Empty project node selector and no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{}, + mergedNodeSelector: map[string]string{"infra": "true"}, + admit: true, + testName: "Default and project node selector, no conflicts", + }, + { + defaultNodeSelector: "infra = false", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"env": "test"}, + mergedNodeSelector: map[string]string{"infra": "true", "env": "test"}, + admit: true, + testName: "Project and pod node selector, no conflicts", + }, + { + defaultNodeSelector: "env = test", + projectNodeSelector: "infra=true", + podNodeSelector: map[string]string{"infra": "false"}, + mergedNodeSelector: map[string]string{"infra": "false"}, + admit: false, + testName: "Conflicting pod and project node selector, one label", + }, + { + defaultNodeSelector: "env=dev", + projectNodeSelector: "infra=false, env = test", + podNodeSelector: map[string]string{"env": "dev", "color": "blue"}, + mergedNodeSelector: map[string]string{"env": "dev", "color": "blue"}, + admit: false, + testName: "Conflicting pod and project node selector, multiple labels", + }, + } + for _, test := range tests { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(namespace) + handler.nsLister = corev1listers.NewNamespaceLister(indexer) + handler.nsListerSynced = func() bool { return true } + handler.defaultNodeSelector = test.defaultNodeSelector + + if !test.ignoreProjectNodeSelector { + namespace.ObjectMeta.Annotations = map[string]string{projectv1.ProjectNodeSelector: test.projectNodeSelector} + } + pod.Spec = kapi.PodSpec{NodeSelector: test.podNodeSelector} + + attrs := admission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "testProject", namespace.ObjectMeta.Name, kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, nil) + err := handler.Admit(context.TODO(), attrs, nil) + if test.admit && err != nil { + t.Errorf("Test: %s, expected no error but got: %s", test.testName, err) + } else if !test.admit && err == nil { + t.Errorf("Test: %s, expected an error", test.testName) + } else if err == nil { + if err := handler.Validate(context.TODO(), attrs, nil); err != nil { + t.Errorf("Test: %s, unexpected 
Validate error after Admit succeeded: %v", test.testName, err)
+			}
+		}
+
+		if !labelselector.Equals(test.mergedNodeSelector, pod.Spec.NodeSelector) {
+			t.Errorf("Test: %s, expected: %s but got: %s", test.testName, test.mergedNodeSelector, pod.Spec.NodeSelector)
+		} else if len(test.projectNodeSelector) > 0 {
+			firstProjectKey := strings.TrimSpace(strings.Split(test.projectNodeSelector, "=")[0])
+			delete(pod.Spec.NodeSelector, firstProjectKey)
+			if err := handler.Validate(context.TODO(), attrs, nil); err == nil {
+				t.Errorf("Test: %s, expected Validate error after removing project key %q", test.testName, firstProjectKey)
+			}
+		}
+	}
+}
+
+func TestHandles(t *testing.T) {
+	for op, shouldHandle := range map[admission.Operation]bool{
+		admission.Create:  true,
+		admission.Update:  false,
+		admission.Connect: false,
+		admission.Delete:  false,
+	} {
+		nodeEnvironment, err := NewPodNodeEnvironment()
+		if err != nil {
+			t.Errorf("%v: error getting node environment: %v", op, err)
+			continue
+		}
+
+		if e, a := shouldHandle, nodeEnvironment.Handles(op); e != a {
+			t.Errorf("%v: shouldHandle=%t, handles=%t", op, e, a)
+		}
+	}
+}
diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go
new file mode 100644
index 0000000000000..534905cb06120
--- /dev/null
+++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/intializers.go
@@ -0,0 +1,28 @@
+package nodeenv
+
+import (
+	"k8s.io/apiserver/pkg/admission"
+)
+
+func NewInitializer(defaultNodeSelector string) admission.PluginInitializer {
+	return &localInitializer{
+		defaultNodeSelector: defaultNodeSelector,
+	}
+}
+
+type WantsDefaultNodeSelector interface {
+	SetDefaultNodeSelector(string)
+	admission.InitializationValidator
+}
+
+type localInitializer struct {
+	defaultNodeSelector string
+}
+
+// Initialize will check the initialization interfaces implemented by each plugin
+// and provide the appropriate initialization data
+func (i *localInitializer) Initialize(plugin admission.Interface) {
+	if wants, ok := plugin.(WantsDefaultNodeSelector); ok {
+		wants.SetDefaultNodeSelector(i.defaultNodeSelector)
+	}
+}
diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector.go
new file mode 100644
index 0000000000000..046d253396c98
--- /dev/null
+++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector.go
@@ -0,0 +1,359 @@
+// Package labelselector is a trimmed-down version of k8s/pkg/labels/selector.go.
+// It only accepts exact label matches.
+// Example: "k1=v1, k2 = v2"
+package labelselector
+
+import (
+	"fmt"
+
+	kvalidation "k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// Token constants for the lexer.
+type Token int
+
+const (
+	ErrorToken Token = iota
+	EndOfStringToken
+	CommaToken
+	EqualsToken
+	IdentifierToken // to represent keys and values
+)
+
+// string2token contains the mapping between lexer Token and token literal
+// (except IdentifierToken, EndOfStringToken and ErrorToken, which have no fixed literal)
+var string2token = map[string]Token{
+	",": CommaToken,
+	"=": EqualsToken,
+}
+
+// ScannedItem is an item produced by the lexer. It contains the Token and the literal.
+type ScannedItem struct {
+	tok     Token
+	literal string
+}
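To make the scanner concrete: the package doc's example selector lexes into the token stream below. The snippet assumes it lives in the labelselector package itself (plus an `fmt` import), since the Lexer fields are unexported:

```go
l := &Lexer{s: "k1=v1, k2 = v2"}
for {
	tok, lit := l.Lex()
	fmt.Printf("%d %q\n", tok, lit)
	if tok == EndOfStringToken || tok == ErrorToken {
		break
	}
}
// Yields: IdentifierToken("k1"), EqualsToken("="), IdentifierToken("v1"),
// CommaToken(","), IdentifierToken("k2"), EqualsToken("="),
// IdentifierToken("v2"), EndOfStringToken("").
```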
+// isWhitespace returns true if the rune is a space, tab, or newline.
+func isWhitespace(ch byte) bool {
+	return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
+}
+
+// isSpecialSymbol detects whether the character ch can be an operator
+func isSpecialSymbol(ch byte) bool {
+	switch ch {
+	case '=', ',':
+		return true
+	}
+	return false
+}
+
+// Lexer represents the Lexer struct for label selector.
+// It contains the necessary information to tokenize the input string
+type Lexer struct {
+	// s stores the string to be tokenized
+	s string
+	// pos is the position currently tokenized
+	pos int
+}
+
+// read returns the character currently lexed,
+// increments the position, and checks for buffer overflow
+func (l *Lexer) read() (b byte) {
+	b = 0
+	if l.pos < len(l.s) {
+		b = l.s[l.pos]
+		l.pos++
+	}
+	return b
+}
+
+// unread 'undoes' the last read character
+func (l *Lexer) unread() {
+	l.pos--
+}
+
+// scanIdOrKeyword scans a string to recognize a literal token or an identifier.
+func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) {
+	var buffer []byte
+IdentifierLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break IdentifierLoop
+		case isSpecialSymbol(ch) || isWhitespace(ch):
+			l.unread()
+			break IdentifierLoop
+		default:
+			buffer = append(buffer, ch)
+		}
+	}
+	s := string(buffer)
+	if val, ok := string2token[s]; ok { // is a literal token
+		return val, s
+	}
+	return IdentifierToken, s // otherwise is an identifier
+}
+
+// scanSpecialSymbol scans a string starting with a special symbol.
+// Special symbols identify non-literal operators: "="
+func (l *Lexer) scanSpecialSymbol() (Token, string) {
+	lastScannedItem := ScannedItem{}
+	var buffer []byte
+SpecialSymbolLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break SpecialSymbolLoop
+		case isSpecialSymbol(ch):
+			buffer = append(buffer, ch)
+			if token, ok := string2token[string(buffer)]; ok {
+				lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
+			} else if lastScannedItem.tok != 0 {
+				l.unread()
+				break SpecialSymbolLoop
+			}
+		default:
+			l.unread()
+			break SpecialSymbolLoop
+		}
+	}
+	if lastScannedItem.tok == 0 {
+		return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
+	}
+	return lastScannedItem.tok, lastScannedItem.literal
+}
+
+// skipWhiteSpaces consumes all blank characters,
+// returning the first non-blank character
+func (l *Lexer) skipWhiteSpaces(ch byte) byte {
+	for {
+		if !isWhitespace(ch) {
+			return ch
+		}
+		ch = l.read()
+	}
+}
+
+// Lex returns a pair of Token and literal;
+// the literal is meaningful only for IdentifierToken
+func (l *Lexer) Lex() (tok Token, lit string) {
+	switch ch := l.skipWhiteSpaces(l.read()); {
+	case ch == 0:
+		return EndOfStringToken, ""
+	case isSpecialSymbol(ch):
+		l.unread()
+		return l.scanSpecialSymbol()
+	default:
+		l.unread()
+		return l.scanIdOrKeyword()
+	}
+}
+
+// Parser holds the label selector parser state
+type Parser struct {
+	l            *Lexer
+	scannedItems []ScannedItem
+	position     int
+}
+
+// lookahead returns the current token and literal without advancing the position
+func (p *Parser) lookahead() (Token, string) {
+	tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
+	return tok, lit
+}
+// Parser contains the label selector parser data structure
+type Parser struct {
+	l            *Lexer
+	scannedItems []ScannedItem
+	position     int
+}
+
+// lookahead returns the current token and literal without incrementing the position
+func (p *Parser) lookahead() (Token, string) {
+	tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
+	return tok, lit
+}
+
+// consume returns the current token and literal and increments the position
+func (p *Parser) consume() (Token, string) {
+	p.position++
+	if p.position > len(p.scannedItems) {
+		return EndOfStringToken, ""
+	}
+	tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
+	return tok, lit
+}
+
+// scan runs through the input string and stores the ScannedItems in an array
+// so the Parser can lookahead and consume the tokens
+func (p *Parser) scan() {
+	for {
+		token, literal := p.l.Lex()
+		p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
+		if token == EndOfStringToken {
+			break
+		}
+	}
+}
+
+// parse runs the left recursive descending algorithm
+// on the input string. It returns a map[key]value.
+func (p *Parser) parse() (map[string]string, error) {
+	p.scan() // init scannedItems
+
+	labelsMap := map[string]string{}
+	for {
+		tok, lit := p.lookahead()
+		switch tok {
+		case IdentifierToken:
+			key, value, err := p.parseLabel()
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse requirement: %v", err)
+			}
+			labelsMap[key] = value
+			t, l := p.consume()
+			switch t {
+			case EndOfStringToken:
+				return labelsMap, nil
+			case CommaToken:
+				t2, l2 := p.lookahead()
+				if t2 != IdentifierToken {
+					return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
+				}
+			default:
+				return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
+			}
+		case EndOfStringToken:
+			return labelsMap, nil
+		default:
+			return nil, fmt.Errorf("found '%s', expected: identifier or 'end of string'", lit)
+		}
+	}
+}
+
+func (p *Parser) parseLabel() (string, string, error) {
+	key, err := p.parseKey()
+	if err != nil {
+		return "", "", err
+	}
+	op, err := p.parseOperator()
+	if err != nil {
+		return "", "", err
+	}
+	if op != "=" {
+		return "", "", fmt.Errorf("invalid operator: %s, expected: '='", op)
+	}
+	value, err := p.parseExactValue()
+	if err != nil {
+		return "", "", err
+	}
+	return key, value, nil
+}
+
+// parseKey parses and validates a key literal.
+func (p *Parser) parseKey() (string, error) {
+	tok, literal := p.consume()
+	if tok != IdentifierToken {
+		err := fmt.Errorf("found '%s', expected: identifier", literal)
+		return "", err
+	}
+	if err := validateLabelKey(literal); err != nil {
+		return "", err
+	}
+	return literal, nil
+}
+
+// parseOperator parses and returns the operator
+func (p *Parser) parseOperator() (op string, err error) {
+	tok, lit := p.consume()
+	switch tok {
+	case EqualsToken:
+		op = "="
+	default:
+		return "", fmt.Errorf("found '%s', expected: '='", lit)
+	}
+	return op, nil
+}
+
+// parseExactValue parses the only value form accepted: an exact match
+func (p *Parser) parseExactValue() (string, error) {
+	tok, lit := p.consume()
+	if tok != IdentifierToken && tok != EndOfStringToken {
+		return "", fmt.Errorf("found '%s', expected: identifier", lit)
+	}
+	if err := validateLabelValue(lit); err != nil {
+		return "", err
+	}
+	return lit, nil
+}
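+// The exported helpers below are typically used together. A hedged sketch of
+// the merge flow used by the nodeenv admission plugin (selector values are
+// illustrative):
+//
+//	projectSelector, _ := Parse("region=west")
+//	podSelector := map[string]string{"disk": "ssd"}
+//	if !Conflicts(projectSelector, podSelector) {
+//		merged := Merge(projectSelector, podSelector)
+//		// merged == map[string]string{"region": "west", "disk": "ssd"}
+//		_ = Equals(merged, merged) // true
+//	}
+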
+// Parse takes a string representing a selector and returns
+// map[key]value, or an error.
+// The input will cause an error if it does not follow this form:
+//
+// <selector-syntax> ::= [ <requirement> | <requirement> "," <selector-syntax> ]
+// <requirement> ::= KEY "=" VALUE
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 64 characters.
+// Delimiter is white space: (' ', '\t')
+func Parse(selector string) (map[string]string, error) {
+	p := &Parser{l: &Lexer{s: selector, pos: 0}}
+	labels, err := p.parse()
+	if err != nil {
+		return map[string]string{}, err
+	}
+	return labels, nil
+}
+
+// Conflicts takes two maps and
+// returns true if there is a key match between the maps but the values do not match;
+// it returns false in all other cases
+func Conflicts(labels1, labels2 map[string]string) bool {
+	for k, v := range labels1 {
+		if val, match := labels2[k]; match {
+			if val != v {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// Merge combines the given maps.
+// Note: it does not check for any conflicts between the maps
+func Merge(labels1, labels2 map[string]string) map[string]string {
+	mergedMap := map[string]string{}
+
+	for k, v := range labels1 {
+		mergedMap[k] = v
+	}
+	for k, v := range labels2 {
+		mergedMap[k] = v
+	}
+	return mergedMap
+}
+
+// Equals returns true if the given maps are equal
+func Equals(labels1, labels2 map[string]string) bool {
+	if len(labels1) != len(labels2) {
+		return false
+	}
+
+	for k, v := range labels1 {
+		value, ok := labels2[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+const qualifiedNameErrorMsg string = "must match format [ DNS 1123 subdomain / ] DNS 1123 label"
+
+func validateLabelKey(k string) error {
+	if len(kvalidation.IsQualifiedName(k)) != 0 {
+		return field.Invalid(field.NewPath("label key"), k, qualifiedNameErrorMsg)
+	}
+	return nil
+}
+
+func validateLabelValue(v string) error {
+	if len(kvalidation.IsValidLabelValue(v)) != 0 {
+		return field.Invalid(field.NewPath("label value"), v, qualifiedNameErrorMsg)
+	}
+	return nil
+}
diff --git a/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector_test.go b/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector_test.go
new file mode 100644
index 0000000000000..f0729ae1b27a5
--- /dev/null
+++ b/openshift-kube-apiserver/admission/scheduler/nodeenv/labelselector/labelselector_test.go
@@ -0,0 +1,181 @@
+package labelselector
+
+import (
+	"testing"
+)
+
+func TestLabelSelectorParse(t *testing.T) {
+	tests := []struct {
+		selector string
+		labels   map[string]string
+		valid    bool
+	}{
+		{
+			selector: "",
+			labels:   map[string]string{},
+			valid:    true,
+		},
+		{
+			selector: " ",
+			labels:   map[string]string{},
+			valid:    true,
+		},
+		{
+			selector: "x=a",
+			labels:   map[string]string{"x": "a"},
+			valid:    true,
+		},
+		{
+			selector: "x=a,y=b,z=c",
+			labels:   map[string]string{"x": "a", "y": "b", "z": "c"},
+			valid:    true,
+		},
+		{
+			selector: "x = a, y=b ,z = c ",
+			labels:   map[string]string{"x": "a", "y": "b", "z": "c"},
+			valid:    true,
+		},
+		{
+			selector: "color=green, env = test ,service= front ",
+			labels:   map[string]string{"color": "green", "env": "test", "service": "front"},
+			valid:    true,
+		},
+		{
+			selector: ",",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x,y",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x=$y",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x!=y",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x==y",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x=a||y=b",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x in (y)",
+			labels:   map[string]string{},
+			valid:    false,
+		},
+		{
+			selector: "x notin (y)",
+			labels:
map[string]string{}, + valid: false, + }, + { + selector: "x y", + labels: map[string]string{}, + valid: false, + }, + { + selector: "node-role.kubernetes.io/infra=", + labels: map[string]string{"node-role.kubernetes.io/infra": ""}, + valid: true, + }, + } + for _, test := range tests { + labels, err := Parse(test.selector) + if test.valid && err != nil { + t.Errorf("selector: %s, expected no error but got: %s", test.selector, err) + } else if !test.valid && err == nil { + t.Errorf("selector: %s, expected an error", test.selector) + } + + if !Equals(labels, test.labels) { + t.Errorf("expected: %s but got: %s", test.labels, labels) + } + } +} + +func TestLabelConflict(t *testing.T) { + tests := []struct { + labels1 map[string]string + labels2 map[string]string + conflict bool + }{ + { + labels1: map[string]string{}, + labels2: map[string]string{}, + conflict: false, + }, + { + labels1: map[string]string{"env": "test"}, + labels2: map[string]string{"infra": "true"}, + conflict: false, + }, + { + labels1: map[string]string{"env": "test"}, + labels2: map[string]string{"infra": "true", "env": "test"}, + conflict: false, + }, + { + labels1: map[string]string{"env": "test"}, + labels2: map[string]string{"env": "dev"}, + conflict: true, + }, + { + labels1: map[string]string{"env": "test", "infra": "false"}, + labels2: map[string]string{"infra": "true", "color": "blue"}, + conflict: true, + }, + } + for _, test := range tests { + conflict := Conflicts(test.labels1, test.labels2) + if conflict != test.conflict { + t.Errorf("expected: %v but got: %v", test.conflict, conflict) + } + } +} + +func TestLabelMerge(t *testing.T) { + tests := []struct { + labels1 map[string]string + labels2 map[string]string + mergedLabels map[string]string + }{ + { + labels1: map[string]string{}, + labels2: map[string]string{}, + mergedLabels: map[string]string{}, + }, + { + labels1: map[string]string{"infra": "true"}, + labels2: map[string]string{}, + mergedLabels: map[string]string{"infra": "true"}, + }, + { + labels1: map[string]string{"infra": "true"}, + labels2: map[string]string{"env": "test", "color": "blue"}, + mergedLabels: map[string]string{"infra": "true", "env": "test", "color": "blue"}, + }, + } + for _, test := range tests { + mergedLabels := Merge(test.labels1, test.labels2) + if !Equals(mergedLabels, test.mergedLabels) { + t.Errorf("expected: %v but got: %v", test.mergedLabels, mergedLabels) + } + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go new file mode 100644 index 0000000000000..05ef26277fcac --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission.go @@ -0,0 +1,205 @@ +package podnodeconstraints + +import ( + "context" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" + coreapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" + v1 "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints/v1" +) + +const PluginName = "scheduling.openshift.io/PodNodeConstraints" + +func Register(plugins *admission.Plugins) { + plugins.Register(PluginName, + 
func(config io.Reader) (admission.Interface, error) { + pluginConfig, err := readConfig(config) + if err != nil { + return nil, err + } + if pluginConfig == nil { + klog.Infof("Admission plugin %q is not configured so it will be disabled.", PluginName) + return nil, nil + } + return NewPodNodeConstraints(pluginConfig, nodeidentifier.NewDefaultNodeIdentifier()), nil + }) +} + +// NewPodNodeConstraints creates a new admission plugin to prevent objects that contain pod templates +// from containing node bindings by name or selector based on role permissions. +func NewPodNodeConstraints(config *podnodeconstraints.PodNodeConstraintsConfig, nodeIdentifier nodeidentifier.NodeIdentifier) admission.Interface { + plugin := podNodeConstraints{ + config: config, + Handler: admission.NewHandler(admission.Create, admission.Update), + nodeIdentifier: nodeIdentifier, + } + if config != nil { + plugin.selectorLabelBlacklist = sets.NewString(config.NodeSelectorLabelBlacklist...) + } + + return &plugin +} + +type podNodeConstraints struct { + *admission.Handler + selectorLabelBlacklist sets.String + config *podnodeconstraints.PodNodeConstraintsConfig + authorizer authorizer.Authorizer + nodeIdentifier nodeidentifier.NodeIdentifier +} + +var _ = initializer.WantsAuthorizer(&podNodeConstraints{}) +var _ = admission.ValidationInterface(&podNodeConstraints{}) + +func shouldCheckResource(resource schema.GroupResource, kind schema.GroupKind) (bool, error) { + expectedKind, shouldCheck := resourcesToCheck[resource] + if !shouldCheck { + return false, nil + } + if expectedKind != kind { + return false, fmt.Errorf("Unexpected resource kind %v for resource %v", &kind, &resource) + } + return true, nil +} + +// resourcesToCheck is a map of resources and corresponding kinds of things that we want handled in this plugin +var resourcesToCheck = map[schema.GroupResource]schema.GroupKind{ + coreapi.Resource("pods"): coreapi.Kind("Pod"), +} + +func readConfig(reader io.Reader) (*podnodeconstraints.PodNodeConstraintsConfig, error) { + obj, err := helpers.ReadYAMLToInternal(reader, podnodeconstraints.Install, v1.Install) + if err != nil { + return nil, err + } + if obj == nil { + return nil, nil + } + config, ok := obj.(*podnodeconstraints.PodNodeConstraintsConfig) + if !ok { + return nil, fmt.Errorf("unexpected config object: %#v", obj) + } + // No validation needed since config is just list of strings + return config, nil +} + +func (o *podNodeConstraints) Validate(ctx context.Context, attr admission.Attributes, _ admission.ObjectInterfaces) error { + switch { + case o.config == nil, + attr.GetSubresource() != "": + return nil + } + shouldCheck, err := shouldCheckResource(attr.GetResource().GroupResource(), attr.GetKind().GroupKind()) + if err != nil { + return err + } + if !shouldCheck { + return nil + } + // Only check Create operation on pods + if attr.GetResource().GroupResource() == coreapi.Resource("pods") && attr.GetOperation() != admission.Create { + return nil + } + + return o.validatePodSpec(ctx, attr, attr.GetObject().(*coreapi.Pod).Spec) +} + +// validate PodSpec if NodeName or NodeSelector are specified +func (o *podNodeConstraints) validatePodSpec(ctx context.Context, attr admission.Attributes, ps coreapi.PodSpec) error { + // a node creating a mirror pod that targets itself is allowed + // see the NodeRestriction plugin for further details + if o.isNodeSelfTargetWithMirrorPod(attr, ps.NodeName) { + return nil + } + + matchingLabels := []string{} + // nodeSelector blacklist filter + for nodeSelectorLabel := 
range ps.NodeSelector {
+		if o.selectorLabelBlacklist.Has(nodeSelectorLabel) {
+			matchingLabels = append(matchingLabels, nodeSelectorLabel)
+		}
+	}
+	// nodeName constraint
+	if len(ps.NodeName) > 0 || len(matchingLabels) > 0 {
+		allow, err := o.checkPodsBindAccess(ctx, attr)
+		if err != nil {
+			return err
+		}
+		if !allow {
+			switch {
+			case len(ps.NodeName) > 0 && len(matchingLabels) == 0:
+				return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName is prohibited by policy for your role"))
+			case len(ps.NodeName) == 0 && len(matchingLabels) > 0:
+				return admission.NewForbidden(attr, fmt.Errorf("node selection by label(s) %v is prohibited by policy for your role", matchingLabels))
+			case len(ps.NodeName) > 0 && len(matchingLabels) > 0:
+				return admission.NewForbidden(attr, fmt.Errorf("node selection by nodeName and label(s) %v is prohibited by policy for your role", matchingLabels))
+			}
+		}
+	}
+	return nil
+}
+
+func (o *podNodeConstraints) SetAuthorizer(a authorizer.Authorizer) {
+	o.authorizer = a
+}
+
+func (o *podNodeConstraints) ValidateInitialization() error {
+	if o.authorizer == nil {
+		return fmt.Errorf("%s requires an authorizer", PluginName)
+	}
+	if o.nodeIdentifier == nil {
+		return fmt.Errorf("%s requires a node identifier", PluginName)
+	}
+	return nil
+}
+
+// checkPodsBindAccess builds authorization attributes for a "create" on the
+// "pods/binding" subresource and asks the authorizer whether the requesting user holds that permission
+func (o *podNodeConstraints) checkPodsBindAccess(ctx context.Context, attr admission.Attributes) (bool, error) {
+	authzAttr := authorizer.AttributesRecord{
+		User:            attr.GetUserInfo(),
+		Verb:            "create",
+		Namespace:       attr.GetNamespace(),
+		Resource:        "pods",
+		Subresource:     "binding",
+		APIGroup:        coreapi.GroupName,
+		ResourceRequest: true,
+	}
+	if attr.GetResource().GroupResource() == coreapi.Resource("pods") {
+		authzAttr.Name = attr.GetName()
+	}
+	authorized, _, err := o.authorizer.Authorize(ctx, authzAttr)
+	return authorized == authorizer.DecisionAllow, err
+}
+
+func (o *podNodeConstraints) isNodeSelfTargetWithMirrorPod(attr admission.Attributes, nodeName string) bool {
+	// make sure we are actually trying to target a node
+	if len(nodeName) == 0 {
+		return false
+	}
+	// this check specifically requires the object to be a pod (unlike the other checks where we want any pod spec)
+	pod, ok := attr.GetObject().(*coreapi.Pod)
+	if !ok {
+		return false
+	}
+	// note that anyone can create a mirror pod, but they are not privileged in any way
+	// they are actually highly constrained since they cannot reference secrets
+	// nodes can only create and delete them, and they will delete any "orphaned" mirror pods
+	if _, isMirrorPod := pod.Annotations[coreapi.MirrorPodAnnotationKey]; !isMirrorPod {
+		return false
+	}
+	// we are targeting a node with a mirror pod
+	// confirm the user is a node that is targeting itself
+	actualNodeName, isNode := o.nodeIdentifier.NodeIdentity(attr.GetUserInfo())
+	return isNode && actualNodeName == nodeName
+}
diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go
new file mode 100644
index 0000000000000..a5587c5d0ee88
--- /dev/null
+++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/admission_test.go
@@ -0,0 +1,283 @@
+package podnodeconstraints
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"testing"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/apis/podnodeconstraints" +) + +func TestPodNodeConstraints(t *testing.T) { + ns := metav1.NamespaceDefault + tests := []struct { + config *podnodeconstraints.PodNodeConstraintsConfig + resource runtime.Object + kind schema.GroupKind + groupresource schema.GroupResource + userinfo user.Info + reviewResponse *authorizationv1.SubjectAccessReviewResponse + expectedResource string + expectedErrorMsg string + }{ + // 0: expect unspecified defaults to not error + { + config: emptyConfig(), + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 1: expect nodeSelector to error with user which lacks "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by label(s) [bogus] is prohibited by policy for your role", + }, + // 2: expect nodeName to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 3: expect nodeName and nodeSelector to fail with user that lacks "pods/binding" access + { + config: testConfig(), + resource: nodeNameNodeSelectorPod(), + userinfo: serviceaccount.UserInfo("herpy", "derpy", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "node selection by nodeName and label(s) [bogus] is prohibited by policy for your role", + }, + // 4: expect nodeSelector to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeSelectorPod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 5: expect nodeName to succeed with user that has "pods/binding" access + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: serviceaccount.UserInfo("openshift-infra", "daemonset-controller", ""), + reviewResponse: reviewResponse(true, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 6: expect nil config to bypass admission + { + config: nil, + resource: defaultPod(), + userinfo: serviceaccount.UserInfo("", "", ""), + reviewResponse: reviewResponse(false, ""), + expectedResource: "pods/binding", + expectedErrorMsg: "", + }, + // 7: expect nodeName to succeed with node user self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "", + }, + // 8: expect nodeName to fail with node user self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: 
&user.DefaultInfo{Name: "system:node:frank", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 9: expect nodeName to fail with node user non-self targeting mirror pod + { + config: testConfig(), + resource: nodeNameMirrorPod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + // 10: expect nodeName to fail with node user non-self targeting non-mirror pod + { + config: testConfig(), + resource: nodeNamePod(), + userinfo: &user.DefaultInfo{Name: "system:node:bob", Groups: []string{user.NodesGroup}}, + expectedErrorMsg: "node selection by nodeName is prohibited by policy for your role", + }, + } + for i, tc := range tests { + var expectedError error + errPrefix := fmt.Sprintf("%d", i) + prc := NewPodNodeConstraints(tc.config, nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + continue + } + attrs := admission.NewAttributesRecord(tc.resource, nil, kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Create, nil, false, tc.userinfo) + if tc.expectedErrorMsg != "" { + expectedError = admission.NewForbidden(attrs, fmt.Errorf(tc.expectedErrorMsg)) + } + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) + } +} + +func TestPodNodeConstraintsPodUpdate(t *testing.T) { + ns := metav1.NamespaceDefault + var expectedError error + errPrefix := "PodUpdate" + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(nodeNamePod(), nodeNamePod(), kapi.Kind("Pod").WithVersion("version"), ns, "test", kapi.Resource("pods").WithVersion("version"), "", admission.Update, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func TestPodNodeConstraintsNonHandledResources(t *testing.T) { + ns := metav1.NamespaceDefault + errPrefix := "ResourceQuotaTest" + var expectedError error + prc := NewPodNodeConstraints(testConfig(), nodeidentifier.NewDefaultNodeIdentifier()) + prc.(initializer.WantsAuthorizer).SetAuthorizer(fakeAuthorizer(t)) + err := prc.(admission.InitializationValidator).ValidateInitialization() + if err != nil { + checkAdmitError(t, err, expectedError, errPrefix) + return + } + attrs := admission.NewAttributesRecord(resourceQuota(), nil, kapi.Kind("ResourceQuota").WithVersion("version"), ns, "test", kapi.Resource("resourcequotas").WithVersion("version"), "", admission.Create, nil, false, serviceaccount.UserInfo("", "", "")) + err = prc.(admission.ValidationInterface).Validate(context.TODO(), attrs, nil) + checkAdmitError(t, err, expectedError, errPrefix) +} + +func emptyConfig() *podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{} +} + +func testConfig() 
*podnodeconstraints.PodNodeConstraintsConfig { + return &podnodeconstraints.PodNodeConstraintsConfig{ + NodeSelectorLabelBlacklist: []string{"bogus"}, + } +} + +func defaultPod() *kapi.Pod { + pod := &kapi.Pod{} + return pod +} + +func nodeNameNodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func nodeNamePod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeNameMirrorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Annotations = map[string]string{kapi.MirrorPodAnnotationKey: "true"} + pod.Spec.NodeName = "frank" + return pod +} + +func nodeSelectorPod() *kapi.Pod { + pod := &kapi.Pod{} + pod.Spec.NodeSelector = map[string]string{"bogus": "frank"} + return pod +} + +func resourceQuota() runtime.Object { + rq := &kapi.ResourceQuota{} + return rq +} + +func checkAdmitError(t *testing.T, err error, expectedError error, prefix string) { + switch { + case expectedError == nil && err == nil: + // continue + case expectedError != nil && err != nil && err.Error() != expectedError.Error(): + t.Errorf("%s: expected error %q, got: %q", prefix, expectedError.Error(), err.Error()) + case expectedError == nil && err != nil: + t.Errorf("%s: expected no error, got: %q", prefix, err.Error()) + case expectedError != nil && err == nil: + t.Errorf("%s: expected error %q, no error received", prefix, expectedError.Error()) + } +} + +type fakeTestAuthorizer struct { + t *testing.T +} + +func fakeAuthorizer(t *testing.T) authorizer.Authorizer { + return &fakeTestAuthorizer{ + t: t, + } +} + +func (a *fakeTestAuthorizer) Authorize(_ context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + ui := attributes.GetUser() + if ui == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("No valid UserInfo for Context") + } + // User with pods/bindings. permission: + if ui.GetName() == "system:serviceaccount:openshift-infra:daemonset-controller" { + return authorizer.DecisionAllow, "", nil + } + // User without pods/bindings. permission: + return authorizer.DecisionNoOpinion, "", nil +} + +func reviewResponse(allowed bool, msg string) *authorizationv1.SubjectAccessReviewResponse { + return &authorizationv1.SubjectAccessReviewResponse{ + Allowed: allowed, + Reason: msg, + } +} + +func TestReadConfig(t *testing.T) { + configStr := `apiVersion: scheduling.openshift.io/v1 +kind: PodNodeConstraintsConfig +nodeSelectorLabelBlacklist: + - bogus + - foo +` + buf := bytes.NewBufferString(configStr) + config, err := readConfig(buf) + if err != nil { + t.Fatalf("unexpected error reading config: %v", err) + } + if len(config.NodeSelectorLabelBlacklist) == 0 { + t.Fatalf("NodeSelectorLabelBlacklist didn't take specified value") + } +} diff --git a/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go new file mode 100644 index 0000000000000..dfdf50a8102f0 --- /dev/null +++ b/openshift-kube-apiserver/admission/scheduler/podnodeconstraints/doc.go @@ -0,0 +1,44 @@ +/* +Package podnodeconstraints contains the PodNodeConstraints admission +control plugin. This plugin allows administrators to set policy +governing the use of the NodeName and NodeSelector attributes in pod +specs. 
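+
+For illustration, a minimal pod spec this plugin constrains (a sketch using
+the internal API types, mirroring the package tests):
+
+	pod := &kapi.Pod{}
+	pod.Spec.NodeName = "node1"
+	pod.Spec.NodeSelector = map[string]string{"label1": "value1"}
+
+Creating such a pod is rejected unless the requesting user can create the
+"pods/binding" subresource, as described below.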
+ +Enabling this plugin will prevent the use of the NodeName field in Pod +templates for users and serviceaccounts which lack the "pods/binding" +permission, and which don't belong to groups which have the +"pods/binding" permission. + +This plugin will also prevent users, serviceaccounts and groups which +lack the "pods/binding" permission from specifying the NodeSelector field +in Pod templates for labels which appear in the +nodeSelectorLabelBlacklist list field. + +Configuration + +The plugin is configured via a PodNodeConstraintsConfig object in the +origin and kubernetes Master configs: + +admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +... +kubernetesMasterConfig: + admissionConfig: + pluginConfig: + PodNodeConstraints: + configuration: + apiVersion: v1 + kind: PodNodeConstraintsConfig + nodeSelectorLabelBlacklist: + - label1 + - label2 +*/ + +package podnodeconstraints diff --git a/openshift-kube-apiserver/authentication/oauth/bootstrapauthenticator.go b/openshift-kube-apiserver/authentication/oauth/bootstrapauthenticator.go new file mode 100644 index 0000000000000..3d4fa46511f3b --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/bootstrapauthenticator.go @@ -0,0 +1,102 @@ +package oauth + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" + kuser "k8s.io/apiserver/pkg/authentication/user" + + authorizationv1 "github.com/openshift/api/authorization/v1" + userv1 "github.com/openshift/api/user/v1" + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1" + bootstrap "github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator" +) + +const ClusterAdminGroup = "system:cluster-admins" + +type bootstrapAuthenticator struct { + tokens oauthclient.OAuthAccessTokenInterface + getter bootstrap.BootstrapUserDataGetter + validator OAuthTokenValidator + implicitAudiences kauthenticator.Audiences +} + +func NewBootstrapAuthenticator(tokens oauthclient.OAuthAccessTokenInterface, getter bootstrap.BootstrapUserDataGetter, implicitAudiences kauthenticator.Audiences, validators ...OAuthTokenValidator) kauthenticator.Token { + return &bootstrapAuthenticator{ + tokens: tokens, + getter: getter, + validator: OAuthTokenValidators(validators), + implicitAudiences: implicitAudiences, + } +} + +func (a *bootstrapAuthenticator) AuthenticateToken(ctx context.Context, name string) (*kauthenticator.Response, bool, error) { + token, err := a.tokens.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, false, errLookup // mask the error so we do not leak token data in logs + } + + if token.UserName != bootstrap.BootstrapUser { + return nil, false, nil + } + + data, ok, err := a.getter.Get() + if err != nil || !ok { + return nil, ok, err + } + + // this allows us to reuse existing validators + // since the uid is based on the secret, if the secret changes, all + // tokens issued for the bootstrap user before that change stop working + fakeUser := &userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID(data.UID), + }, + } + + if err := a.validator.Validate(token, fakeUser); err != nil { + return nil, false, err + } + + tokenAudiences := a.implicitAudiences + requestedAudiences, ok := kauthenticator.AudiencesFrom(ctx) + if !ok { + // default to apiserver audiences + 
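+		// (illustrative: if implicitAudiences is {"https://kubernetes.default.svc"}
+		// and the request context carries no audiences, the intersection below
+		// resolves to the full implicit set and authentication proceeds)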
requestedAudiences = a.implicitAudiences
+	}
+
+	auds := kauthenticator.Audiences(tokenAudiences).Intersect(requestedAudiences)
+	if len(auds) == 0 && len(a.implicitAudiences) != 0 {
+		return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences)
+	}
+
+	// we explicitly do not set UID as we do not want to leak any derivative of the password
+	return &kauthenticator.Response{
+		Audiences: auds,
+		User: &kuser.DefaultInfo{
+			Name: bootstrap.BootstrapUser,
+			// we cannot use SystemPrivilegedGroup because it cannot be properly scoped.
+			// see openshift/origin#18922 and how loopback connections are handled upstream via AuthorizeClientBearerToken.
+			// api aggregation with delegated authorization makes this impossible to control, see WithAlwaysAllowGroups.
+			// an openshift specific cluster role binding binds ClusterAdminGroup to the cluster role cluster-admin.
+			// thus this group is authorized to do everything via RBAC.
+			// this does make the bootstrap user susceptible to anything that causes the RBAC authorizer to fail.
+			// this is a safe trade-off because scopes must always be evaluated before RBAC for them to work at all.
+			// a failure in that logic means scopes are broken instead of a specific failure related to the bootstrap user.
+			// if this becomes a problem in the future, we could generate a custom extra value based on the secret content
+			// and store it in BootstrapUserData, similar to how UID is calculated. this extra value would then be wired
+			// to a custom authorizer that allows all actions. the problem with such an approach is that since we do not
+			// allow remote authorizers in OpenShift, the BootstrapUserDataGetter logic would have to be shared between
+			// the kube api server and osin instead of being an implementation detail hidden inside of osin. currently the
+			// only shared code is the value of the BootstrapUser constant (since it is special cased in validation).
+ Groups: []string{ClusterAdminGroup}, + Extra: map[string][]string{ + // this user still needs scopes because it can be used in OAuth flows (unlike cert based users) + authorizationv1.ScopesKey: token.Scopes, + }, + }, + }, true, nil +} diff --git a/openshift-kube-apiserver/authentication/oauth/expirationvalidator.go b/openshift-kube-apiserver/authentication/oauth/expirationvalidator.go new file mode 100644 index 0000000000000..67e2f26e2fbc8 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/expirationvalidator.go @@ -0,0 +1,31 @@ +package oauth + +import ( + "errors" + "time" + + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" +) + +var errExpired = errors.New("token is expired") + +func NewExpirationValidator() OAuthTokenValidator { + return OAuthTokenValidatorFunc( + func(token *oauthv1.OAuthAccessToken, _ *userv1.User) error { + if token.ExpiresIn > 0 { + if expire(token).Before(time.Now()) { + return errExpired + } + } + if token.DeletionTimestamp != nil { + return errExpired + } + return nil + }, + ) +} + +func expire(token *oauthv1.OAuthAccessToken) time.Time { + return token.CreationTimestamp.Add(time.Duration(token.ExpiresIn) * time.Second) +} diff --git a/openshift-kube-apiserver/authentication/oauth/expirationvalidator_test.go b/openshift-kube-apiserver/authentication/oauth/expirationvalidator_test.go new file mode 100644 index 0000000000000..f538d1a237565 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/expirationvalidator_test.go @@ -0,0 +1,72 @@ +package oauth + +import ( + "context" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" + oauthfake "github.com/openshift/client-go/oauth/clientset/versioned/fake" + userfake "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func TestAuthenticateTokenExpired(t *testing.T) { + fakeOAuthClient := oauthfake.NewSimpleClientset( + // expired token that had a lifetime of 10 minutes + &oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "token1", CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}}, + ExpiresIn: 600, + UserName: "foo", + }, + // non-expired token that has a lifetime of 10 minutes, but has a non-nil deletion timestamp + &oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "token2", CreationTimestamp: metav1.Time{Time: time.Now()}, DeletionTimestamp: &metav1.Time{}}, + ExpiresIn: 600, + UserName: "foo", + }, + ) + fakeUserClient := userfake.NewSimpleClientset(&userv1.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}}) + + tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil, NewExpirationValidator()) + + for _, tokenName := range []string{"token1", "token2"} { + userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), tokenName) + if found { + t.Error("Found token, but it should be missing!") + } + if err != errExpired { + t.Errorf("Unexpected error: %v", err) + } + if userInfo != nil { + t.Errorf("Unexpected user: %v", userInfo) + } + } +} + +func TestAuthenticateTokenValidated(t *testing.T) { + fakeOAuthClient := oauthfake.NewSimpleClientset( + &oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "token", CreationTimestamp: metav1.Time{Time: time.Now()}}, + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar"), + }, + ) + 
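+	// for reference: NewExpirationValidator treats this token as valid because
+	// expire(token) = CreationTimestamp + 600s is still in the future and the
+	// token carries no DeletionTimestamp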
fakeUserClient := userfake.NewSimpleClientset(&userv1.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}}) + + tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil, NewExpirationValidator(), NewUIDValidator()) + + userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token") + if !found { + t.Error("Did not find a token!") + } + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if userInfo == nil { + t.Error("Did not get a user!") + } +} diff --git a/openshift-kube-apiserver/authentication/oauth/interfaces.go b/openshift-kube-apiserver/authentication/oauth/interfaces.go new file mode 100644 index 0000000000000..002f30421cf1d --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/interfaces.go @@ -0,0 +1,41 @@ +package oauth + +import ( + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" +) + +type OAuthTokenValidator interface { + Validate(token *oauthv1.OAuthAccessToken, user *userv1.User) error +} + +var _ OAuthTokenValidator = OAuthTokenValidatorFunc(nil) + +type OAuthTokenValidatorFunc func(token *oauthv1.OAuthAccessToken, user *userv1.User) error + +func (f OAuthTokenValidatorFunc) Validate(token *oauthv1.OAuthAccessToken, user *userv1.User) error { + return f(token, user) +} + +var _ OAuthTokenValidator = OAuthTokenValidators(nil) + +type OAuthTokenValidators []OAuthTokenValidator + +func (v OAuthTokenValidators) Validate(token *oauthv1.OAuthAccessToken, user *userv1.User) error { + for _, validator := range v { + if err := validator.Validate(token, user); err != nil { + return err + } + } + return nil +} + +type UserToGroupMapper interface { + GroupsFor(username string) ([]*userv1.Group, error) +} + +type NoopGroupMapper struct{} + +func (n NoopGroupMapper) GroupsFor(username string) ([]*userv1.Group, error) { + return []*userv1.Group{}, nil +} diff --git a/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset.go b/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset.go new file mode 100644 index 0000000000000..892fede13429a --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset.go @@ -0,0 +1,162 @@ +package rankedset + +import "github.com/google/btree" + +// Item represents a single object in a RankedSet. +type Item interface { + // Key returns the unique identifier for this item. + Key() string + // Rank is used to sort items. + // Items with the same rank are sorted lexicographically based on Key. + Rank() int64 +} + +// RankedSet stores Items based on Key (uniqueness) and Rank (sorting). +type RankedSet struct { + rank *btree.BTree + set map[string]*treeItem +} + +// StringItem implements Item using a string. +// It has two main uses: +// 1. If all items in a RankedSet are StringItems, the set becomes a store of unique strings sorted lexicographically. +// 2. It serves as a Key item that can be passed into methods that ignore Rank such as RankedSet.Delete. +type StringItem string + +func (s StringItem) Key() string { + return string(s) +} + +func (s StringItem) Rank() int64 { + return 0 +} + +func New() *RankedSet { + return &RankedSet{ + rank: btree.New(32), + set: make(map[string]*treeItem), + } +} + +// Insert adds the item into the set. +// If an item with the same Key existed in the set, it is deleted and returned. 
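+//
+// An illustrative sequence using StringItem (rank 0, ordered by Key):
+//
+//	s := New()
+//	s.Insert(StringItem("b"))        // returns nil
+//	s.Insert(StringItem("a"))        // returns nil
+//	old := s.Insert(StringItem("a")) // returns the previous StringItem("a")
+//	_ = old                          // s.List(false) now yields "a", "b"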
+func (s *RankedSet) Insert(item Item) Item { + old := s.Delete(item) + + key := item.Key() + value := &treeItem{item: item} + + s.rank.ReplaceOrInsert(value) // should always return nil because we call Delete first + s.set[key] = value + + return old +} + +// Delete removes the item from the set based on Key (Rank is ignored). +// The removed item is returned if it existed in the set. +func (s *RankedSet) Delete(item Item) Item { + key := item.Key() + value, ok := s.set[key] + if !ok { + return nil + } + + s.rank.Delete(value) // should always return the same data as value (non-nil) + delete(s.set, key) + + return value.item +} + +func (s *RankedSet) Min() Item { + if min := s.rank.Min(); min != nil { + return min.(*treeItem).item + } + return nil +} + +func (s *RankedSet) Max() Item { + if max := s.rank.Max(); max != nil { + return max.(*treeItem).item + } + return nil +} + +func (s *RankedSet) Len() int { + return len(s.set) +} + +func (s *RankedSet) Get(item Item) Item { + if value, ok := s.set[item.Key()]; ok { + return value.item + } + return nil +} + +func (s *RankedSet) Has(item Item) bool { + _, ok := s.set[item.Key()] + return ok +} + +// List returns all items in the set in ranked order. +// If delete is set to true, the returned items are removed from the set. +func (s *RankedSet) List(delete bool) []Item { + return s.ascend( + func(item Item) bool { + return true + }, + delete, + ) +} + +// LessThan returns all items less than the given rank in ranked order. +// If delete is set to true, the returned items are removed from the set. +func (s *RankedSet) LessThan(rank int64, delete bool) []Item { + return s.ascend( + func(item Item) bool { + return item.Rank() < rank + }, + delete, + ) +} + +// setItemIterator allows callers of ascend to iterate in-order over the set. +// When this function returns false, iteration will stop. 
+type setItemIterator func(item Item) bool + +func (s *RankedSet) ascend(iterator setItemIterator, delete bool) []Item { + var items []Item + s.rank.Ascend(func(i btree.Item) bool { + item := i.(*treeItem).item + if !iterator(item) { + return false + } + items = append(items, item) + return true + }) + // delete after Ascend since it is probably not safe to remove while iterating + if delete { + for _, item := range items { + s.Delete(item) + } + } + return items +} + +var _ btree.Item = &treeItem{} + +type treeItem struct { + item Item +} + +func (i *treeItem) Less(than btree.Item) bool { + other := than.(*treeItem).item + + selfRank := i.item.Rank() + otherRank := other.Rank() + + if selfRank == otherRank { + return i.item.Key() < other.Key() + } + + return selfRank < otherRank +} diff --git a/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset_test.go b/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset_test.go new file mode 100644 index 0000000000000..92d56dc7aa63e --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/rankedset/rankedset_test.go @@ -0,0 +1,273 @@ +package rankedset + +import "testing" + +func TestRankedSet(t *testing.T) { + s := New() + a := newTestSetItem("A", 5, "AD") + b := newTestSetItem("B", 6, "BD") + c := newTestSetItem("C", 4, "CD") + d := newTestSetItem("D", 6, "DD") + e := newTestSetItem("E", 1, "ED") + + for _, tc := range []struct { + name string + f func(*testing.T) + }{ + { + name: "insert", + f: func(t *testing.T) { + assertLen(s, 0, t) + s.Insert(a) + assertLen(s, 1, t) + s.Insert(b) + assertLen(s, 2, t) + s.Insert(c) + assertLen(s, 3, t) + s.Insert(d) + assertLen(s, 4, t) + s.Insert(e) + assertLen(s, 5, t) + }, + }, + { + name: "list order", + f: func(t *testing.T) { + assertOrder(s.List(false), t, e, c, a, b, d) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "delete list order 1", + f: func(t *testing.T) { + assertItem(a, s.Delete(a), t) + assertOrder(s.List(false), t, e, c, b, d) + assertLen(s, 4, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "delete list order 2", + f: func(t *testing.T) { + assertItem(b, s.Delete(b), t) + assertOrder(s.List(false), t, e, c, d) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "has", + f: func(t *testing.T) { + assertHas("A", false, s, t) + assertHas("B", false, s, t) + assertHas("C", true, s, t) + assertHas("D", true, s, t) + assertHas("E", true, s, t) + assertHas("F", false, s, t) + }, + }, + { + name: "get", + f: func(t *testing.T) { + assertItem(nil, s.Get(StringItem("A")), t) + assertItem(nil, s.Get(StringItem("B")), t) + assertItem(c, s.Get(StringItem("C")), t) + assertItem(d, s.Get(StringItem("D")), t) + assertItem(e, s.Get(StringItem("E")), t) + assertItem(nil, s.Get(StringItem("F")), t) + }, + }, + { + name: "delete list order 3", + f: func(t *testing.T) { + assertItem(nil, s.Delete(b), t) + assertOrder(s.List(false), t, e, c, d) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "delete list order 4", + f: func(t *testing.T) { + assertItem(c, s.Delete(c), t) + assertOrder(s.List(false), t, e, d) + assertLen(s, 2, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "insert list order", + f: func(t *testing.T) { + assertItem(nil, s.Insert(a), t) + assertOrder(s.List(false), t, e, a, d) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + 
name: "less than order", + f: func(t *testing.T) { + assertOrder(s.LessThan(6, false), t, e, a) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "less than order delete", + f: func(t *testing.T) { + assertOrder(s.LessThan(6, true), t, e, a) + assertLen(s, 1, t) + assertItem(d, s.Min(), t) + assertItem(d, s.Max(), t) + }, + }, + { + name: "list order delete", + f: func(t *testing.T) { + assertOrder(s.List(true), t, d) + assertLen(s, 0, t) + assertItem(nil, s.Min(), t) + assertItem(nil, s.Max(), t) + }, + }, + { + name: "insert min max", + f: func(t *testing.T) { + assertItem(nil, s.Insert(b), t) + assertItem(nil, s.Insert(a), t) + assertItem(nil, s.Insert(e), t) + assertOrder(s.List(false), t, e, a, b) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(b, s.Max(), t) + assertItem(e, s.Delete(e), t) + assertLen(s, 2, t) + assertItem(a, s.Min(), t) + assertItem(b, s.Max(), t) + }, + }, + { + name: "insert replace", + f: func(t *testing.T) { + a0 := newTestSetItem("A", 1, "AD0") + a1 := newTestSetItem("A", 2, "AD1") + a2 := newTestSetItem("A", 3, "AD2") + + assertItem(nil, s.Insert(e), t) + assertOrder(s.List(false), t, e, a, b) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(b, s.Max(), t) + + assertItem(a, s.Insert(a0), t) + assertOrder(s.List(false), t, a0, e, b) + assertLen(s, 3, t) + assertItem(a0, s.Min(), t) + assertItem(b, s.Max(), t) + + assertItem(a0, s.Insert(a1), t) + assertOrder(s.List(false), t, e, a1, b) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(b, s.Max(), t) + + assertItem(a1, s.Insert(a2), t) + assertOrder(s.List(false), t, e, a2, b) + assertLen(s, 3, t) + assertItem(e, s.Min(), t) + assertItem(b, s.Max(), t) + }, + }, + } { + t.Run(tc.name, tc.f) + } +} + +func assertLen(s *RankedSet, length int, t *testing.T) { + if s.Len() != length { + t.Errorf("%s expected len: %d got %d for %v", t.Name(), length, s.Len(), noPointerItems(s.List(false))) + } +} + +func assertOrder(actual []Item, t *testing.T, items ...*testSetItem) { + if len(items) != len(actual) { + t.Errorf("%s expected len: %d got %d for %v and %v", t.Name(), len(items), len(actual), noPointers(items), noPointerItems(actual)) + return + } + for i, item := range items { + if actualItem := actual[i].(*testSetItem); *item != *actualItem { + t.Errorf("%s expected item: %v got %v for idx %d", t.Name(), *item, *actualItem, i) + } + } +} + +func assertItem(item *testSetItem, actual Item, t *testing.T) { + itemNil := item == nil + actualNil := actual == nil + + if itemNil != actualNil { + t.Errorf("%s expected or actual is nil: %v vs %v", t.Name(), item, actual) + return + } + + if itemNil { + return + } + + if actualItem := actual.(*testSetItem); *item != *actualItem { + t.Errorf("%s expected item: %v got %v", t.Name(), *item, *actualItem) + } +} + +func assertHas(key string, expected bool, s *RankedSet, t *testing.T) { + if expected != s.Has(StringItem(key)) { + t.Errorf("%s expected %v for %s with %v", t.Name(), expected, key, noPointerItems(s.List(false))) + } +} + +func newTestSetItem(key string, rank int64, data string) *testSetItem { + return &testSetItem{ + key: key, + rank: rank, + data: data, + } +} + +type testSetItem struct { + key string + rank int64 + data string +} + +func (i *testSetItem) Key() string { + return i.key +} + +func (i *testSetItem) Rank() int64 { + return i.rank +} + +// funcs below make the printing of these slices better + +func noPointers(items []*testSetItem) []testSetItem { + var out []testSetItem 
+ for _, item := range items { + out = append(out, *item) + } + return out +} + +func noPointerItems(items []Item) []testSetItem { + var out []testSetItem + for _, item := range items { + out = append(out, *(item.(*testSetItem))) + } + return out +} diff --git a/openshift-kube-apiserver/authentication/oauth/timeoutvalidator.go b/openshift-kube-apiserver/authentication/oauth/timeoutvalidator.go new file mode 100644 index 0000000000000..4d51502fd4569 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/timeoutvalidator.go @@ -0,0 +1,233 @@ +package oauth + +import ( + "context" + "errors" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/runtime" + + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1" + oauthclientlister "github.com/openshift/client-go/oauth/listers/oauth/v1" + "k8s.io/kubernetes/openshift-kube-apiserver/authentication/oauth/rankedset" +) + +var errTimedout = errors.New("token timed out") + +// Implements rankedset.Item +var _ = rankedset.Item(&tokenData{}) + +type tokenData struct { + token *oauthv1.OAuthAccessToken + seen time.Time +} + +func (a *tokenData) timeout() time.Time { + return a.token.CreationTimestamp.Time.Add(time.Duration(a.token.InactivityTimeoutSeconds) * time.Second) +} + +func (a *tokenData) Key() string { + return a.token.Name +} + +func (a *tokenData) Rank() int64 { + return a.timeout().Unix() +} + +func timeoutAsDuration(timeout int32) time.Duration { + return time.Duration(timeout) * time.Second +} + +type TimeoutValidator struct { + oauthClients oauthclientlister.OAuthClientLister + tokens oauthclient.OAuthAccessTokenInterface + tokenChannel chan *tokenData + data *rankedset.RankedSet + defaultTimeout time.Duration + tickerInterval time.Duration + + // fields that are used to have a deterministic order of events in unit tests + flushHandler func(flushHorizon time.Time) // allows us to decorate this func during unit tests + putTokenHandler func(td *tokenData) // allows us to decorate this func during unit tests + clock clock.Clock // allows us to control time during unit tests +} + +func NewTimeoutValidator(tokens oauthclient.OAuthAccessTokenInterface, oauthClients oauthclientlister.OAuthClientLister, defaultTimeout int32, minValidTimeout int32) *TimeoutValidator { + a := &TimeoutValidator{ + oauthClients: oauthClients, + tokens: tokens, + tokenChannel: make(chan *tokenData), + data: rankedset.New(), + defaultTimeout: timeoutAsDuration(defaultTimeout), + tickerInterval: timeoutAsDuration(minValidTimeout / 3), // we tick at least 3 times within each timeout period + clock: clock.RealClock{}, + } + a.flushHandler = a.flush + a.putTokenHandler = a.putToken + klog.V(5).Infof("Token Timeout Validator primed with defaultTimeout=%s tickerInterval=%s", a.defaultTimeout, a.tickerInterval) + return a +} + +// Validate is called with a token when it is seen by an authenticator +// it touches only the tokenChannel so it is safe to call from other threads +func (a *TimeoutValidator) Validate(token *oauthv1.OAuthAccessToken, _ *userv1.User) error { + if token.InactivityTimeoutSeconds == 0 { + // We care only if the token was created with a timeout to start with + return nil + } + + td := &tokenData{ + token: token, + seen: a.clock.Now(), + } + if td.timeout().Before(td.seen) { + return 
errTimedout
+	}
+
+	if token.ExpiresIn != 0 && token.ExpiresIn <= int64(token.InactivityTimeoutSeconds) {
+		// skip if the timeout is already larger than expiration deadline
+		return nil
+	}
+	// After a positive timeout check we need to update the timeout and
+	// schedule an update so that we can either set or update the Timeout.
+	// We do that by launching a micro goroutine to avoid blocking.
+	go a.putTokenHandler(td)
+
+	return nil
+}
+
+func (a *TimeoutValidator) putToken(td *tokenData) {
+	a.tokenChannel <- td
+}
+
+func (a *TimeoutValidator) clientTimeout(name string) time.Duration {
+	oauthClient, err := a.oauthClients.Get(name)
+	if err != nil {
+		klog.V(5).Infof("Failed to fetch OAuthClient %q for timeout value: %v", name, err)
+		return a.defaultTimeout
+	}
+	if oauthClient.AccessTokenInactivityTimeoutSeconds == nil {
+		return a.defaultTimeout
+	}
+	return timeoutAsDuration(*oauthClient.AccessTokenInactivityTimeoutSeconds)
+}
+
+func (a *TimeoutValidator) update(td *tokenData) error {
+	// Obtain the timeout interval for this client
+	delta := a.clientTimeout(td.token.ClientName)
+	// if delta is 0 it means the OAuthClient has been changed to the
+	// no-timeout value. In this case we set newTimeout also to 0 so
+	// that the token will no longer time out once updated.
+	newTimeout := int32(0)
+	if delta > 0 {
+		// InactivityTimeoutSeconds is the number of seconds since creation:
+		// InactivityTimeoutSeconds = Seen(Time) - CreationTimestamp(Time) + delta(Duration)
+		// e.g. a token created at t0 and last seen at t0+5m with a client delta
+		// of 15m gets a new timeout of 20m (1200 seconds) from creation
+		newTimeout = int32((td.seen.Sub(td.token.CreationTimestamp.Time) + delta) / time.Second)
+	}
+	// We need to get the token again here because it may have changed in the
+	// DB and we need to verify it is still worth updating
+	token, err := a.tokens.Get(context.TODO(), td.token.Name, v1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if newTimeout != 0 && token.InactivityTimeoutSeconds >= newTimeout {
+		// if the token was already updated with a higher or equal timeout we
+		// do not have anything to do
+		return nil
+	}
+	token.InactivityTimeoutSeconds = newTimeout
+	_, err = a.tokens.Update(context.TODO(), token, v1.UpdateOptions{})
+	return err
+}
+
+func (a *TimeoutValidator) flush(flushHorizon time.Time) {
+	// flush all tokens that are about to expire before the flushHorizon.
+	// Typically the flushHorizon is set to a time slightly past the next
+	// ticker interval, so that no token ends up timing out between flushes
+	klog.V(5).Infof("Flushing tokens timing out before %s", flushHorizon)
+
+	// grab all tokens that need to be updated in this flush interval
+	// and remove them from the stored data, they either flush now or never
+	tokenList := a.data.LessThan(flushHorizon.Unix(), true)
+
+	var retryList []*tokenData
+	flushedTokens := 0
+
+	for _, item := range tokenList {
+		td := item.(*tokenData)
+		err := a.update(td)
+		// not logging the full errors here as it would leak the token.
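+		// for instance, an apierrors.IsConflict error only means another
+		// apiserver instance updated the token first; such errors are retried
+		// once below, while other failures wait until the token is used again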
+ switch { + case err == nil: + flushedTokens++ + case apierrors.IsConflict(err) || apierrors.IsServerTimeout(err): + klog.V(5).Infof("Token update deferred for token belonging to %s", + td.token.UserName) + retryList = append(retryList, td) + default: + klog.V(5).Infof("Token timeout for user=%q client=%q scopes=%v was not updated", + td.token.UserName, td.token.ClientName, td.token.Scopes) + } + } + + // we try once more and if it still fails we stop trying here and defer + // to a future regular update if the token is used again + for _, td := range retryList { + err := a.update(td) + if err != nil { + klog.V(5).Infof("Token timeout for user=%q client=%q scopes=%v was not updated", + td.token.UserName, td.token.ClientName, td.token.Scopes) + } else { + flushedTokens++ + } + } + + klog.V(5).Infof("Successfully flushed %d tokens out of %d", + flushedTokens, len(tokenList)) +} + +func (a *TimeoutValidator) nextTick() time.Time { + // Add a small safety Margin so flushes tend to + // overlap a little rather than have gaps + return a.clock.Now().Add(a.tickerInterval + 10*time.Second) +} + +func (a *TimeoutValidator) Run(stopCh <-chan struct{}) { + defer runtime.HandleCrash() + klog.V(5).Infof("Started Token Timeout Flush Handling thread!") + + ticker := a.clock.NewTicker(a.tickerInterval) + // make sure to kill the ticker when we exit + defer ticker.Stop() + + nextTick := a.nextTick() + + for { + select { + case <-stopCh: + // if channel closes terminate + return + + case td := <-a.tokenChannel: + a.data.Insert(td) + // if this token is going to time out before the timer, flush now + tokenTimeout := td.timeout() + if tokenTimeout.Before(nextTick) { + klog.V(5).Infof("Timeout for user=%q client=%q scopes=%v falls before next ticker (%s < %s), forcing flush!", + td.token.UserName, td.token.ClientName, td.token.Scopes, tokenTimeout, nextTick) + a.flushHandler(nextTick) + } + + case <-ticker.C(): + nextTick = a.nextTick() + a.flushHandler(nextTick) + } + } +} diff --git a/openshift-kube-apiserver/authentication/oauth/tokenauthenticator.go b/openshift-kube-apiserver/authentication/oauth/tokenauthenticator.go new file mode 100644 index 0000000000000..7153b79b150b6 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/tokenauthenticator.go @@ -0,0 +1,84 @@ +package oauth + +import ( + "context" + "errors" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kauthenticator "k8s.io/apiserver/pkg/authentication/authenticator" + kuser "k8s.io/apiserver/pkg/authentication/user" + + authorizationv1 "github.com/openshift/api/authorization/v1" + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1" + userclient "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" +) + +var errLookup = errors.New("token lookup failed") + +type tokenAuthenticator struct { + tokens oauthclient.OAuthAccessTokenInterface + users userclient.UserInterface + groupMapper UserToGroupMapper + validators OAuthTokenValidator + implicitAuds kauthenticator.Audiences +} + +func NewTokenAuthenticator(tokens oauthclient.OAuthAccessTokenInterface, users userclient.UserInterface, groupMapper UserToGroupMapper, implicitAuds kauthenticator.Audiences, validators ...OAuthTokenValidator) kauthenticator.Token { + return &tokenAuthenticator{ + tokens: tokens, + users: users, + groupMapper: groupMapper, + validators: OAuthTokenValidators(validators), + implicitAuds: implicitAuds, + } +} + +func (a *tokenAuthenticator) AuthenticateToken(ctx context.Context, name string) 
(*kauthenticator.Response, bool, error) { + token, err := a.tokens.Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, false, errLookup // mask the error so we do not leak token data in logs + } + + user, err := a.users.Get(context.TODO(), token.UserName, metav1.GetOptions{}) + if err != nil { + return nil, false, err + } + + if err := a.validators.Validate(token, user); err != nil { + return nil, false, err + } + + groups, err := a.groupMapper.GroupsFor(user.Name) + if err != nil { + return nil, false, err + } + groupNames := make([]string, 0, len(groups)) + for _, group := range groups { + groupNames = append(groupNames, group.Name) + } + + tokenAudiences := a.implicitAuds + requestedAudiences, ok := kauthenticator.AudiencesFrom(ctx) + if !ok { + // default to apiserver audiences + requestedAudiences = a.implicitAuds + } + + auds := kauthenticator.Audiences(tokenAudiences).Intersect(requestedAudiences) + if len(auds) == 0 && len(a.implicitAuds) != 0 { + return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences) + } + + return &kauthenticator.Response{ + User: &kuser.DefaultInfo{ + Name: user.Name, + UID: string(user.UID), + Groups: groupNames, + Extra: map[string][]string{ + authorizationv1.ScopesKey: token.Scopes, + }, + }, + Audiences: auds, + }, true, nil +} diff --git a/openshift-kube-apiserver/authentication/oauth/tokenauthenticator_test.go b/openshift-kube-apiserver/authentication/oauth/tokenauthenticator_test.go new file mode 100644 index 0000000000000..dae9e7445a150 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/tokenauthenticator_test.go @@ -0,0 +1,354 @@ +package oauth + +import ( + "context" + "errors" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apiserver/pkg/authentication/authenticator" + clienttesting "k8s.io/client-go/testing" + + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" + oauthfake "github.com/openshift/client-go/oauth/clientset/versioned/fake" + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned/typed/oauth/v1" + userfake "github.com/openshift/client-go/user/clientset/versioned/fake" +) + +func TestAuthenticateTokenInvalidUID(t *testing.T) { + fakeOAuthClient := oauthfake.NewSimpleClientset( + &oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "token", CreationTimestamp: metav1.Time{Time: time.Now()}}, + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar1"), + }, + ) + fakeUserClient := userfake.NewSimpleClientset(&userv1.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar2"}}) + + tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil, NewUIDValidator()) + + userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token") + if found { + t.Error("Found token, but it should be missing!") + } + if err.Error() != "user.UID (bar2) does not match token.userUID (bar1)" { + t.Errorf("Unexpected error: %v", err) + } + if userInfo != nil { + t.Errorf("Unexpected user: %v", userInfo) + } +} + +func TestAuthenticateTokenNotFoundSuppressed(t *testing.T) { + fakeOAuthClient := oauthfake.NewSimpleClientset() + fakeUserClient := userfake.NewSimpleClientset() + tokenAuthenticator := 
NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil) + + userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token") + if found { + t.Error("Found token, but it should be missing!") + } + if err != errLookup { + t.Error("Expected not found error to be suppressed with lookup error") + } + if userInfo != nil { + t.Errorf("Unexpected user: %v", userInfo) + } +} + +func TestAuthenticateTokenOtherGetErrorSuppressed(t *testing.T) { + fakeOAuthClient := oauthfake.NewSimpleClientset() + fakeOAuthClient.PrependReactor("get", "oauthaccesstokens", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("get error") + }) + fakeUserClient := userfake.NewSimpleClientset() + tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil) + + userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token") + if found { + t.Error("Found token, but it should be missing!") + } + if err != errLookup { + t.Error("Expected custom get error to be suppressed with lookup error") + } + if userInfo != nil { + t.Errorf("Unexpected user: %v", userInfo) + } +} + +func TestAuthenticateTokenTimeout(t *testing.T) { + stopCh := make(chan struct{}) + defer close(stopCh) + + testClock := clock.NewFakeClock(time.Time{}) + + defaultTimeout := int32(30) // 30 seconds + clientTimeout := int32(15) // 15 seconds + minTimeout := int32(10) // 10 seconds -> 10/3 = a tick per 3 seconds + + testClient := oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "testClient"}, + AccessTokenInactivityTimeoutSeconds: &clientTimeout, + } + quickClient := oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "quickClient"}, + AccessTokenInactivityTimeoutSeconds: &minTimeout, + } + slowClient := oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "slowClient"}, + } + testToken := oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "testToken", CreationTimestamp: metav1.Time{Time: testClock.Now()}}, + ClientName: "testClient", + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar"), + InactivityTimeoutSeconds: clientTimeout, + } + quickToken := oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "quickToken", CreationTimestamp: metav1.Time{Time: testClock.Now()}}, + ClientName: "quickClient", + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar"), + InactivityTimeoutSeconds: minTimeout, + } + slowToken := oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "slowToken", CreationTimestamp: metav1.Time{Time: testClock.Now()}}, + ClientName: "slowClient", + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar"), + InactivityTimeoutSeconds: defaultTimeout, + } + emergToken := oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: "emergToken", CreationTimestamp: metav1.Time{Time: testClock.Now()}}, + ClientName: "quickClient", + ExpiresIn: 600, // 10 minutes + UserName: "foo", + UserUID: string("bar"), + InactivityTimeoutSeconds: 5, // super short timeout + } + fakeOAuthClient := oauthfake.NewSimpleClientset(&testToken, &quickToken, &slowToken, &emergToken, &testClient, &quickClient, &slowClient) + fakeUserClient := userfake.NewSimpleClientset(&userv1.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}}) + accessTokenGetter := 
fakeOAuthClient.OauthV1().OAuthAccessTokens() + oauthClients := fakeOAuthClient.OauthV1().OAuthClients() + lister := &fakeOAuthClientLister{ + clients: oauthClients, + } + + timeouts := NewTimeoutValidator(accessTokenGetter, lister, defaultTimeout, minTimeout) + + // inject fake clock, which has some interesting properties + // 1. A sleep will cause at most one ticker event, regardless of how long the sleep was + // 2. The clock will hold one tick event and will drop the next one if something does not consume it first + timeouts.clock = testClock + + // decorate flush + // The fake clock 1. and 2. require that we issue a wait(t, timeoutsSync) after each testClock.Sleep that causes a tick + originalFlush := timeouts.flushHandler + timeoutsSync := make(chan struct{}) + timeouts.flushHandler = func(flushHorizon time.Time) { + originalFlush(flushHorizon) + timeoutsSync <- struct{}{} // signal that flush is complete so we never race against it + } + + // decorate putToken + // We must issue a wait(t, putTokenSync) after each call to checkToken that should be successful + originalPutToken := timeouts.putTokenHandler + putTokenSync := make(chan struct{}) + timeouts.putTokenHandler = func(td *tokenData) { + originalPutToken(td) + putTokenSync <- struct{}{} // signal that putToken is complete so we never race against it + } + + // add some padding to all sleep invocations to make sure we are not failing on any boundary values + buffer := time.Nanosecond + + tokenAuthenticator := NewTokenAuthenticator(accessTokenGetter, fakeUserClient.UserV1().Users(), NoopGroupMapper{}, nil, timeouts) + + go timeouts.Run(stopCh) + + // TIME: 0 seconds have passed here + + // first time should succeed for all + checkToken(t, "testToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + checkToken(t, "quickToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) // from emergency flush because quickToken has a short enough timeout + + checkToken(t, "slowToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + // this should cause an emergency flush, if not the next auth will fail, + // as the token will be timed out + checkToken(t, "emergToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) // from emergency flush because emergToken has a super short timeout + + // wait 6 seconds + testClock.Sleep(5*time.Second + buffer) + + // a tick happens every 3 seconds + wait(t, timeoutsSync) + + // TIME: 6th second + + // See if emergency flush happened + checkToken(t, "emergToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) // from emergency flush because emergToken has a super short timeout + + // wait for timeout (minTimeout + 1 - the previously waited 6 seconds) + testClock.Sleep(time.Duration(minTimeout-5)*time.Second + buffer) + wait(t, timeoutsSync) + + // TIME: 11th second + + // now we change the testClient and see if the testToken will still be + // valid instead of timing out + changeClient, ret := oauthClients.Get(context.TODO(), "testClient", metav1.GetOptions{}) + if ret != nil { + t.Error("Failed to get testClient") + } else { + longTimeout := int32(20) + changeClient.AccessTokenInactivityTimeoutSeconds = &longTimeout + _, ret = oauthClients.Update(context.TODO(), changeClient, metav1.UpdateOptions{}) + if ret != nil { + t.Error("Failed to update testClient") + } + } 
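+ + // still in the 11th second: quickToken has been inactive longer than its + // 10 second timeout and must be rejected below, while testToken should + // survive and pick up the new 20 second client timeout the next time it is used.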
+ + // this should fail, thus no call to wait(t, putTokenSync) + checkToken(t, "quickToken", tokenAuthenticator, accessTokenGetter, testClock, false) + + // while this should get updated + checkToken(t, "testToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) + + // wait for timeout + testClock.Sleep(time.Duration(clientTimeout+1)*time.Second + buffer) + + // 16 seconds equals 5 more flushes, but the fake clock will only tick once during this time + wait(t, timeoutsSync) + + // TIME: 27th second + + // this should get updated + checkToken(t, "slowToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) + + // while this should not fail + checkToken(t, "testToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) + // and should be updated to last at least till the 31st second + token, err := accessTokenGetter.Get(context.TODO(), "testToken", metav1.GetOptions{}) + if err != nil { + t.Error("Failed to get testToken") + } else { + if token.InactivityTimeoutSeconds < 31 { + t.Errorf("Expected timeout in more than 31 seconds, found: %d", token.InactivityTimeoutSeconds) + } + } + + // now change testClient again, so that tokens do not expire anymore + changeclient, ret := oauthClients.Get(context.TODO(), "testClient", metav1.GetOptions{}) + if ret != nil { + t.Error("Failed to get testClient") + } else { + changeclient.AccessTokenInactivityTimeoutSeconds = new(int32) + _, ret = oauthClients.Update(context.TODO(), changeclient, metav1.UpdateOptions{}) + if ret != nil { + t.Error("Failed to update testClient") + } + } + + // and wait until test token should time out, and has been flushed for sure + testClock.Sleep(time.Duration(minTimeout)*time.Second + buffer) + wait(t, timeoutsSync) + + // while this should not fail + checkToken(t, "testToken", tokenAuthenticator, accessTokenGetter, testClock, true) + wait(t, putTokenSync) + + wait(t, timeoutsSync) + + // and should be updated to have a ZERO timeout + token, err = accessTokenGetter.Get(context.TODO(), "testToken", metav1.GetOptions{}) + if err != nil { + t.Error("Failed to get testToken") + } else { + if token.InactivityTimeoutSeconds != 0 { + t.Errorf("Expected timeout of 0 seconds, found: %d", token.InactivityTimeoutSeconds) + } + } +} + +type fakeOAuthClientLister struct { + clients oauthclient.OAuthClientInterface +} + +func (f fakeOAuthClientLister) Get(name string) (*oauthv1.OAuthClient, error) { + return f.clients.Get(context.TODO(), name, metav1.GetOptions{}) +} + +func (f fakeOAuthClientLister) List(selector labels.Selector) ([]*oauthv1.OAuthClient, error) { + panic("not used") +} + +func checkToken(t *testing.T, name string, authf authenticator.Token, tokens oauthclient.OAuthAccessTokenInterface, current clock.Clock, present bool) { + t.Helper() + userInfo, found, err := authf.AuthenticateToken(context.TODO(), name) + if present { + if !found { + t.Errorf("Did not find token %s!", name) + } + if err != nil { + t.Errorf("Unexpected error checking for token %s: %v", name, err) + } + if userInfo == nil { + t.Errorf("Did not get a user for token %s!", name) + } + } else { + if found { + token, tokenErr := tokens.Get(context.TODO(), name, metav1.GetOptions{}) + if tokenErr != nil { + t.Fatal(tokenErr) + } + t.Errorf("Found token (created=%s, timeout=%d, now=%s), but it should be gone!", + token.CreationTimestamp, token.InactivityTimeoutSeconds, current.Now()) + } + 
if err != errTimedout { + t.Errorf("Unexpected error checking absence of token %s: %v", name, err) + } + if userInfo != nil { + t.Errorf("Unexpected user checking absence of token %s: %v", name, userInfo) + } + } +} + +func wait(t *testing.T, c chan struct{}) { + t.Helper() + select { + case <-c: + case <-time.After(30 * time.Second): + t.Fatal("failed to see channel event") + } +} diff --git a/openshift-kube-apiserver/authentication/oauth/uidvalidator.go b/openshift-kube-apiserver/authentication/oauth/uidvalidator.go new file mode 100644 index 0000000000000..a6029e5d51030 --- /dev/null +++ b/openshift-kube-apiserver/authentication/oauth/uidvalidator.go @@ -0,0 +1,21 @@ +package oauth + +import ( + "fmt" + + oauthv1 "github.com/openshift/api/oauth/v1" + userv1 "github.com/openshift/api/user/v1" +) + +const errInvalidUIDStr = "user.UID (%s) does not match token.userUID (%s)" + +func NewUIDValidator() OAuthTokenValidator { + return OAuthTokenValidatorFunc( + func(token *oauthv1.OAuthAccessToken, user *userv1.User) error { + if string(user.UID) != token.UserUID { + return fmt.Errorf(errInvalidUIDStr, user.UID, token.UserUID) + } + return nil + }, + ) +} diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go new file mode 100644 index 0000000000000..2b39b309f69b8 --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer.go @@ -0,0 +1,107 @@ +package browsersafe + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +const ( + proxyAction = "proxy" + unsafeProxy = "unsafeproxy" +) + +type browserSafeAuthorizer struct { + delegate authorizer.Authorizer + + // list of groups, any of which indicate the request is authenticated + authenticatedGroups sets.String +} + +func NewBrowserSafeAuthorizer(delegate authorizer.Authorizer, authenticatedGroups ...string) authorizer.Authorizer { + return &browserSafeAuthorizer{ + delegate: delegate, + authenticatedGroups: sets.NewString(authenticatedGroups...), + } +} + +func (a *browserSafeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + attrs := a.getBrowserSafeAttributes(attributes) + decision, reason, err := a.delegate.Authorize(ctx, attrs) + safeAttributes, changed := attrs.(*browserSafeAttributes) + + // check if the request was not allowed and we changed the attributes + if decision == authorizer.DecisionAllow || !changed { + return decision, reason, err + } + + // if so, use this information to update the reason + return decision, safeAttributes.reason(reason), err +} + +func (a *browserSafeAuthorizer) getBrowserSafeAttributes(attributes authorizer.Attributes) authorizer.Attributes { + if !attributes.IsResourceRequest() { + return attributes + } + + isProxyVerb := attributes.GetVerb() == proxyAction + isProxySubresource := attributes.GetSubresource() == proxyAction + + if !isProxyVerb && !isProxySubresource { + // Requests to non-proxy resources don't expose HTML or HTTP-handling user content to browsers + return attributes + } + + if user := attributes.GetUser(); user != nil { + if a.authenticatedGroups.HasAny(user.GetGroups()...) { + // An authenticated request indicates this isn't a browser page load. + // Browsers cannot make direct authenticated requests. + // This depends on the API not enabling basic or cookie-based auth. 
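+ // Unauthenticated proxy requests fall through to the attribute rewrite + // below, so authorization policy must explicitly grant the distinct + // unsafeproxy verb or subresource before a possible browser request is allowed.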
+ return attributes + } + } + + return &browserSafeAttributes{ + Attributes: attributes, + isProxyVerb: isProxyVerb, + isProxySubresource: isProxySubresource, + } +} + +type browserSafeAttributes struct { + authorizer.Attributes + + isProxyVerb, isProxySubresource bool +} + +func (b *browserSafeAttributes) GetVerb() string { + if b.isProxyVerb { + return unsafeProxy + } + return b.Attributes.GetVerb() +} + +func (b *browserSafeAttributes) GetSubresource() string { + if b.isProxySubresource { + return unsafeProxy + } + return b.Attributes.GetSubresource() +} + +func (b *browserSafeAttributes) reason(reason string) string { + if b.isProxyVerb { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s verb changed to %s", proxyAction, unsafeProxy) + } + if b.isProxySubresource { + if len(reason) != 0 { + reason += ", " + } + reason += fmt.Sprintf("%s subresource changed to %s", proxyAction, unsafeProxy) + } + return reason +} diff --git a/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go new file mode 100644 index 0000000000000..1d14a86daddeb --- /dev/null +++ b/openshift-kube-apiserver/authorization/browsersafe/authorizer_test.go @@ -0,0 +1,80 @@ +package browsersafe + +import ( + "context" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +func TestBrowserSafeAuthorizer(t *testing.T) { + for name, tc := range map[string]struct { + attributes authorizer.Attributes + + expectedVerb string + expectedSubresource string + expectedReason string + }{ + "non-resource": { + attributes: authorizer.AttributesRecord{ResourceRequest: false, Verb: "GET"}, + expectedVerb: "GET", + }, + + "non-proxy": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "logs"}, + expectedVerb: "get", + expectedSubresource: "logs", + }, + + "unsafe proxy subresource": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy"}, + expectedVerb: "get", + expectedSubresource: "unsafeproxy", + expectedReason: "proxy subresource changed to unsafeproxy", + }, + "unsafe proxy verb": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes"}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + "unsafe proxy verb anonymous": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "proxy", Resource: "nodes", + User: &user.DefaultInfo{Name: "system:anonymous", Groups: []string{"system:unauthenticated"}}}, + expectedVerb: "unsafeproxy", + expectedReason: "proxy verb changed to unsafeproxy", + }, + + "proxy subresource authenticated": { + attributes: authorizer.AttributesRecord{ResourceRequest: true, Verb: "get", Resource: "pods", Subresource: "proxy", + User: &user.DefaultInfo{Name: "bob", Groups: []string{"system:authenticated"}}}, + expectedVerb: "get", + expectedSubresource: "proxy", + }, + } { + delegateAuthorizer := &recordingAuthorizer{} + safeAuthorizer := NewBrowserSafeAuthorizer(delegateAuthorizer, "system:authenticated") + + authorized, reason, err := safeAuthorizer.Authorize(context.TODO(), tc.attributes) + if authorized == authorizer.DecisionAllow || reason != tc.expectedReason || err != nil { + t.Errorf("%s: unexpected output: %v %s %v", name, authorized, reason, err) + continue + } + + if delegateAuthorizer.attributes.GetVerb() != 
tc.expectedVerb { + t.Errorf("%s: expected verb %s, got %s", name, tc.expectedVerb, delegateAuthorizer.attributes.GetVerb()) + } + if delegateAuthorizer.attributes.GetSubresource() != tc.expectedSubresource { + t.Errorf("%s: expected subresource %s, got %s", name, tc.expectedSubresource, delegateAuthorizer.attributes.GetSubresource()) + } + } +} + +type recordingAuthorizer struct { + attributes authorizer.Attributes +} + +func (t *recordingAuthorizer) Authorize(_ context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) { + t.attributes = a + return authorizer.DecisionNoOpinion, "", nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go new file mode 100644 index 0000000000000..989f70609528d --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer.go @@ -0,0 +1,49 @@ +package scopeauthorizer + +import ( + "context" + "fmt" + + "k8s.io/apiserver/pkg/authorization/authorizer" + rbaclisters "k8s.io/client-go/listers/rbac/v1" + authorizerrbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/apiserver-library-go/pkg/authorization/scope" +) + +type scopeAuthorizer struct { + clusterRoleGetter rbaclisters.ClusterRoleLister +} + +func NewAuthorizer(clusterRoleGetter rbaclisters.ClusterRoleLister) authorizer.Authorizer { + return &scopeAuthorizer{clusterRoleGetter: clusterRoleGetter} +} + +func (a *scopeAuthorizer) Authorize(ctx context.Context, attributes authorizer.Attributes) (authorizer.Decision, string, error) { + user := attributes.GetUser() + if user == nil { + return authorizer.DecisionNoOpinion, "", fmt.Errorf("user missing from context") + } + + scopes := user.GetExtra()[authorizationv1.ScopesKey] + if len(scopes) == 0 { + return authorizer.DecisionNoOpinion, "", nil + } + + nonFatalErrors := "" + + // scopeResolutionErrors aren't fatal. If any of the scopes we find allow this, then the overall scope limits allow it + rules, err := scope.ScopesToRules(scopes, attributes.GetNamespace(), a.clusterRoleGetter) + if err != nil { + nonFatalErrors = fmt.Sprintf(", additionally the following non-fatal errors were reported: %v", err) + } + + // check rules against attributes + if authorizerrbac.RulesAllow(attributes, rules...) { + return authorizer.DecisionNoOpinion, "", nil + } + + // the scopes prevent this. 
We need to authoritatively deny + return authorizer.DecisionDeny, fmt.Sprintf("scopes %v prevent this action%s", scopes, nonFatalErrors), nil +} diff --git a/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go new file mode 100644 index 0000000000000..9b73e6c2e23ac --- /dev/null +++ b/openshift-kube-apiserver/authorization/scopeauthorizer/authorizer_test.go @@ -0,0 +1,150 @@ +package scopeauthorizer + +import ( + "context" + "strings" + "testing" + + "k8s.io/apiserver/pkg/authentication/user" + kauthorizer "k8s.io/apiserver/pkg/authorization/authorizer" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +func TestAuthorize(t *testing.T) { + testCases := []struct { + name string + attributes kauthorizer.AttributesRecord + expectedAllowed kauthorizer.Decision + expectedErr string + expectedMsg string + }{ + { + name: "no user", + attributes: kauthorizer.AttributesRecord{ + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + expectedErr: `user missing from context`, + }, + { + name: "no extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty extra", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "empty scopes", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "bad scope", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"does-not-exist"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [does-not-exist] prevent this action, additionally the following non-fatal errors were reported: no scope evaluator found for "does-not-exist"`, + }, + { + name: "bad scope 2", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"role:dne"}}}, + ResourceRequest: true, + Namespace: "ns", + }, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [role:dne] prevent this action, additionally the following non-fatal errors were reported: bad format for scope role:dne`, + }, + { + name: "scope doesn't cover", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionDeny, + expectedMsg: `scopes [user:info] prevent this action`, + }, + { + name: "scope covers", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:info"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "get", Resource: "users", Name: "~"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "scope covers for discovery", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: 
{"user:info"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "get", Path: "/api"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: true, + Namespace: "ns", + Verb: "update", Resource: "users", Name: "harold"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + { + name: "user:full covers any non-resource", + attributes: kauthorizer.AttributesRecord{ + User: &user.DefaultInfo{Extra: map[string][]string{authorizationv1.ScopesKey: {"user:full"}}}, + ResourceRequest: false, + Namespace: "ns", + Verb: "post", Path: "/foo/bar/baz"}, + expectedAllowed: kauthorizer.DecisionNoOpinion, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + authorizer := NewAuthorizer(nil) + + actualAllowed, actualMsg, actualErr := authorizer.Authorize(context.TODO(), tc.attributes) + switch { + case len(tc.expectedErr) == 0 && actualErr == nil: + case len(tc.expectedErr) == 0 && actualErr != nil: + t.Errorf("%s: unexpected error: %v", tc.name, actualErr) + case len(tc.expectedErr) != 0 && actualErr == nil: + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErr) + case len(tc.expectedErr) != 0 && actualErr != nil: + if !strings.Contains(actualErr.Error(), tc.expectedErr) { + t.Errorf("expected %v, got %v", tc.expectedErr, actualErr) + } + } + if tc.expectedMsg != actualMsg { + t.Errorf("expected %v, got %v", tc.expectedMsg, actualMsg) + } + if tc.expectedAllowed != actualAllowed { + t.Errorf("expected %v, got %v", tc.expectedAllowed, actualAllowed) + } + }) + } +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go new file mode 100644 index 0000000000000..7e48ecea2ec9a --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_default.go @@ -0,0 +1,115 @@ +package configdefault + +import ( + "io/ioutil" + "os" + "path/filepath" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/library-go/pkg/config/configdefaults" + "k8s.io/klog/v2" +) + +// ResolveDirectoriesForSATokenVerification takes our config (which allows directories) and navigates one level of +// those directories for files. This makes it easy to build a single configmap that contains lots of aggregated files. +// if we fail to open the file for inspection, the resolving code in kube-apiserver may have drifted from us +// we include the raw file and let the kube-apiserver succeed or fail. 
+func ResolveDirectoriesForSATokenVerification(config *kubecontrolplanev1.KubeAPIServerConfig) { + // kube doesn't honor directories, but we want to allow them in our sa token validators + resolvedSATokenValidationCerts := []string{} + for _, filename := range config.ServiceAccountPublicKeyFiles { + file, err := os.Open(filename) + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warningf(err.Error()) + continue + } + fileInfo, err := file.Stat() + if err != nil { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + klog.Warningf(err.Error()) + continue + } + if !fileInfo.IsDir() { + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filename) + continue + } + + contents, err := ioutil.ReadDir(filename) + switch { + case os.IsNotExist(err) || os.IsPermission(err): + klog.Warningf(err.Error()) + case err != nil: + panic(err) // some weird, unexpected error + default: + for _, content := range contents { + if !content.Mode().IsRegular() { + continue + } + resolvedSATokenValidationCerts = append(resolvedSATokenValidationCerts, filepath.Join(filename, content.Name())) + } + } + } + + config.ServiceAccountPublicKeyFiles = resolvedSATokenValidationCerts +} + +func SetRecommendedKubeAPIServerConfigDefaults(config *kubecontrolplanev1.KubeAPIServerConfig) { + configdefaults.DefaultString(&config.GenericAPIServerConfig.StorageConfig.StoragePrefix, "kubernetes.io") + configdefaults.DefaultString(&config.GenericAPIServerConfig.ServingInfo.BindAddress, "0.0.0.0:6443") + + configdefaults.SetRecommendedGenericAPIServerConfigDefaults(&config.GenericAPIServerConfig) + SetRecommendedMasterAuthConfigDefaults(&config.AuthConfig) + SetRecommendedAggregatorConfigDefaults(&config.AggregatorConfig) + SetRecommendedKubeletConnectionInfoDefaults(&config.KubeletClientInfo) + + configdefaults.DefaultString(&config.ServicesSubnet, "10.0.0.0/24") + configdefaults.DefaultString(&config.ServicesNodePortRange, "30000-32767") + + if len(config.ServiceAccountPublicKeyFiles) == 0 { + config.ServiceAccountPublicKeyFiles = append([]string{}, "/etc/kubernetes/static-pod-resources/configmaps/sa-token-signing-certs") + } + + // after the aggregator defaults are set, we can default the auth config values + // TODO this indicates that we're setting two different things to the same value + if config.AuthConfig.RequestHeader == nil { + config.AuthConfig.RequestHeader = &kubecontrolplanev1.RequestHeaderAuthenticationOptions{} + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ClientCommonNames, []string{"system:openshift-aggregator"}) + configdefaults.DefaultString(&config.AuthConfig.RequestHeader.ClientCA, "/var/run/configmaps/aggregator-client-ca/ca-bundle.crt") + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.UsernameHeaders, []string{"X-Remote-User"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.GroupHeaders, []string{"X-Remote-Group"}) + configdefaults.DefaultStringSlice(&config.AuthConfig.RequestHeader.ExtraHeaderPrefixes, []string{"X-Remote-Extra-"}) + } + + // Set default cache TTLs for external webhook token reviewers + for i := range config.AuthConfig.WebhookTokenAuthenticators { + if len(config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL) == 0 { + config.AuthConfig.WebhookTokenAuthenticators[i].CacheTTL = "2m" + } + } + + if config.OAuthConfig != nil { + for i := range config.OAuthConfig.IdentityProviders { + // By default, only let one identity 
provider authenticate a particular user + // If multiple identity providers collide, the second one in will fail to auth + // The admin can set this to "add" if they want to allow new identities to join existing users + configdefaults.DefaultString(&config.OAuthConfig.IdentityProviders[i].MappingMethod, "claim") + } + } +} + +func SetRecommendedMasterAuthConfigDefaults(config *kubecontrolplanev1.MasterAuthConfig) { +} + +func SetRecommendedAggregatorConfigDefaults(config *kubecontrolplanev1.AggregatorConfig) { + configdefaults.DefaultString(&config.ProxyClientInfo.KeyFile, "/var/run/secrets/aggregator-client/tls.key") + configdefaults.DefaultString(&config.ProxyClientInfo.CertFile, "/var/run/secrets/aggregator-client/tls.crt") +} + +func SetRecommendedKubeletConnectionInfoDefaults(config *kubecontrolplanev1.KubeletConnectionInfo) { + if config.Port == 0 { + config.Port = 10250 + } + configdefaults.DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/kubelet-client/tls.key") + configdefaults.DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/kubelet-client/tls.crt") +} diff --git a/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go new file mode 100644 index 0000000000000..449952e5650d1 --- /dev/null +++ b/openshift-kube-apiserver/configdefault/kubecontrolplane_refs.go @@ -0,0 +1,122 @@ +package configdefault + +import ( + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/library-go/pkg/config/helpers" +) + +func GetKubeAPIServerConfigFileReferences(config *kubecontrolplanev1.KubeAPIServerConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + refs = append(refs, helpers.GetGenericAPIServerConfigFileReferences(&config.GenericAPIServerConfig)...) + refs = append(refs, GetKubeletConnectionInfoFileReferences(&config.KubeletClientInfo)...) + + if config.OAuthConfig != nil { + refs = append(refs, GetOAuthConfigFileReferences(config.OAuthConfig)...) + } + + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.CertFile) + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.KeyFile) + + if config.AuthConfig.RequestHeader != nil { + refs = append(refs, &config.AuthConfig.RequestHeader.ClientCA) + } + for k := range config.AuthConfig.WebhookTokenAuthenticators { + refs = append(refs, &config.AuthConfig.WebhookTokenAuthenticators[k].ConfigFile) + } + if len(config.AuthConfig.OAuthMetadataFile) > 0 { + refs = append(refs, &config.AuthConfig.OAuthMetadataFile) + } + + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.CertFile) + refs = append(refs, &config.AggregatorConfig.ProxyClientInfo.KeyFile) + + for i := range config.ServiceAccountPublicKeyFiles { + refs = append(refs, &config.ServiceAccountPublicKeyFiles[i]) + } + + return refs +} + +func GetKubeletConnectionInfoFileReferences(config *kubecontrolplanev1.KubeletConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, helpers.GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetOAuthConfigFileReferences(config *osinv1.OAuthConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + + if config.MasterCA != nil { + refs = append(refs, config.MasterCA) + } + + refs = append(refs, GetSessionConfigFileReferences(config.SessionConfig)...) 
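+ + // each identity provider type contributes its own file references (CA + // bundles, client secret files, htpasswd files, remote connection certs) + // via the type switch below so they can later be resolved to absolute paths.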
+ for _, identityProvider := range config.IdentityProviders { + switch provider := identityProvider.Provider.Object.(type) { + case (*osinv1.RequestHeaderIdentityProvider): + refs = append(refs, &provider.ClientCA) + + case (*osinv1.HTPasswdPasswordIdentityProvider): + refs = append(refs, &provider.File) + + case (*osinv1.LDAPPasswordIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.BindPassword)...) + + case (*osinv1.BasicAuthPasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.KeystonePasswordIdentityProvider): + refs = append(refs, helpers.GetRemoteConnectionInfoFileReferences(&provider.RemoteConnectionInfo)...) + + case (*osinv1.GitLabIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.OpenIDIdentityProvider): + refs = append(refs, &provider.CA) + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GoogleIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + + case (*osinv1.GitHubIdentityProvider): + refs = append(refs, helpers.GetStringSourceFileReferences(&provider.ClientSecret)...) + refs = append(refs, &provider.CA) + + } + } + + if config.Templates != nil { + refs = append(refs, &config.Templates.Login) + refs = append(refs, &config.Templates.ProviderSelection) + refs = append(refs, &config.Templates.Error) + } + + return refs +} + +func GetSessionConfigFileReferences(config *osinv1.SessionConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.SessionSecretsFile) + return refs +} diff --git a/openshift-kube-apiserver/enablement/enablement.go b/openshift-kube-apiserver/enablement/enablement.go new file mode 100644 index 0000000000000..d955f66825181 --- /dev/null +++ b/openshift-kube-apiserver/enablement/enablement.go @@ -0,0 +1,71 @@ +package enablement + +import ( + "fmt" + "runtime/debug" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/rest" +) + +func ForceOpenShift(newOpenshiftConfig *kubecontrolplanev1.KubeAPIServerConfig) { + isOpenShift = true + openshiftConfig = newOpenshiftConfig +} + +func SetLoopbackClientConfig(kubeClientConfig *rest.Config) { + loopbackClientConfig = rest.CopyConfig(kubeClientConfig) +} + +var ( + isOpenShift = false + openshiftConfig *kubecontrolplanev1.KubeAPIServerConfig + postStartHooks = map[string]PostStartHookConfigEntry{} + appendPostStartHooksCalled = false + loopbackClientConfig *rest.Config +) + +type PostStartHookConfigEntry struct { + Hook genericapiserver.PostStartHookFunc + // originatingStack holds the stack that registered postStartHooks. This allows us to show a more helpful message + // for duplicate registration. 
+ OriginatingStack string +} + +func IsOpenShift() bool { + return isOpenShift +} + +func OpenshiftConfig() *kubecontrolplanev1.KubeAPIServerConfig { + return openshiftConfig +} + +func LoopbackClientConfig() *rest.Config { + return loopbackClientConfig +} + +func AddPostStartHookOrDie(name string, hook genericapiserver.PostStartHookFunc) { + if appendPostStartHooksCalled { + panic(fmt.Errorf("already appended post start hooks")) + } + if len(name) == 0 { + panic(fmt.Errorf("missing name")) + } + if hook == nil { + panic(fmt.Errorf("hook func may not be nil: %q", name)) + } + + if postStartHook, exists := postStartHooks[name]; exists { + // this is programmer error, but it can be hard to debug + panic(fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.OriginatingStack)) + } + postStartHooks[name] = PostStartHookConfigEntry{Hook: hook, OriginatingStack: string(debug.Stack())} +} + +func AppendPostStartHooksOrDie(config *genericapiserver.Config) { + appendPostStartHooksCalled = true + for name, curr := range postStartHooks { + config.AddPostStartHookOrDie(name, curr.Hook) + } +} diff --git a/openshift-kube-apiserver/enablement/intialization.go b/openshift-kube-apiserver/enablement/intialization.go new file mode 100644 index 0000000000000..37da08f69ed09 --- /dev/null +++ b/openshift-kube-apiserver/enablement/intialization.go @@ -0,0 +1,81 @@ +package enablement + +import ( + "io/ioutil" + "path" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/clientcmd/api" + aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" + "k8s.io/kubernetes/openshift-kube-apiserver/configdefault" + "k8s.io/kubernetes/pkg/capabilities" + kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + osinv1 "github.com/openshift/api/osin/v1" + "github.com/openshift/library-go/pkg/config/helpers" +) + +func GetOpenshiftConfig(openshiftConfigFile string) (*kubecontrolplanev1.KubeAPIServerConfig, error) { + // try to decode into our new types first. right now there is no validation, no file path resolution. this unsticks the operator to start. 
+ // TODO add those things + configContent, err := ioutil.ReadFile(openshiftConfigFile) + if err != nil { + return nil, err + } + scheme := runtime.NewScheme() + utilruntime.Must(kubecontrolplanev1.Install(scheme)) + codecs := serializer.NewCodecFactory(scheme) + obj, err := runtime.Decode(codecs.UniversalDecoder(kubecontrolplanev1.GroupVersion, configv1.GroupVersion, osinv1.GroupVersion), configContent) + if err != nil { + + return nil, err + } + + // Resolve relative to CWD + absoluteConfigFile, err := api.MakeAbs(openshiftConfigFile, "") + if err != nil { + return nil, err + } + configFileLocation := path.Dir(absoluteConfigFile) + + config := obj.(*kubecontrolplanev1.KubeAPIServerConfig) + if err := helpers.ResolvePaths(configdefault.GetKubeAPIServerConfigFileReferences(config), configFileLocation); err != nil { + return nil, err + } + configdefault.SetRecommendedKubeAPIServerConfigDefaults(config) + configdefault.ResolveDirectoriesForSATokenVerification(config) + + return config, nil +} + +func ForceGlobalInitializationForOpenShift() { + // This allows to move crqs, sccs, and rbrs to CRD + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/quota.openshift.io/v1/clusterresourcequotas") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/security.openshift.io/v1/securitycontextconstraints") + aggregatorapiserver.AddAlwaysLocalDelegateForPrefix("/apis/authorization.openshift.io/v1/rolebindingrestrictions") + aggregatorapiserver.AddAlwaysLocalDelegateGroupResource(schema.GroupResource{Group: "authorization.openshift.io", Resource: "rolebindingrestrictions"}) + + // This allows the CRD registration to avoid fighting with the APIService from the operator + aggregatorapiserver.AddOverlappingGroupVersion(schema.GroupVersion{Group: "authorization.openshift.io", Version: "v1"}) + + // Allow privileged containers + capabilities.Initialize(capabilities.Capabilities{ + AllowPrivileged: true, + PrivilegedSources: capabilities.PrivilegedSources{ + HostNetworkSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostPIDSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + HostIPCSources: []string{kubelettypes.ApiserverSource, kubelettypes.FileSource}, + }, + }) + + // add permissions we require on our kube-apiserver + // TODO, we should scrub these out + bootstrappolicy.ClusterRoles = bootstrappolicy.OpenshiftClusterRoles + bootstrappolicy.ClusterRoleBindings = bootstrappolicy.OpenshiftClusterRoleBindings +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go new file mode 100644 index 0000000000000..b3faf6bd5452d --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/flags.go @@ -0,0 +1,222 @@ +package openshiftkubeapiserver + +import ( + "fmt" + "io/ioutil" + "net" + "strings" + + configv1 "github.com/openshift/api/config/v1" + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/apiserver-library-go/pkg/configflags" + "github.com/openshift/library-go/pkg/config/helpers" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + apiserverv1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" +) + +func ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) { + args := unmaskArgs(kubeAPIServerConfig.APIServerArguments) + + host, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress) + if err != nil { + return nil, err + } + + // TODO 
this list (and the content below) will be used to drive a config struct and a reflective test matching config to flags + // these flags are overridden by a patch + // admission-control + // authentication-token-webhook-cache-ttl + // authentication-token-webhook-config-file + // authorization-mode + // authorization-policy-file + // authorization-webhook-cache-authorized-ttl + // authorization-webhook-cache-unauthorized-ttl + // authorization-webhook-config-file + // basic-auth-file + // enable-aggregator-routing + // enable-bootstrap-token-auth + // oidc-client-id + // oidc-groups-claim + // oidc-groups-prefix + // oidc-issuer-url + // oidc-required-claim + // oidc-signing-algs + // oidc-username-claim + // oidc-username-prefix + // token-auth-file + + // alsologtostderr - don't know whether to change it + // apiserver-count - ignored, hopefully we don't have to fix via patch + // cert-dir - ignored because we set certs + + // these flags were never supported via config + // cloud-config + // cloud-provider + // cloud-provider-gce-lb-src-cidrs + // contention-profiling + // default-not-ready-toleration-seconds + // default-unreachable-toleration-seconds + // default-watch-cache-size + // delete-collection-workers + // deserialization-cache-size + // enable-garbage-collector + // etcd-compaction-interval + // etcd-count-metric-poll-period + // etcd-servers-overrides + // experimental-encryption-provider-config + // feature-gates + // http2-max-streams-per-connection + // insecure-bind-address + // kubelet-timeout + // log-backtrace-at + // log-dir + // log-flush-frequency + // logtostderr + // master-service-namespace + // max-connection-bytes-per-sec + // profiling + // request-timeout + // runtime-config + // service-account-api-audiences + // service-account-issuer + // service-account-key-file + // service-account-max-token-expiration + // stderrthreshold + // storage-versions + // target-ram-mb + // v + // version + // vmodule + // watch-cache + // watch-cache-sizes + + // TODO, we need to set these in order to enable the right admission plugins in each of the servers + // TODO this is needed for a viable cluster up + admissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionConfig) + if err != nil { + return nil, err + } + for flag, value := range admissionFlags { + configflags.SetIfUnset(args, flag, value...) + } + configflags.SetIfUnset(args, "allow-privileged", "true") + configflags.SetIfUnset(args, "anonymous-auth", "true") + configflags.SetIfUnset(args, "authorization-mode", "RBAC", "Node") // overridden later, but this runs the poststarthook for bootstrapping RBAC + for flag, value := range configflags.AuditFlags(&kubeAPIServerConfig.AuditConfig, configflags.ArgsWithPrefix(args, "audit-")) { + configflags.SetIfUnset(args, flag, value...) + } + configflags.SetIfUnset(args, "bind-address", host) + configflags.SetIfUnset(args, "client-ca-file", kubeAPIServerConfig.ServingInfo.ClientCA) + configflags.SetIfUnset(args, "cors-allowed-origins", kubeAPIServerConfig.CORSAllowedOrigins...) 
+ configflags.SetIfUnset(args, "enable-logs-handler", "false") + configflags.SetIfUnset(args, "enable-swagger-ui", "true") + configflags.SetIfUnset(args, "endpoint-reconciler-type", "lease") + configflags.SetIfUnset(args, "etcd-cafile", kubeAPIServerConfig.StorageConfig.CA) + configflags.SetIfUnset(args, "etcd-certfile", kubeAPIServerConfig.StorageConfig.CertFile) + configflags.SetIfUnset(args, "etcd-keyfile", kubeAPIServerConfig.StorageConfig.KeyFile) + configflags.SetIfUnset(args, "etcd-prefix", kubeAPIServerConfig.StorageConfig.StoragePrefix) + configflags.SetIfUnset(args, "etcd-servers", kubeAPIServerConfig.StorageConfig.URLs...) + configflags.SetIfUnset(args, "event-ttl", "3h") // set a TTL long enough to last for our CI tests so we see the first set of events. + configflags.SetIfUnset(args, "insecure-port", "0") + configflags.SetIfUnset(args, "kubelet-certificate-authority", kubeAPIServerConfig.KubeletClientInfo.CA) + configflags.SetIfUnset(args, "kubelet-client-certificate", kubeAPIServerConfig.KubeletClientInfo.CertFile) + configflags.SetIfUnset(args, "kubelet-client-key", kubeAPIServerConfig.KubeletClientInfo.KeyFile) + configflags.SetIfUnset(args, "kubelet-https", "true") + configflags.SetIfUnset(args, "kubelet-preferred-address-types", "Hostname", "InternalIP", "ExternalIP") + configflags.SetIfUnset(args, "kubelet-read-only-port", "0") + configflags.SetIfUnset(args, "kubernetes-service-node-port", "0") + configflags.SetIfUnset(args, "max-mutating-requests-inflight", fmt.Sprintf("%d", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight/2)) + configflags.SetIfUnset(args, "max-requests-inflight", fmt.Sprintf("%d", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight)) + configflags.SetIfUnset(args, "min-request-timeout", fmt.Sprintf("%d", kubeAPIServerConfig.ServingInfo.RequestTimeoutSeconds)) + configflags.SetIfUnset(args, "proxy-client-cert-file", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.CertFile) + configflags.SetIfUnset(args, "proxy-client-key-file", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.KeyFile) + configflags.SetIfUnset(args, "requestheader-allowed-names", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCommonNames...) + configflags.SetIfUnset(args, "requestheader-client-ca-file", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCA) + configflags.SetIfUnset(args, "requestheader-extra-headers-prefix", kubeAPIServerConfig.AuthConfig.RequestHeader.ExtraHeaderPrefixes...) + configflags.SetIfUnset(args, "requestheader-group-headers", kubeAPIServerConfig.AuthConfig.RequestHeader.GroupHeaders...) + configflags.SetIfUnset(args, "requestheader-username-headers", kubeAPIServerConfig.AuthConfig.RequestHeader.UsernameHeaders...) + configflags.SetIfUnset(args, "secure-port", portString) + configflags.SetIfUnset(args, "service-account-key-file", kubeAPIServerConfig.ServiceAccountPublicKeyFiles...) + configflags.SetIfUnset(args, "service-account-lookup", "true") + configflags.SetIfUnset(args, "service-cluster-ip-range", kubeAPIServerConfig.ServicesSubnet) + configflags.SetIfUnset(args, "service-node-port-range", kubeAPIServerConfig.ServicesNodePortRange) + configflags.SetIfUnset(args, "storage-backend", "etcd3") + configflags.SetIfUnset(args, "storage-media-type", "application/vnd.kubernetes.protobuf") + configflags.SetIfUnset(args, "tls-cert-file", kubeAPIServerConfig.ServingInfo.CertFile) + configflags.SetIfUnset(args, "tls-cipher-suites", kubeAPIServerConfig.ServingInfo.CipherSuites...) 
+ configflags.SetIfUnset(args, "tls-min-version", kubeAPIServerConfig.ServingInfo.MinTLSVersion) + configflags.SetIfUnset(args, "tls-private-key-file", kubeAPIServerConfig.ServingInfo.KeyFile) + configflags.SetIfUnset(args, "tls-sni-cert-key", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...) + configflags.SetIfUnset(args, "secure-port", portString) + + return configflags.ToFlagSlice(args), nil +} + +func admissionFlags(admissionConfig configv1.AdmissionConfig) (map[string][]string, error) { + args := map[string][]string{} + + upstreamAdmissionConfig, err := ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(admissionConfig.PluginConfig) + if err != nil { + return nil, err + } + configBytes, err := helpers.WriteYAML(upstreamAdmissionConfig, apiserverv1alpha1.AddToScheme) + if err != nil { + return nil, err + } + + tempFile, err := ioutil.TempFile("", "kubeapiserver-admission-config.yaml") + if err != nil { + return nil, err + } + if _, err := tempFile.Write(configBytes); err != nil { + return nil, err + } + tempFile.Close() + + configflags.SetIfUnset(args, "admission-control-config-file", tempFile.Name()) + configflags.SetIfUnset(args, "disable-admission-plugins", admissionConfig.DisabledAdmissionPlugins...) + configflags.SetIfUnset(args, "enable-admission-plugins", admissionConfig.EnabledAdmissionPlugins...) + + return args, nil +} + +func sniCertKeys(namedCertificates []configv1.NamedCertificate) []string { + args := []string{} + for _, nc := range namedCertificates { + names := "" + if len(nc.Names) > 0 { + names = ":" + strings.Join(nc.Names, ",") + } + args = append(args, fmt.Sprintf("%s,%s%s", nc.CertFile, nc.KeyFile, names)) + } + return args +} + +func unmaskArgs(args map[string]kubecontrolplanev1.Arguments) map[string][]string { + ret := map[string][]string{} + for key, slice := range args { + for _, val := range slice { + ret[key] = append(ret[key], val) + } + } + return ret +} + +func ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(in map[string]configv1.AdmissionPluginConfig) (*apiserverv1alpha1.AdmissionConfiguration, error) { + ret := &apiserverv1alpha1.AdmissionConfiguration{} + + for _, pluginName := range sets.StringKeySet(in).List() { + kubeConfig := apiserverv1alpha1.AdmissionPluginConfiguration{ + Name: pluginName, + Path: in[pluginName].Location, + Configuration: &runtime.Unknown{ + Raw: in[pluginName].Configuration.Raw, + }, + } + + ret.Plugins = append(ret.Plugins, kubeConfig) + } + + return ret, nil +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go new file mode 100644 index 0000000000000..3241b9b432981 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/flags_test.go @@ -0,0 +1,26 @@ +package openshiftkubeapiserver + +import ( + "testing" + + "github.com/openshift/api/config/v1" +) + +func TestSNICertKeys(t *testing.T) { + testCases := []struct { + names []string + expected string + }{ + {names: []string{"foo"}, expected: "secret.crt,secret.key:foo"}, + {names: []string{"foo", "bar"}, expected: "secret.crt,secret.key:foo,bar"}, + {expected: "secret.crt,secret.key"}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + result := sniCertKeys([]v1.NamedCertificate{{Names: tc.names, CertInfo: v1.CertInfo{CertFile: "secret.crt", KeyFile: "secret.key"}}}) + if len(result) != 1 || result[0] != tc.expected { + t.Errorf("expected: %v, actual: %v", []string{tc.expected}, result) + } + }) + } +} diff --git 
a/openshift-kube-apiserver/openshiftkubeapiserver/paramtoken/paramtoken.go b/openshift-kube-apiserver/openshiftkubeapiserver/paramtoken/paramtoken.go new file mode 100644 index 0000000000000..1993002e5daf8 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/paramtoken/paramtoken.go @@ -0,0 +1,45 @@ +package paramtoken + +import ( + "net/http" + "strings" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/util/wsstream" +) + +// Authenticator provides a way to authenticate tokens provided as a parameter +// This only exists to allow websocket connections to use an API token, since they cannot set an Authorization header +// For this authenticator to work, tokens will be part of the request URL, and are more likely to be logged or otherwise exposed. +// Every effort should be made to filter tokens from being logged when using this authenticator. +type Authenticator struct { + // param is the query param to use as a token + param string + // auth is the token authenticator to use to validate the token + auth authenticator.Token + // removeParam indicates whether the parameter should be stripped from the incoming request + removeParam bool +} + +func New(param string, auth authenticator.Token, removeParam bool) *Authenticator { + return &Authenticator{param, auth, removeParam} +} + +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + // Only accept query param auth for websocket connections + if !wsstream.IsWebSocketRequest(req) { + return nil, false, nil + } + + q := req.URL.Query() + token := strings.TrimSpace(q.Get(a.param)) + if token == "" { + return nil, false, nil + } + authResponse, ok, err := a.auth.AuthenticateToken(req.Context(), token) + if ok && a.removeParam { + q.Del(a.param) + req.URL.RawQuery = q.Encode() + } + return authResponse, ok, err +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go new file mode 100644 index 0000000000000..f66d9a1ab5452 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch.go @@ -0,0 +1,157 @@ +package openshiftkubeapiserver + +import ( + "time" + + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" + + "k8s.io/apiserver/pkg/admission" + "k8s.io/apiserver/pkg/quota/v1/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + clientgoinformers "k8s.io/client-go/informers" + corev1informers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/quota/v1/install" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy" + "github.com/openshift/apiserver-library-go/pkg/admission/imagepolicy/imagereferencemutators" + "github.com/openshift/apiserver-library-go/pkg/admission/quota/clusterresourcequota" + "github.com/openshift/apiserver-library-go/pkg/securitycontextconstraints/sccadmission" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" + quotav1informer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + securityv1client "github.com/openshift/client-go/security/clientset/versioned" + securityv1informer "github.com/openshift/client-go/security/informers/externalversions" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer 
"github.com/openshift/client-go/user/informers/externalversions" + "github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig" + "github.com/openshift/library-go/pkg/apiserver/apiserverconfig" + "github.com/openshift/library-go/pkg/quota/clusterquotamapping" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/scheduler/nodeenv" +) + +type KubeAPIServerConfigFunc func(config *genericapiserver.Config, versionedInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) error + +func NewOpenShiftKubeAPIServerConfigPatch(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) KubeAPIServerConfigFunc { + return func(genericConfig *genericapiserver.Config, kubeInformers clientgoinformers.SharedInformerFactory, pluginInitializers *[]admission.PluginInitializer) error { + openshiftInformers, err := newInformers(genericConfig.LoopbackClientConfig) + if err != nil { + return err + } + + // AUTHORIZER + genericConfig.RequestInfoResolver = apiserverconfig.OpenshiftRequestInfoResolver() + authorizer := NewAuthorizer(kubeInformers) + genericConfig.Authorization.Authorizer = authorizer + // END AUTHORIZER + + // Inject OpenShift API long running endpoints (like for binary builds). + // TODO: We should disable the timeout code for aggregated endpoints as this can cause problems when upstream add additional endpoints. + genericConfig.LongRunningFunc = apiserverconfig.IsLongRunningRequest + + // ADMISSION + clusterQuotaMappingController := newClusterQuotaMappingController(kubeInformers.Core().V1().Namespaces(), openshiftInformers.OpenshiftQuotaInformers.Quota().V1().ClusterResourceQuotas()) + genericConfig.AddPostStartHookOrDie("quota.openshift.io-clusterquotamapping", func(context genericapiserver.PostStartHookContext) error { + go clusterQuotaMappingController.Run(5, context.StopCh) + return nil + }) + + *pluginInitializers = append(*pluginInitializers, + imagepolicy.NewInitializer(imagereferencemutators.KubeImageMutators{}, kubeAPIServerConfig.ImagePolicyConfig.InternalRegistryHostname), + restrictusers.NewInitializer(openshiftInformers.getOpenshiftUserInformers()), + sccadmission.NewInitializer(openshiftInformers.getOpenshiftSecurityInformers().Security().V1().SecurityContextConstraints()), + clusterresourcequota.NewInitializer( + openshiftInformers.getOpenshiftQuotaInformers().Quota().V1().ClusterResourceQuotas(), + clusterQuotaMappingController.GetClusterQuotaMapper(), + generic.NewRegistry(install.NewQuotaConfigurationForAdmission().Evaluators()), + ), + nodeenv.NewInitializer(kubeAPIServerConfig.ProjectConfig.DefaultNodeSelector), + admissionrestconfig.NewInitializer(*rest.CopyConfig(genericConfig.LoopbackClientConfig)), + ) + // END ADMISSION + + // HANDLER CHAIN (with oauth server and web console) + genericConfig.BuildHandlerChainFunc, err = BuildHandlerChain(kubeAPIServerConfig.ConsolePublicURL, kubeAPIServerConfig.AuthConfig.OAuthMetadataFile) + if err != nil { + return err + } + // END HANDLER CHAIN + + genericConfig.AddPostStartHookOrDie("openshift.io-startkubeinformers", func(context genericapiserver.PostStartHookContext) error { + go kubeInformers.Start(context.StopCh) + go openshiftInformers.Start(context.StopCh) + return nil + }) + enablement.AppendPostStartHooksOrDie(genericConfig) + + return nil + } +} + +// newInformers is only exposed for 
+// newInformers is only exposed for the build's integration testing until it can be fixed more appropriately. +func newInformers(loopbackClientConfig *rest.Config) (*kubeAPIServerInformers, error) { + // ClusterResourceQuota is served using a CRD resource, so any status update must use JSON + jsonLoopbackClientConfig := rest.CopyConfig(loopbackClientConfig) + jsonLoopbackClientConfig.ContentConfig.AcceptContentTypes = "application/json" + jsonLoopbackClientConfig.ContentConfig.ContentType = "application/json" + + quotaClient, err := quotaclient.NewForConfig(jsonLoopbackClientConfig) + if err != nil { + return nil, err + } + securityClient, err := securityv1client.NewForConfig(jsonLoopbackClientConfig) + if err != nil { + return nil, err + } + userClient, err := userclient.NewForConfig(loopbackClientConfig) + if err != nil { + return nil, err + } + + // TODO find a single place to create and start informers. During the 1.7 rebase this will come more naturally in a config object, + // before then we should try to eliminate our direct to storage access. It's making us do weird things. + const defaultInformerResyncPeriod = 10 * time.Minute + + ret := &kubeAPIServerInformers{ + OpenshiftQuotaInformers: quotainformer.NewSharedInformerFactory(quotaClient, defaultInformerResyncPeriod), + OpenshiftSecurityInformers: securityv1informer.NewSharedInformerFactory(securityClient, defaultInformerResyncPeriod), + OpenshiftUserInformers: userinformer.NewSharedInformerFactory(userClient, defaultInformerResyncPeriod), + } + if err := ret.OpenshiftUserInformers.User().V1().Groups().Informer().AddIndexers(cache.Indexers{ + usercache.ByUserIndexName: usercache.ByUserIndexKeys, + }); err != nil { + return nil, err + } + + return ret, nil +} + +type kubeAPIServerInformers struct { + OpenshiftQuotaInformers quotainformer.SharedInformerFactory + OpenshiftSecurityInformers securityv1informer.SharedInformerFactory + OpenshiftUserInformers userinformer.SharedInformerFactory +} + +func (i *kubeAPIServerInformers) getOpenshiftQuotaInformers() quotainformer.SharedInformerFactory { + return i.OpenshiftQuotaInformers +} +func (i *kubeAPIServerInformers) getOpenshiftSecurityInformers() securityv1informer.SharedInformerFactory { + return i.OpenshiftSecurityInformers +} +func (i *kubeAPIServerInformers) getOpenshiftUserInformers() userinformer.SharedInformerFactory { + return i.OpenshiftUserInformers +} + +func (i *kubeAPIServerInformers) Start(stopCh <-chan struct{}) { + i.OpenshiftQuotaInformers.Start(stopCh) + i.OpenshiftSecurityInformers.Start(stopCh) + i.OpenshiftUserInformers.Start(stopCh) +} + +func newClusterQuotaMappingController(nsInternalInformer corev1informers.NamespaceInformer, clusterQuotaInformer quotav1informer.ClusterResourceQuotaInformer) *clusterquotamapping.ClusterQuotaMappingController { + return clusterquotamapping.NewClusterQuotaMappingController(nsInternalInformer, clusterQuotaInformer) +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch_authorizer.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch_authorizer.go new file mode 100644 index 0000000000000..26a6f5273d631 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch_authorizer.go @@ -0,0 +1,52 @@ +package openshiftkubeapiserver + +import ( + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/apiserver/pkg/authorization/authorizerfactory" + authorizerunion "k8s.io/apiserver/pkg/authorization/union" + "k8s.io/client-go/informers" + "k8s.io/kubernetes/pkg/auth/nodeidentifier" +
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node" + rbacauthorizer "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" + kbootstrappolicy "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy" + + "k8s.io/kubernetes/openshift-kube-apiserver/authorization/browsersafe" + "k8s.io/kubernetes/openshift-kube-apiserver/authorization/scopeauthorizer" +) + +func NewAuthorizer(versionedInformers informers.SharedInformerFactory) authorizer.Authorizer { + rbacInformers := versionedInformers.Rbac().V1() + + scopeLimitedAuthorizer := scopeauthorizer.NewAuthorizer(rbacInformers.ClusterRoles().Lister()) + + kubeAuthorizer := rbacauthorizer.New( + &rbacauthorizer.RoleGetter{Lister: rbacInformers.Roles().Lister()}, + &rbacauthorizer.RoleBindingLister{Lister: rbacInformers.RoleBindings().Lister()}, + &rbacauthorizer.ClusterRoleGetter{Lister: rbacInformers.ClusterRoles().Lister()}, + &rbacauthorizer.ClusterRoleBindingLister{Lister: rbacInformers.ClusterRoleBindings().Lister()}, + ) + + graph := node.NewGraph() + node.AddGraphEventHandlers( + graph, + versionedInformers.Core().V1().Nodes(), + versionedInformers.Core().V1().Pods(), + versionedInformers.Core().V1().PersistentVolumes(), + versionedInformers.Storage().V1().VolumeAttachments(), + ) + nodeAuthorizer := node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), kbootstrappolicy.NodeRules()) + + openshiftAuthorizer := authorizerunion.New( + // Wrap with an authorizer that detects unsafe requests and modifies verbs/resources appropriately so policy can address them separately. + // Scopes are first because they will authoritatively deny and can logically be attached to anyone. + browsersafe.NewBrowserSafeAuthorizer(scopeLimitedAuthorizer, user.AllAuthenticated), + // authorizes system:masters to do anything, just like upstream + authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup), + nodeAuthorizer, + // Wrap with an authorizer that detects unsafe requests and modifies verbs/resources appropriately so policy can address them separately + browsersafe.NewBrowserSafeAuthorizer(kubeAuthorizer, user.AllAuthenticated), + ) + + return openshiftAuthorizer +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go new file mode 100644 index 0000000000000..804116c1efa1d --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/patch_handlerchain.go @@ -0,0 +1,97 @@ +package openshiftkubeapiserver + +import ( + "net/http" + "strings" + + authenticationv1 "k8s.io/api/authentication/v1" + genericapiserver "k8s.io/apiserver/pkg/server" + + authorizationv1 "github.com/openshift/api/authorization/v1" + "github.com/openshift/library-go/pkg/apiserver/httprequest" +) + +// TODO switch back to taking a kubeapiserver config. For now make it obviously safe for 3.11 +func BuildHandlerChain(consolePublicURL string, oauthMetadataFile string) (func(apiHandler http.Handler, kc *genericapiserver.Config) http.Handler, error) { + // load the oauthmetadata when we can return an error + oAuthMetadata := []byte{} + if len(oauthMetadataFile) > 0 { + var err error + oAuthMetadata, err = loadOAuthMetadataFile(oauthMetadataFile) + if err != nil { + return nil, err + } + } + + return func(apiHandler http.Handler, genericConfig *genericapiserver.Config) http.Handler { + // well-known comes after the normal handling chain. 
It tells clients where to connect for OAuth information. + handler := withOAuthInfo(apiHandler, oAuthMetadata) + + // this is the normal kube handler chain + handler = genericapiserver.DefaultBuildHandlerChain(handler, genericConfig) + + // these handlers are all before the normal kube chain + handler = translateLegacyScopeImpersonation(handler) + + // redirects from / and /console to consolePublicURL if you're using a browser + handler = withConsoleRedirect(handler, consolePublicURL) + + return handler + }, + nil +} + +// withOAuthInfo serves the OAuth 2.0 Authorization Server Metadata at the +// well-known endpoint whenever metadata has been configured +func withOAuthInfo(handler http.Handler, oAuthMetadata []byte) http.Handler { + if len(oAuthMetadata) == 0 { + return handler + } + + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != oauthMetadataEndpoint { + // Dispatch to the next handler + handler.ServeHTTP(w, req) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(oAuthMetadata) + }) +} + +// If we know the location of the asset server, redirect to it when / is requested +// and the Accept header supports text/html +func withConsoleRedirect(handler http.Handler, consolePublicURL string) http.Handler { + if len(consolePublicURL) == 0 { + return handler + } + + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if strings.HasPrefix(req.URL.Path, "/console") || + (req.URL.Path == "/" && httprequest.PrefersHTML(req)) { + http.Redirect(w, req, consolePublicURL, http.StatusFound) + return + } + // Dispatch to the next handler + handler.ServeHTTP(w, req) + }) +} + +// legacyImpersonateUserScopeHeader is the header name older servers were using +// just for scopes, so we need to translate it from clients that may still be +// using it. +const legacyImpersonateUserScopeHeader = "Impersonate-User-Scope" + +// translateLegacyScopeImpersonation is a filter that translates user scope impersonation for openshift into the equivalent kube headers.
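+// For example (illustrative): an incoming
+//   Impersonate-User-Scope: user:info
+// header is appended under the upstream impersonation extra-header key
+// (authenticationv1.ImpersonateUserExtraHeaderPrefix + authorizationv1.ScopesKey),
+// where the standard kube impersonation filter can evaluate it.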
+func translateLegacyScopeImpersonation(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + for _, scope := range req.Header[legacyImpersonateUserScopeHeader] { + req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey] = + append(req.Header[authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationv1.ScopesKey], scope) + } + + handler.ServeHTTP(w, req) + }) +} diff --git a/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go new file mode 100644 index 0000000000000..8b34da7aa3203 --- /dev/null +++ b/openshift-kube-apiserver/openshiftkubeapiserver/wellknown_oauth.go @@ -0,0 +1,57 @@ +package openshiftkubeapiserver + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/openshift/library-go/pkg/oauth/oauthdiscovery" +) + +const ( + // Discovery endpoint for OAuth 2.0 Authorization Server Metadata + // See IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + oauthMetadataEndpoint = "/.well-known/oauth-authorization-server" +) + +func validateURL(urlString string) error { + urlObj, err := url.Parse(urlString) + if err != nil { + return fmt.Errorf("%q is an invalid URL: %v", urlString, err) + } + if len(urlObj.Scheme) == 0 { + return fmt.Errorf("must contain a valid scheme") + } + if len(urlObj.Host) == 0 { + return fmt.Errorf("must contain a valid host") + } + return nil +} + +func loadOAuthMetadataFile(metadataFile string) ([]byte, error) { + data, err := ioutil.ReadFile(metadataFile) + if err != nil { + return nil, fmt.Errorf("unable to read External OAuth Metadata file: %v", err) + } + + oauthMetadata := &oauthdiscovery.OauthAuthorizationServerMetadata{} + if err := json.Unmarshal(data, oauthMetadata); err != nil { + return nil, fmt.Errorf("unable to decode External OAuth Metadata file: %v", err) + } + + if err := validateURL(oauthMetadata.Issuer); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata Issuer field: %v", err) + } + + if err := validateURL(oauthMetadata.AuthorizationEndpoint); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata AuthorizationEndpoint field: %v", err) + } + + if err := validateURL(oauthMetadata.TokenEndpoint); err != nil { + return nil, fmt.Errorf("error validating External OAuth Metadata TokenEndpoint field: %v", err) + } + + return data, nil +} diff --git a/pkg/kubeapiserver/authenticator/config.go b/pkg/kubeapiserver/authenticator/config.go index bc8033c04ca0e..fd8c66d35e469 100644 --- a/pkg/kubeapiserver/authenticator/config.go +++ b/pkg/kubeapiserver/authenticator/config.go @@ -174,6 +174,13 @@ func (config Config) New() (authenticator.Request, *spec.SecurityDefinitions, er } tokenAuthenticators = append(tokenAuthenticators, webhookTokenAuth) + } else { + // openshift gets injected here as a separate token authenticator because we also add a special group to our oauth authorized + // tokens that allows us to recognize human users differently from machine users. The openAPI is always correct because we always + // configure service account tokens, so we just have to create and add another authenticator. + // TODO make this a webhook authenticator and remove this patch.
+ // TODO - remove in 4.7, kept here not to disrupt authentication during 4.5->4.6 upgrade + tokenAuthenticators = AddOAuthServerAuthenticatorIfNeeded(tokenAuthenticators, config.APIAudiences) } if len(tokenAuthenticators) > 0 { diff --git a/pkg/kubeapiserver/authenticator/patch_authenticator.go b/pkg/kubeapiserver/authenticator/patch_authenticator.go new file mode 100644 index 0000000000000..f52d951dcd4ab --- /dev/null +++ b/pkg/kubeapiserver/authenticator/patch_authenticator.go @@ -0,0 +1,86 @@ +package authenticator + +import ( + "time" + + oauthclient "github.com/openshift/client-go/oauth/clientset/versioned" + oauthinformer "github.com/openshift/client-go/oauth/informers/externalversions" + userclient "github.com/openshift/client-go/user/clientset/versioned" + userinformer "github.com/openshift/client-go/user/informers/externalversions" + bootstrap "github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/group" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/openshift-kube-apiserver/admission/authorization/restrictusers/usercache" + oauthvalidation "k8s.io/kubernetes/openshift-kube-apiserver/admission/customresourcevalidation/oauth" + "k8s.io/kubernetes/openshift-kube-apiserver/authentication/oauth" + "k8s.io/kubernetes/openshift-kube-apiserver/enablement" +) + +const authenticatedOAuthGroup = "system:authenticated:oauth" + +// TODO find a single place to create and start informers. During the 1.7 rebase this will come more naturally in a config object, +// before then we should try to eliminate our direct to storage access. It's making us do weird things. 
+const defaultInformerResyncPeriod = 10 * time.Minute + +func AddOAuthServerAuthenticatorIfNeeded(tokenAuthenticators []authenticator.Token, implicitAudiences authenticator.Audiences) []authenticator.Token { + if !enablement.IsOpenShift() { + return tokenAuthenticators + } + + kubeClient, err := kubernetes.NewForConfig(enablement.LoopbackClientConfig()) + if err != nil { + panic(err) + } + bootstrapUserDataGetter := bootstrap.NewBootstrapUserDataGetter(kubeClient.CoreV1(), kubeClient.CoreV1()) + + oauthClient, err := oauthclient.NewForConfig(enablement.LoopbackClientConfig()) + if err != nil { + panic(err) + } + userClient, err := userclient.NewForConfig(enablement.LoopbackClientConfig()) + if err != nil { + panic(err) + } + + oauthInformer := oauthinformer.NewSharedInformerFactory(oauthClient, defaultInformerResyncPeriod) + userInformer := userinformer.NewSharedInformerFactory(userClient, defaultInformerResyncPeriod) + if err := userInformer.User().V1().Groups().Informer().AddIndexers(cache.Indexers{ + usercache.ByUserIndexName: usercache.ByUserIndexKeys, + }); err != nil { + panic(err) + } + + // add our oauth token validator + validators := []oauth.OAuthTokenValidator{oauth.NewExpirationValidator(), oauth.NewUIDValidator()} + if enablement.OpenshiftConfig().OAuthConfig != nil { + if inactivityTimeout := enablement.OpenshiftConfig().OAuthConfig.TokenConfig.AccessTokenInactivityTimeoutSeconds; inactivityTimeout != nil { + timeoutValidator := oauth.NewTimeoutValidator(oauthClient.OauthV1().OAuthAccessTokens(), oauthInformer.Oauth().V1().OAuthClients().Lister(), *inactivityTimeout, oauthvalidation.MinimumInactivityTimeoutSeconds) + validators = append(validators, timeoutValidator) + enablement.AddPostStartHookOrDie("openshift.io-TokenTimeoutUpdater", func(context genericapiserver.PostStartHookContext) error { + go timeoutValidator.Run(context.StopCh) + return nil + }) + } + } + enablement.AddPostStartHookOrDie("openshift.io-StartOAuthInformers", func(context genericapiserver.PostStartHookContext) error { + go oauthInformer.Start(context.StopCh) + go userInformer.Start(context.StopCh) + return nil + }) + groupMapper := usercache.NewGroupCache(userInformer.User().V1().Groups()) + oauthTokenAuthenticator := oauth.NewTokenAuthenticator(oauthClient.OauthV1().OAuthAccessTokens(), userClient.UserV1().Users(), groupMapper, implicitAudiences, validators...) + tokenAuthenticators = append(tokenAuthenticators, + // if you have an OAuth bearer token, you're a human (usually) + group.NewTokenGroupAdder(oauthTokenAuthenticator, []string{authenticatedOAuthGroup})) + + // add the bootstrap user token authenticator + tokenAuthenticators = append(tokenAuthenticators, + // bootstrap oauth user that can do anything, backed by a secret + oauth.NewBootstrapAuthenticator(oauthClient.OauthV1().OAuthAccessTokens(), bootstrapUserDataGetter, implicitAudiences, validators...)) + + return tokenAuthenticators +}
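A hedged sketch of what the group-adder wrapping above accomplishes (variable names invented; group.NewTokenGroupAdder is the upstream helper from k8s.io/apiserver/pkg/authentication/group):

	// Any token accepted by the OAuth token authenticator is additionally tagged
	// with the human-user marker group before the response is returned.
	authn := group.NewTokenGroupAdder(oauthTokenAuthenticator, []string{authenticatedOAuthGroup})
	resp, ok, err := authn.AuthenticateToken(ctx, tokenString)
	// On success, resp.User's groups include "system:authenticated:oauth".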