
Commit

add router-scale regression script
qiliRedHat committed Jan 17, 2025
1 parent e6ed317 commit bd435be
Showing 5 changed files with 237 additions and 5 deletions.
3 changes: 0 additions & 3 deletions perfscale_regression_ci/scripts/custom_workload_env.sh
@@ -1,6 +1,3 @@
# Where to clone e2e-benchmarking from
export E2E_BENCHMARKING_REPOSITORY=${E2E_BENCHMARKING_REPOSITORY:-"https://github.com/cloud-bulldozer/e2e-benchmarking"}
export E2E_BENCHMARKING_BRANCH=${E2E_BENCHMARKING_BRANCH:-"master"}
# Common ENVs of custom workload for kube-burner
export WORKLOAD=${WORKLOAD:-"custom"}
export INDEXING=${INDEXING:-"false"}
@@ -0,0 +1,51 @@
#!/bin/bash
################################################
## Author=qili@redhat.com
## Description: Script for router scale test
## Polarion test case: OCP-43281
## https://polarion.engineering.redhat.com/polarion/#/project/OSE/workitem?id=OCP-43281
## Cluster config: 3 masters (m5.2xlarge), 9 workers (m5.xlarge), 3 infras (c5.4xlarge); do not move components to infra nodes before the test
## ingress-perf config: standard-3replicas.yml, standard-4replicas.yml
## Optional parameters: JOB_ITERATION (number of job iterations)
################################################
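# Test matrix covered below: threadCount=4 with 2, 3 and 4 router replicas, then
# threadCount=8 with 4, 3 and 2 replicas, driven by the ingress-perf configs
# standard.yml (default), standard-3replicas.yml and standard-4replicas.yml.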
set -o errexit

source ../../common.sh
source ../../../utils/run_workload.sh

echo "[INFO] Patch threadCount as 4"
oc -n openshift-ingress-operator patch ingresscontroller/default --type=merge -p '{"spec":{"tuningOptions": {"threadCount": 4}}}'
oc -n openshift-ingress get deploy router-default -o yaml | grep " ROUTER_THREADS" -A 1
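# The threadCount patch triggers a rolling restart of router-default; waiting for the
# rollout to finish is an optional guard (assumes the default deployment name) so the
# test does not measure a mix of old and new router pods.
oc -n openshift-ingress rollout status deploy/router-default --timeout=300s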

echo "[INFO] run ingress-perf with thread=4, replica=2"
run_ingress_perf
echo "[INFO] run ingress-perf with thread=4, replica=3"
export CONFIG=../../../standard-3replicas.yml
run_ingress_perf

echo "[INFO] scale infra replicas to 4"
machineset=$(oc get machinesets -n openshift-machine-api --no-headers | head -n 1 | awk {'print $1'})
oc scale machineset --replicas=2 ${machineset} -n openshift-machine-api
oc get machinesets -n openshift-machine-api
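# Optional: wait until the scaled machineset reports both machines ready before running
# the 4-replica config (a sketch; assumes the ready count should reach 2).
until [[ "$(oc get machineset ${machineset} -n openshift-machine-api -o jsonpath='{.status.readyReplicas}')" == "2" ]]; do
  echo "[INFO] waiting for ${machineset} to report 2 ready replicas"
  sleep 30
done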

echo "[INFO] run ingress-perf with thread=4, replica=4"
export CONFIG=../../../standard-4replicas.yml
run_ingress_perf

echo "[INFO] Patch threadCount as 8"
oc -n openshift-ingress-operator patch ingresscontroller/default --type=merge -p '{"spec":{"tuningOptions": {"threadCount": 8}}}'
oc -n openshift-ingress get deploy router-default -o yaml | grep " ROUTER_THREADS" -A 1
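# As above, optionally wait for the router rollout triggered by the patch to complete.
oc -n openshift-ingress rollout status deploy/router-default --timeout=300s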

echo "[INFO] run ingress-perf with thread=8, replica=4"
export CONFIG=../../../standard-4replicas.yml
run_ingress_perf

echo "[INFO] run ingress-perf with thread=8, replica=3"
export CONFIG=../../../standard-3replicas.yml
run_ingress_perf

echo "[INFO] run ingress-perf with thread=8, replica=2"
unset CONFIG
run_ingress_perf

echo "[INFO] Test is finished. Pleaset check results in the grafana and dittybopper dashboards."
@@ -0,0 +1,82 @@
# vi: expandtab shiftwidth=2 softtabstop=2

# The first scenario is a warmup; it also tunes the default ingress-controller to place the router pods on the infra nodes with 3 replicas
- termination: http
connections: 200
samples: 5
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
tuningPatch: '{"spec":{"nodePlacement": {"nodeSelector": {"matchLabels": {"node-role.kubernetes.io/infra": ""}}}, "replicas": 3}}'
delay: 10s
requestTimeout: 10s
warmup: true

- termination: http
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: edge
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: reencrypt
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: passthrough
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: reencrypt
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: hloader
serverReplicas: 45
requestTimeout: 10s
tuningPatch: '{"metadata":{"annotations": {"ingress.operator.openshift.io/default-enable-http2": "true"}}}'
http2: true

- termination: passthrough
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: hloader
serverReplicas: 45
requestTimeout: 10s
http2: true
@@ -0,0 +1,82 @@
# vi: expandtab shiftwidth=2 softtabstop=2

# The first scenario is a warmup; it also tunes the default ingress-controller to place the router pods on the infra nodes with 4 replicas
- termination: http
connections: 200
samples: 5
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
tuningPatch: '{"spec":{"nodePlacement": {"nodeSelector": {"matchLabels": {"node-role.kubernetes.io/infra": ""}}}, "replicas": 4}}'
delay: 10s
requestTimeout: 10s
warmup: true

- termination: http
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: edge
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: reencrypt
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: passthrough
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: wrk
serverReplicas: 45
requestTimeout: 10s
delay: 10s

- termination: reencrypt
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: hloader
serverReplicas: 45
requestTimeout: 10s
tuningPatch: '{"metadata":{"annotations": {"ingress.operator.openshift.io/default-enable-http2": "true"}}}'
http2: true

- termination: passthrough
connections: 200
samples: 2
duration: 3m
path: /1024.html
concurrency: 18
tool: hloader
serverReplicas: 45
requestTimeout: 10s
http2: true
24 changes: 22 additions & 2 deletions perfscale_regression_ci/utils/run_workload.sh
@@ -4,7 +4,12 @@ set -ex

setup(){
rm -rf e2e-benchmarking
git clone --single-branch --branch ${E2E_BENCHMARKING_BRANCH} ${E2E_BENCHMARKING_REPOSITORY}
# Clone the e2e-benchmarking repo, pinned to a release tag (E2E_VERSION="default" selects the latest release)
REPO_URL="https://github.com/cloud-bulldozer/e2e-benchmarking"
LATEST_TAG=$(curl -s "https://api.github.com/repos/cloud-bulldozer/e2e-benchmarking/releases/latest" | jq -r '.tag_name')
E2E_VERSION=${E2E_VERSION:-"v2.2.5"}
TAG_OPTION="--branch $(if [ "$E2E_VERSION" == "default" ]; then echo "$LATEST_TAG"; else echo "$E2E_VERSION"; fi)"
git clone "$REPO_URL" $TAG_OPTION --depth 1
}

cleanup(){
@@ -21,4 +26,19 @@ run_workload(){
cleanup
}

set +ex
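# Run the ingress-perf workload from e2e-benchmarking: clone the repo if needed, point it
# at the configured Elasticsearch instance, and execute run.sh with the config in $CONFIG
# (defaults to config/standard.yml), logging output under /tmp.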
run_ingress_perf(){
if [[ ! -d e2e-benchmarking/workloads/ingress-perf ]]; then
setup
fi
pushd e2e-benchmarking/workloads/ingress-perf
export ES_USERNAME=${ES_USERNAME}
export ES_PASSWORD=${ES_PASSWORD}
export ES_SERVER="https://$ES_USERNAME:$ES_PASSWORD@search-ocp-qe-perf-scale-test-elk-hcm7wtsqpxy7xogbu72bor4uve.us-east-1.es.amazonaws.com"
export ES_INDEX="ingress-performance"
export CONFIG=${CONFIG:-"config/standard.yml"}
echo "[INFO] Will run ingress-perf with config $CONFIG"
./run.sh |& tee "/tmp/ingress-perf-$(date +%Y%m%d%H%M%S).out"
popd
}

set +ex
