From 773c6d3fbea94c545812b8e09a513acf0f0c9b04 Mon Sep 17 00:00:00 2001
From: topahadzi <topahadzi@gmail.com>
Date: Fri, 8 Dec 2023 14:46:44 +0700
Subject: [PATCH] remove unused chaos experiments, keep only node-restart

Delete the chaoslib implementations, experiment entrypoints, RBAC manifests,
and test fixtures for every experiment except the kubernetes and vira
node-restart variants, and drop the corresponding imports and -name switch
cases from bin/experiment/experiment.go. The two surviving node-restart
files carry small accompanying changes (7 and 14 lines per the diffstat).
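With the other cases gone, the binary's -name dispatch resolves only
"vira-node-restart"; any other value falls through to the "Unsupported
-name" error. A sketch of the remaining invocation (the package path is
taken from this repo's bin/experiment/experiment.go; the -name flag is
confirmed by the dispatch code there):

    # run the sole remaining experiment from the repo root;
    # assumes cluster credentials are available to the client setup
    go run ./bin/experiment -name vira-node-restart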

---
 bin/experiment/experiment.go                  | 140 -----
 .../litmus/aws-ssm-chaos/lib/ssm-chaos.go     | 173 ------
 .../lib/ssm/aws-ssm-chaos-by-id.go            |  86 ---
 .../lib/ssm/aws-ssm-chaos-by-tag.go           |  81 ---
 .../azure-disk-loss/lib/azure-disk-loss.go    | 290 ---------
 .../lib/azure-instance-stop.go                | 282 ---------
 .../container-kill/helper/container-kill.go   | 273 ---------
 .../container-kill/lib/container-kill.go      | 288 ---------
 chaoslib/litmus/disk-fill/helper/disk-fill.go | 368 -----------
 chaoslib/litmus/disk-fill/lib/disk-fill.go    | 298 ---------
 .../lib/docker-service-kill.go                | 208 -------
 .../lib/ebs-loss-by-id/lib/ebs-loss-by-id.go  |  78 ---
 .../ebs-loss-by-tag/lib/ebs-loss-by-tag.go    |  75 ---
 chaoslib/litmus/ebs-loss/lib/ebs-loss.go      | 232 -------
 .../lib/ec2-terminate-by-id.go                | 256 --------
 .../lib/ec2-terminate-by-tag.go               | 287 ---------
 .../lib/gcp-vm-disk-loss-by-label.go          | 303 ---------
 .../gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go  | 295 ---------
 .../lib/gcp-vm-instance-stop-by-label.go      | 285 ---------
 .../lib/gcp-vm-instance-stop.go               | 295 ---------
 .../litmus/http-chaos/helper/http-helper.go   | 327 ----------
 .../litmus/http-chaos/lib/header/header.go    |  31 -
 chaoslib/litmus/http-chaos/lib/http-chaos.go  | 292 ---------
 .../litmus/http-chaos/lib/latency/latency.go  |  28 -
 .../http-chaos/lib/modify-body/modify-body.go |  45 --
 chaoslib/litmus/http-chaos/lib/reset/reset.go |  28 -
 .../http-chaos/lib/statuscode/status-code.go  | 112 ----
 .../lib/pod-delete.go                         | 245 --------
 .../lib/kubelet-service-kill.go               | 210 -------
 chaoslib/litmus/network-chaos/helper/netem.go | 381 ------------
 .../lib/corruption/corruption.go              |  15 -
 .../lib/duplication/duplication.go            |  15 -
 .../network-chaos/lib/latency/latency.go      |  17 -
 .../litmus/network-chaos/lib/loss/loss.go     |  15 -
 .../litmus/network-chaos/lib/network-chaos.go | 514 ----------------
 .../litmus/node-cpu-hog/lib/node-cpu-hog.go   | 288 ---------
 chaoslib/litmus/node-drain/lib/node-drain.go  | 227 -------
 .../node-io-stress/lib/node-io-stress.go      | 300 ---------
 .../node-memory-hog/lib/node-memory-hog.go    | 373 -----------
 .../litmus/node-restart/lib/node-restart.go   | 154 -----
 chaoslib/litmus/node-taint/lib/node-taint.go  | 251 --------
 .../pod-autoscaler/lib/pod-autoscaler.go      | 439 -------------
 .../pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go  | 319 ----------
 chaoslib/litmus/pod-delete/lib/pod-delete.go  | 260 --------
 .../litmus/pod-dns-chaos/helper/dnschaos.go   | 293 ---------
 .../litmus/pod-dns-chaos/lib/pod-dns-chaos.go | 285 ---------
 .../pod-fio-stress/lib/pod-fio-stress.go      | 296 ---------
 .../lib/pod-memory-hog-exec.go                | 323 ----------
 .../lib/network-policy.go                     | 297 ---------
 .../lib/pod-network-partition.go              | 254 --------
 .../lib/redfish-node-restart.go               |  68 ---
 .../lib/spring-boot-chaos.go                  | 393 ------------
 .../stress-chaos/helper/stress-helper.go      | 577 ------------------
 .../litmus/stress-chaos/lib/stress-chaos.go   | 346 -----------
 .../vira/node-restart/lib/node-restart.go     |   7 +-
 .../litmus/vm-poweroff/lib/vm-poweroff.go     | 255 --------
 .../aws-ssm/aws-ssm-chaos-by-id/README.md     |  14 -
 .../experiment/aws-ssm-chaos-by-id.go         | 196 ------
 .../aws-ssm/aws-ssm-chaos-by-id/rbac.yaml     |  46 --
 .../aws-ssm/aws-ssm-chaos-by-id/test/test.yml |  44 --
 .../aws-ssm/aws-ssm-chaos-by-tag/README.md    |  14 -
 .../experiment/aws-ssm-chaos-by-tag.go        | 186 ------
 .../aws-ssm/aws-ssm-chaos-by-tag/rbac.yaml    |  46 --
 .../aws-ssm-chaos-by-tag/test/test.yml        |  44 --
 experiments/azure/azure-disk-loss/README.md   |  14 -
 .../experiment/azure-disk-loss.go             | 192 ------
 .../azure/azure-disk-loss/test/test.yml       |  81 ---
 experiments/azure/instance-stop/README.md     |  14 -
 .../experiment/azure-instance-stop.go         | 196 ------
 experiments/azure/instance-stop/rbac.yaml     |  37 --
 experiments/azure/instance-stop/test/test.yml |  78 ---
 .../baremetal/redfish-node-restart/README.md  |  17 -
 .../experiment/redfish-node-restart.go        | 218 -------
 .../baremetal/redfish-node-restart/rbac.yaml  |  37 --
 .../redfish-node-restart/test/test.yml        |  43 --
 experiments/cassandra/pod-delete/README.md    |  14 -
 .../pod-delete/experiment/pod-delete.go       | 222 -------
 experiments/cassandra/pod-delete/rbac.yaml    |  37 --
 .../cassandra/pod-delete/test/test.yml        |  72 ---
 .../experiment/gcp-vm-disk-loss-by-label.go   | 183 ------
 .../gcp-vm-disk-loss-by-label/test/test.yml   |  67 --
 experiments/gcp/gcp-vm-disk-loss/README.md    |  14 -
 .../experiment/gcp-vm-disk-loss.go            | 189 ------
 experiments/gcp/gcp-vm-disk-loss/rbac.yaml    |  46 --
 .../gcp/gcp-vm-disk-loss/test/test.yml        |  65 --
 .../gcp-vm-instance-stop-by-label.go          | 183 ------
 .../test/test.yml                             |  66 --
 .../gcp/gcp-vm-instance-stop/README.md        |  14 -
 .../experiment/gcp-vm-instance-stop.go        | 184 ------
 .../gcp/gcp-vm-instance-stop/rbac.yaml        |  49 --
 .../gcp/gcp-vm-instance-stop/test/test.yml    |  51 --
 experiments/generic/container-kill/README.md  |  15 -
 .../experiment/container-kill.go              | 173 ------
 experiments/generic/container-kill/rbac.yaml  |  37 --
 .../generic/container-kill/test/test.yml      |  77 ---
 experiments/generic/disk-fill/README.md       |  14 -
 .../generic/disk-fill/experiment/disk-fill.go | 170 ------
 experiments/generic/disk-fill/rbac.yaml       |  34 --
 experiments/generic/disk-fill/test/test.yml   |  60 --
 .../generic/docker-service-kill/README.md     |  14 -
 .../experiment/docker-service-kill.go         | 205 -------
 .../generic/docker-service-kill/rbac.yaml     |  60 --
 .../generic/docker-service-kill/test/test.yml |  69 ---
 .../generic/kubelet-service-kill/README.md    |  15 -
 .../experiment/kubelet-service-kill.go        | 206 -------
 .../generic/kubelet-service-kill/rbac.yaml    |  37 --
 .../kubelet-service-kill/test/test.yml        |  51 --
 experiments/generic/node-cpu-hog/README.md    |  14 -
 .../node-cpu-hog/experiment/node-cpu-hog.go   | 207 -------
 experiments/generic/node-cpu-hog/rbac.yaml    |  37 --
 .../generic/node-cpu-hog/test/test.yml        |  57 --
 experiments/generic/node-drain/README.md      |  14 -
 .../node-drain/experiment/node-drain.go       | 206 -------
 experiments/generic/node-drain/rbac.yaml      |  37 --
 experiments/generic/node-drain/test/test.yml  |  51 --
 experiments/generic/node-io-stress/README.md  |  14 -
 .../experiment/node-io-stress.go              | 209 -------
 experiments/generic/node-io-stress/rbac.yaml  |  37 --
 .../generic/node-io-stress/test/test.yml      |  60 --
 experiments/generic/node-memory-hog/README.md |  14 -
 .../experiment/node-memory-hog.go             | 208 -------
 experiments/generic/node-memory-hog/rbac.yaml |  37 --
 .../generic/node-memory-hog/test/test.yml     |  58 --
 experiments/generic/node-restart/README.md    |  14 -
 .../node-restart/experiment/node-restart.go   | 202 ------
 experiments/generic/node-restart/rbac.yaml    |  37 --
 .../generic/node-restart/test/test.yml        |  72 ---
 experiments/generic/node-taint/README.md      |  14 -
 .../node-taint/experiment/node-taint.go       | 207 -------
 experiments/generic/node-taint/rbac.yaml      |  37 --
 experiments/generic/node-taint/test/test.yml  |  54 --
 experiments/generic/pod-autoscaler/README.md  |  14 -
 .../experiment/pod-autoscaler.go              | 175 ------
 experiments/generic/pod-autoscaler/rbac.yaml  |  37 --
 .../generic/pod-autoscaler/test/test.yml      |  52 --
 .../generic/pod-cpu-hog-exec/README.md        |  14 -
 .../experiment/pod-cpu-hog-exec.go            | 173 ------
 .../generic/pod-cpu-hog-exec/rbac.yaml        |  36 --
 .../generic/pod-cpu-hog-exec/test/test.yml    |  64 --
 experiments/generic/pod-cpu-hog/README.md     |  14 -
 .../pod-cpu-hog/experiment/pod-cpu-hog.go     | 173 ------
 experiments/generic/pod-cpu-hog/rbac.yaml     |  36 --
 experiments/generic/pod-cpu-hog/test/test.yml |  64 --
 experiments/generic/pod-delete/README.md      |  15 -
 .../pod-delete/experiment/pod-delete.go       | 181 ------
 experiments/generic/pod-delete/rbac.yaml      |  37 --
 experiments/generic/pod-delete/test/test.yml  |  54 --
 experiments/generic/pod-dns-error/README.md   |  14 -
 .../pod-dns-error/experiment/pod-dns-error.go | 173 ------
 experiments/generic/pod-dns-error/rbac.yaml   |  54 --
 .../generic/pod-dns-error/test/test.yml       |  86 ---
 experiments/generic/pod-dns-spoof/README.md   |  14 -
 .../pod-dns-spoof/experiment/pod-dns-spoof.go | 175 ------
 experiments/generic/pod-dns-spoof/rbac.yaml   |  54 --
 .../generic/pod-dns-spoof/test/test.yml       |  82 ---
 experiments/generic/pod-fio-stress/README.md  |  16 -
 .../experiment/pod-fio-stress.go              | 171 ------
 experiments/generic/pod-fio-stress/rbac.yaml  |  36 --
 .../generic/pod-fio-stress/test/test.yml      |  77 ---
 .../generic/pod-http-latency/README.md        |  14 -
 .../experiment/pod-http-latency.go            | 173 ------
 .../generic/pod-http-latency/rbac.yaml        |  80 ---
 .../generic/pod-http-latency/test/test.yaml   | 103 ----
 .../generic/pod-http-modify-body/README.md    |  14 -
 .../experiment/pod-http-modify-body.go        | 172 ------
 .../generic/pod-http-modify-body/rbac.yaml    |  60 --
 .../pod-http-modify-body/test/test.yml        |  91 ---
 .../generic/pod-http-modify-header/README.md  |  14 -
 .../experiment/pod-http-modify-header.go      | 173 ------
 .../generic/pod-http-modify-header/rbac.yaml  |  80 ---
 .../pod-http-modify-header/test/test.yaml     | 100 ---
 .../generic/pod-http-reset-peer/README.md     |  14 -
 .../experiment/pod-http-reset-peer.go         | 172 ------
 .../generic/pod-http-reset-peer/rbac.yaml     |  80 ---
 .../generic/pod-http-reset-peer/test/test.yml |  99 ---
 .../generic/pod-http-status-code/README.md    |  14 -
 .../experiment/pod-http-status-code.go        | 180 ------
 .../generic/pod-http-status-code/rbac.yaml    |  60 --
 .../pod-http-status-code/test/test.yml        | 100 ---
 experiments/generic/pod-io-stress/README.md   |  14 -
 .../pod-io-stress/experiment/pod-io-stress.go | 173 ------
 experiments/generic/pod-io-stress/rbac.yaml   |  36 --
 .../generic/pod-io-stress/test/test.yml       |  61 --
 .../generic/pod-memory-hog-exec/README.md     |  14 -
 .../experiment/pod-memory-hog-exec.go         | 173 ------
 .../generic/pod-memory-hog-exec/rbac.yaml     |  36 --
 .../generic/pod-memory-hog-exec/test/test.yml |  64 --
 experiments/generic/pod-memory-hog/README.md  |  14 -
 .../experiment/pod-memory-hog.go              | 173 ------
 experiments/generic/pod-memory-hog/rbac.yaml  |  36 --
 .../generic/pod-memory-hog/test/test.yml      |  64 --
 .../generic/pod-network-corruption/README.md  |  15 -
 .../experiment/pod-network-corruption.go      | 174 ------
 .../generic/pod-network-corruption/rbac.yaml  |  36 --
 .../pod-network-corruption/test/test.yml      |  83 ---
 .../generic/pod-network-duplication/README.md |  15 -
 .../experiment/pod-network-duplication.go     | 174 ------
 .../generic/pod-network-duplication/rbac.yaml |  35 --
 .../pod-network-duplication/test/test.yml     |  83 ---
 .../generic/pod-network-latency/README.md     |  15 -
 .../experiment/pod-network-latency.go         | 174 ------
 .../generic/pod-network-latency/rbac.yaml     |  36 --
 .../generic/pod-network-latency/test/test.yml |  87 ---
 .../generic/pod-network-loss/README.md        |  15 -
 .../experiment/pod-network-loss.go            | 173 ------
 .../generic/pod-network-loss/rbac.yaml        |  35 --
 .../generic/pod-network-loss/test/test.yml    |  83 ---
 .../generic/pod-network-partition/README.md   |  14 -
 .../experiment/pod-network-partition.go       | 171 ------
 .../generic/pod-network-partition/rbac.yaml   |  48 --
 .../pod-network-partition/test/test.yml       |  70 ---
 .../kafka/kafka-broker-pod-failure/README.md  |  14 -
 .../experiment/kafka-broker-pod-failure.go    | 210 -------
 .../kafka/kafka-broker-pod-failure/rbac.yaml  |  36 --
 .../kafka-broker-pod-failure/test/test.yml    | 101 ---
 experiments/kube-aws/ebs-loss-by-id/README.md |  14 -
 .../experiment/ebs-loss-by-id.go              | 183 ------
 experiments/kube-aws/ebs-loss-by-id/rbac.yaml |  46 --
 .../kube-aws/ebs-loss-by-id/test/test.yml     |  60 --
 .../kube-aws/ebs-loss-by-tag/README.md        |  14 -
 .../experiment/ebs-loss-by-tag.go             | 181 ------
 .../kube-aws/ebs-loss-by-tag/rbac.yaml        |  46 --
 .../kube-aws/ebs-loss-by-tag/test/test.yml    |  57 --
 .../kube-aws/ec2-terminate-by-id/README.md    |  14 -
 .../experiment/ec2-terminate-by-id.go         | 213 -------
 .../kube-aws/ec2-terminate-by-id/rbac.yaml    |  49 --
 .../ec2-terminate-by-id/test/test.yml         |  43 --
 .../kube-aws/ec2-terminate-by-tag/README.md   |  14 -
 .../experiment/ec2-terminate-tag.go           | 208 -------
 .../kube-aws/ec2-terminate-by-tag/rbac.yaml   |  49 --
 .../ec2-terminate-by-tag/test/test.yml        |  44 --
 .../node-restart/experiment/node-restart.go   |  14 +-
 .../spring-boot/spring-boot-faults/README.md  |  40 --
 .../experiment/spring-boot-faults.go          | 196 ------
 experiments/vmware/vm-poweroff/README.md      |  14 -
 .../vm-poweroff/experiment/vm-poweroff.go     | 203 ------
 experiments/vmware/vm-poweroff/test/test.yml  |  87 ---
 .../aws-ssm-chaos/environment/environment.go  |  42 --
 pkg/aws-ssm/aws-ssm-chaos/types/types.go      |  36 --
 .../disk-loss/environment/environment.go      |  30 -
 pkg/azure/disk-loss/types/types.go            |  25 -
 .../instance-stop/environment/environment.go  |  30 -
 pkg/azure/instance-stop/types/types.go        |  25 -
 .../environment/environment.go                |  29 -
 .../redfish-node-restart/types/types.go       |  24 -
 pkg/baremetal/redfish/redfish.go              |  87 ---
 pkg/cassandra/liveness.go                     | 376 ------------
 pkg/cassandra/node-tools.go                   | 107 ----
 .../pod-delete/environment/environment.go     |  52 --
 pkg/cassandra/pod-delete/types/types.go       |  18 -
 .../environment/environment.go                |  32 -
 pkg/gcp/gcp-vm-disk-loss/types/types.go       |  30 -
 .../environment/environment.go                |  33 -
 pkg/gcp/gcp-vm-instance-stop/types/types.go   |  29 -
 pkg/kafka/environment/environment.go          |  63 --
 pkg/kafka/kafka-cluster-health.go             |  32 -
 pkg/kafka/kafka-liveness-cleanup.go           |  34 --
 pkg/kafka/kafka-liveness-stream.go            | 192 ------
 pkg/kafka/types/types.go                      |  27 -
 .../environment/environment.go                | 182 ------
 .../spring-boot-chaos/types/types.go          | 116 ----
 .../vm-poweroff/environment/environment.go    |  32 -
 pkg/vmware/vm-poweroff/types/types.go         |  29 -
 263 files changed, 10 insertions(+), 29110 deletions(-)
 delete mode 100644 chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go
 delete mode 100644 chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go
 delete mode 100644 chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go
 delete mode 100644 chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go
 delete mode 100644 chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go
 delete mode 100644 chaoslib/litmus/container-kill/helper/container-kill.go
 delete mode 100644 chaoslib/litmus/container-kill/lib/container-kill.go
 delete mode 100644 chaoslib/litmus/disk-fill/helper/disk-fill.go
 delete mode 100644 chaoslib/litmus/disk-fill/lib/disk-fill.go
 delete mode 100644 chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go
 delete mode 100644 chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go
 delete mode 100644 chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go
 delete mode 100644 chaoslib/litmus/ebs-loss/lib/ebs-loss.go
 delete mode 100644 chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go
 delete mode 100644 chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go
 delete mode 100644 chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go
 delete mode 100644 chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go
 delete mode 100644 chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go
 delete mode 100644 chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go
 delete mode 100644 chaoslib/litmus/http-chaos/helper/http-helper.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/header/header.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/http-chaos.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/latency/latency.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/reset/reset.go
 delete mode 100644 chaoslib/litmus/http-chaos/lib/statuscode/status-code.go
 delete mode 100644 chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go
 delete mode 100644 chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go
 delete mode 100644 chaoslib/litmus/network-chaos/helper/netem.go
 delete mode 100644 chaoslib/litmus/network-chaos/lib/corruption/corruption.go
 delete mode 100644 chaoslib/litmus/network-chaos/lib/duplication/duplication.go
 delete mode 100644 chaoslib/litmus/network-chaos/lib/latency/latency.go
 delete mode 100644 chaoslib/litmus/network-chaos/lib/loss/loss.go
 delete mode 100644 chaoslib/litmus/network-chaos/lib/network-chaos.go
 delete mode 100644 chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go
 delete mode 100644 chaoslib/litmus/node-drain/lib/node-drain.go
 delete mode 100644 chaoslib/litmus/node-io-stress/lib/node-io-stress.go
 delete mode 100644 chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go
 delete mode 100644 chaoslib/litmus/node-restart/lib/node-restart.go
 delete mode 100644 chaoslib/litmus/node-taint/lib/node-taint.go
 delete mode 100644 chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go
 delete mode 100644 chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go
 delete mode 100644 chaoslib/litmus/pod-delete/lib/pod-delete.go
 delete mode 100644 chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go
 delete mode 100644 chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go
 delete mode 100644 chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go
 delete mode 100644 chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go
 delete mode 100644 chaoslib/litmus/pod-network-partition/lib/network-policy.go
 delete mode 100644 chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go
 delete mode 100644 chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go
 delete mode 100644 chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go
 delete mode 100644 chaoslib/litmus/stress-chaos/helper/stress-helper.go
 delete mode 100644 chaoslib/litmus/stress-chaos/lib/stress-chaos.go
 delete mode 100644 chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-id/README.md
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-id/rbac.yaml
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-id/test/test.yml
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-tag/README.md
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-tag/rbac.yaml
 delete mode 100644 experiments/aws-ssm/aws-ssm-chaos-by-tag/test/test.yml
 delete mode 100644 experiments/azure/azure-disk-loss/README.md
 delete mode 100644 experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go
 delete mode 100644 experiments/azure/azure-disk-loss/test/test.yml
 delete mode 100644 experiments/azure/instance-stop/README.md
 delete mode 100644 experiments/azure/instance-stop/experiment/azure-instance-stop.go
 delete mode 100644 experiments/azure/instance-stop/rbac.yaml
 delete mode 100644 experiments/azure/instance-stop/test/test.yml
 delete mode 100644 experiments/baremetal/redfish-node-restart/README.md
 delete mode 100644 experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go
 delete mode 100644 experiments/baremetal/redfish-node-restart/rbac.yaml
 delete mode 100644 experiments/baremetal/redfish-node-restart/test/test.yml
 delete mode 100644 experiments/cassandra/pod-delete/README.md
 delete mode 100644 experiments/cassandra/pod-delete/experiment/pod-delete.go
 delete mode 100644 experiments/cassandra/pod-delete/rbac.yaml
 delete mode 100644 experiments/cassandra/pod-delete/test/test.yml
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss-by-label/test/test.yml
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss/README.md
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss/rbac.yaml
 delete mode 100644 experiments/gcp/gcp-vm-disk-loss/test/test.yml
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop-by-label/test/test.yml
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop/README.md
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop/rbac.yaml
 delete mode 100644 experiments/gcp/gcp-vm-instance-stop/test/test.yml
 delete mode 100644 experiments/generic/container-kill/README.md
 delete mode 100644 experiments/generic/container-kill/experiment/container-kill.go
 delete mode 100644 experiments/generic/container-kill/rbac.yaml
 delete mode 100644 experiments/generic/container-kill/test/test.yml
 delete mode 100644 experiments/generic/disk-fill/README.md
 delete mode 100644 experiments/generic/disk-fill/experiment/disk-fill.go
 delete mode 100644 experiments/generic/disk-fill/rbac.yaml
 delete mode 100644 experiments/generic/disk-fill/test/test.yml
 delete mode 100644 experiments/generic/docker-service-kill/README.md
 delete mode 100644 experiments/generic/docker-service-kill/experiment/docker-service-kill.go
 delete mode 100644 experiments/generic/docker-service-kill/rbac.yaml
 delete mode 100644 experiments/generic/docker-service-kill/test/test.yml
 delete mode 100644 experiments/generic/kubelet-service-kill/README.md
 delete mode 100644 experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
 delete mode 100644 experiments/generic/kubelet-service-kill/rbac.yaml
 delete mode 100644 experiments/generic/kubelet-service-kill/test/test.yml
 delete mode 100644 experiments/generic/node-cpu-hog/README.md
 delete mode 100644 experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
 delete mode 100644 experiments/generic/node-cpu-hog/rbac.yaml
 delete mode 100644 experiments/generic/node-cpu-hog/test/test.yml
 delete mode 100644 experiments/generic/node-drain/README.md
 delete mode 100644 experiments/generic/node-drain/experiment/node-drain.go
 delete mode 100644 experiments/generic/node-drain/rbac.yaml
 delete mode 100644 experiments/generic/node-drain/test/test.yml
 delete mode 100644 experiments/generic/node-io-stress/README.md
 delete mode 100644 experiments/generic/node-io-stress/experiment/node-io-stress.go
 delete mode 100644 experiments/generic/node-io-stress/rbac.yaml
 delete mode 100644 experiments/generic/node-io-stress/test/test.yml
 delete mode 100644 experiments/generic/node-memory-hog/README.md
 delete mode 100644 experiments/generic/node-memory-hog/experiment/node-memory-hog.go
 delete mode 100644 experiments/generic/node-memory-hog/rbac.yaml
 delete mode 100644 experiments/generic/node-memory-hog/test/test.yml
 delete mode 100644 experiments/generic/node-restart/README.md
 delete mode 100644 experiments/generic/node-restart/experiment/node-restart.go
 delete mode 100644 experiments/generic/node-restart/rbac.yaml
 delete mode 100644 experiments/generic/node-restart/test/test.yml
 delete mode 100644 experiments/generic/node-taint/README.md
 delete mode 100644 experiments/generic/node-taint/experiment/node-taint.go
 delete mode 100644 experiments/generic/node-taint/rbac.yaml
 delete mode 100644 experiments/generic/node-taint/test/test.yml
 delete mode 100644 experiments/generic/pod-autoscaler/README.md
 delete mode 100644 experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
 delete mode 100644 experiments/generic/pod-autoscaler/rbac.yaml
 delete mode 100644 experiments/generic/pod-autoscaler/test/test.yml
 delete mode 100644 experiments/generic/pod-cpu-hog-exec/README.md
 delete mode 100644 experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
 delete mode 100644 experiments/generic/pod-cpu-hog-exec/rbac.yaml
 delete mode 100644 experiments/generic/pod-cpu-hog-exec/test/test.yml
 delete mode 100644 experiments/generic/pod-cpu-hog/README.md
 delete mode 100644 experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
 delete mode 100644 experiments/generic/pod-cpu-hog/rbac.yaml
 delete mode 100644 experiments/generic/pod-cpu-hog/test/test.yml
 delete mode 100644 experiments/generic/pod-delete/README.md
 delete mode 100644 experiments/generic/pod-delete/experiment/pod-delete.go
 delete mode 100644 experiments/generic/pod-delete/rbac.yaml
 delete mode 100644 experiments/generic/pod-delete/test/test.yml
 delete mode 100644 experiments/generic/pod-dns-error/README.md
 delete mode 100644 experiments/generic/pod-dns-error/experiment/pod-dns-error.go
 delete mode 100644 experiments/generic/pod-dns-error/rbac.yaml
 delete mode 100644 experiments/generic/pod-dns-error/test/test.yml
 delete mode 100644 experiments/generic/pod-dns-spoof/README.md
 delete mode 100644 experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go
 delete mode 100644 experiments/generic/pod-dns-spoof/rbac.yaml
 delete mode 100644 experiments/generic/pod-dns-spoof/test/test.yml
 delete mode 100644 experiments/generic/pod-fio-stress/README.md
 delete mode 100644 experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go
 delete mode 100644 experiments/generic/pod-fio-stress/rbac.yaml
 delete mode 100644 experiments/generic/pod-fio-stress/test/test.yml
 delete mode 100644 experiments/generic/pod-http-latency/README.md
 delete mode 100644 experiments/generic/pod-http-latency/experiment/pod-http-latency.go
 delete mode 100644 experiments/generic/pod-http-latency/rbac.yaml
 delete mode 100644 experiments/generic/pod-http-latency/test/test.yaml
 delete mode 100644 experiments/generic/pod-http-modify-body/README.md
 delete mode 100644 experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
 delete mode 100644 experiments/generic/pod-http-modify-body/rbac.yaml
 delete mode 100644 experiments/generic/pod-http-modify-body/test/test.yml
 delete mode 100644 experiments/generic/pod-http-modify-header/README.md
 delete mode 100644 experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
 delete mode 100644 experiments/generic/pod-http-modify-header/rbac.yaml
 delete mode 100644 experiments/generic/pod-http-modify-header/test/test.yaml
 delete mode 100644 experiments/generic/pod-http-reset-peer/README.md
 delete mode 100644 experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
 delete mode 100644 experiments/generic/pod-http-reset-peer/rbac.yaml
 delete mode 100644 experiments/generic/pod-http-reset-peer/test/test.yml
 delete mode 100644 experiments/generic/pod-http-status-code/README.md
 delete mode 100644 experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
 delete mode 100644 experiments/generic/pod-http-status-code/rbac.yaml
 delete mode 100644 experiments/generic/pod-http-status-code/test/test.yml
 delete mode 100644 experiments/generic/pod-io-stress/README.md
 delete mode 100644 experiments/generic/pod-io-stress/experiment/pod-io-stress.go
 delete mode 100644 experiments/generic/pod-io-stress/rbac.yaml
 delete mode 100644 experiments/generic/pod-io-stress/test/test.yml
 delete mode 100644 experiments/generic/pod-memory-hog-exec/README.md
 delete mode 100644 experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
 delete mode 100644 experiments/generic/pod-memory-hog-exec/rbac.yaml
 delete mode 100644 experiments/generic/pod-memory-hog-exec/test/test.yml
 delete mode 100644 experiments/generic/pod-memory-hog/README.md
 delete mode 100644 experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
 delete mode 100644 experiments/generic/pod-memory-hog/rbac.yaml
 delete mode 100644 experiments/generic/pod-memory-hog/test/test.yml
 delete mode 100644 experiments/generic/pod-network-corruption/README.md
 delete mode 100644 experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go
 delete mode 100644 experiments/generic/pod-network-corruption/rbac.yaml
 delete mode 100644 experiments/generic/pod-network-corruption/test/test.yml
 delete mode 100644 experiments/generic/pod-network-duplication/README.md
 delete mode 100644 experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go
 delete mode 100644 experiments/generic/pod-network-duplication/rbac.yaml
 delete mode 100644 experiments/generic/pod-network-duplication/test/test.yml
 delete mode 100644 experiments/generic/pod-network-latency/README.md
 delete mode 100644 experiments/generic/pod-network-latency/experiment/pod-network-latency.go
 delete mode 100644 experiments/generic/pod-network-latency/rbac.yaml
 delete mode 100644 experiments/generic/pod-network-latency/test/test.yml
 delete mode 100644 experiments/generic/pod-network-loss/README.md
 delete mode 100644 experiments/generic/pod-network-loss/experiment/pod-network-loss.go
 delete mode 100644 experiments/generic/pod-network-loss/rbac.yaml
 delete mode 100644 experiments/generic/pod-network-loss/test/test.yml
 delete mode 100644 experiments/generic/pod-network-partition/README.md
 delete mode 100644 experiments/generic/pod-network-partition/experiment/pod-network-partition.go
 delete mode 100644 experiments/generic/pod-network-partition/rbac.yaml
 delete mode 100644 experiments/generic/pod-network-partition/test/test.yml
 delete mode 100644 experiments/kafka/kafka-broker-pod-failure/README.md
 delete mode 100644 experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go
 delete mode 100644 experiments/kafka/kafka-broker-pod-failure/rbac.yaml
 delete mode 100644 experiments/kafka/kafka-broker-pod-failure/test/test.yml
 delete mode 100644 experiments/kube-aws/ebs-loss-by-id/README.md
 delete mode 100644 experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go
 delete mode 100644 experiments/kube-aws/ebs-loss-by-id/rbac.yaml
 delete mode 100644 experiments/kube-aws/ebs-loss-by-id/test/test.yml
 delete mode 100644 experiments/kube-aws/ebs-loss-by-tag/README.md
 delete mode 100644 experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
 delete mode 100644 experiments/kube-aws/ebs-loss-by-tag/rbac.yaml
 delete mode 100644 experiments/kube-aws/ebs-loss-by-tag/test/test.yml
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-id/README.md
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-id/rbac.yaml
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-id/test/test.yml
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-tag/README.md
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-tag/rbac.yaml
 delete mode 100644 experiments/kube-aws/ec2-terminate-by-tag/test/test.yml
 delete mode 100644 experiments/spring-boot/spring-boot-faults/README.md
 delete mode 100644 experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go
 delete mode 100644 experiments/vmware/vm-poweroff/README.md
 delete mode 100644 experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
 delete mode 100644 experiments/vmware/vm-poweroff/test/test.yml
 delete mode 100644 pkg/aws-ssm/aws-ssm-chaos/environment/environment.go
 delete mode 100644 pkg/aws-ssm/aws-ssm-chaos/types/types.go
 delete mode 100644 pkg/azure/disk-loss/environment/environment.go
 delete mode 100644 pkg/azure/disk-loss/types/types.go
 delete mode 100644 pkg/azure/instance-stop/environment/environment.go
 delete mode 100644 pkg/azure/instance-stop/types/types.go
 delete mode 100644 pkg/baremetal/redfish-node-restart/environment/environment.go
 delete mode 100644 pkg/baremetal/redfish-node-restart/types/types.go
 delete mode 100644 pkg/baremetal/redfish/redfish.go
 delete mode 100644 pkg/cassandra/liveness.go
 delete mode 100644 pkg/cassandra/node-tools.go
 delete mode 100644 pkg/cassandra/pod-delete/environment/environment.go
 delete mode 100644 pkg/cassandra/pod-delete/types/types.go
 delete mode 100644 pkg/gcp/gcp-vm-disk-loss/environment/environment.go
 delete mode 100644 pkg/gcp/gcp-vm-disk-loss/types/types.go
 delete mode 100644 pkg/gcp/gcp-vm-instance-stop/environment/environment.go
 delete mode 100644 pkg/gcp/gcp-vm-instance-stop/types/types.go
 delete mode 100644 pkg/kafka/environment/environment.go
 delete mode 100644 pkg/kafka/kafka-cluster-health.go
 delete mode 100644 pkg/kafka/kafka-liveness-cleanup.go
 delete mode 100644 pkg/kafka/kafka-liveness-stream.go
 delete mode 100644 pkg/kafka/types/types.go
 delete mode 100644 pkg/spring-boot/spring-boot-chaos/environment/environment.go
 delete mode 100644 pkg/spring-boot/spring-boot-chaos/types/types.go
 delete mode 100644 pkg/vmware/vm-poweroff/environment/environment.go
 delete mode 100644 pkg/vmware/vm-poweroff/types/types.go

diff --git a/bin/experiment/experiment.go b/bin/experiment/experiment.go
index 95de18b..fb881ba 100755
--- a/bin/experiment/experiment.go
+++ b/bin/experiment/experiment.go
@@ -11,55 +11,9 @@ import (
 	// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
 	// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
 
-	awsSSMChaosByID "github.com/litmuschaos/litmus-go/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment"
-	awsSSMChaosByTag "github.com/litmuschaos/litmus-go/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment"
-	azureDiskLoss "github.com/litmuschaos/litmus-go/experiments/azure/azure-disk-loss/experiment"
-	azureInstanceStop "github.com/litmuschaos/litmus-go/experiments/azure/instance-stop/experiment"
-	redfishNodeRestart "github.com/litmuschaos/litmus-go/experiments/baremetal/redfish-node-restart/experiment"
-	cassandraPodDelete "github.com/litmuschaos/litmus-go/experiments/cassandra/pod-delete/experiment"
-	gcpVMDiskLossByLabel "github.com/litmuschaos/litmus-go/experiments/gcp/gcp-vm-disk-loss-by-label/experiment"
-	gcpVMDiskLoss "github.com/litmuschaos/litmus-go/experiments/gcp/gcp-vm-disk-loss/experiment"
-	gcpVMInstanceStopByLabel "github.com/litmuschaos/litmus-go/experiments/gcp/gcp-vm-instance-stop-by-label/experiment"
-	gcpVMInstanceStop "github.com/litmuschaos/litmus-go/experiments/gcp/gcp-vm-instance-stop/experiment"
-	containerKill "github.com/litmuschaos/litmus-go/experiments/generic/container-kill/experiment"
-	diskFill "github.com/litmuschaos/litmus-go/experiments/generic/disk-fill/experiment"
-	dockerServiceKill "github.com/litmuschaos/litmus-go/experiments/generic/docker-service-kill/experiment"
-	kubeletServiceKill "github.com/litmuschaos/litmus-go/experiments/generic/kubelet-service-kill/experiment"
-	nodeCPUHog "github.com/litmuschaos/litmus-go/experiments/generic/node-cpu-hog/experiment"
-	nodeDrain "github.com/litmuschaos/litmus-go/experiments/generic/node-drain/experiment"
-	nodeIOStress "github.com/litmuschaos/litmus-go/experiments/generic/node-io-stress/experiment"
-	nodeMemoryHog "github.com/litmuschaos/litmus-go/experiments/generic/node-memory-hog/experiment"
 
 	// nodeRestart "github.com/litmuschaos/litmus-go/experiments/generic/node-restart/experiment"
-	nodeTaint "github.com/litmuschaos/litmus-go/experiments/generic/node-taint/experiment"
-	podAutoscaler "github.com/litmuschaos/litmus-go/experiments/generic/pod-autoscaler/experiment"
-	podCPUHogExec "github.com/litmuschaos/litmus-go/experiments/generic/pod-cpu-hog-exec/experiment"
-	podCPUHog "github.com/litmuschaos/litmus-go/experiments/generic/pod-cpu-hog/experiment"
-	podDelete "github.com/litmuschaos/litmus-go/experiments/generic/pod-delete/experiment"
-	podDNSError "github.com/litmuschaos/litmus-go/experiments/generic/pod-dns-error/experiment"
-	podDNSSpoof "github.com/litmuschaos/litmus-go/experiments/generic/pod-dns-spoof/experiment"
-	podFioStress "github.com/litmuschaos/litmus-go/experiments/generic/pod-fio-stress/experiment"
-	podHttpLatency "github.com/litmuschaos/litmus-go/experiments/generic/pod-http-latency/experiment"
-	podHttpModifyBody "github.com/litmuschaos/litmus-go/experiments/generic/pod-http-modify-body/experiment"
-	podHttpModifyHeader "github.com/litmuschaos/litmus-go/experiments/generic/pod-http-modify-header/experiment"
-	podHttpResetPeer "github.com/litmuschaos/litmus-go/experiments/generic/pod-http-reset-peer/experiment"
-	podHttpStatusCode "github.com/litmuschaos/litmus-go/experiments/generic/pod-http-status-code/experiment"
-	podIOStress "github.com/litmuschaos/litmus-go/experiments/generic/pod-io-stress/experiment"
-	podMemoryHogExec "github.com/litmuschaos/litmus-go/experiments/generic/pod-memory-hog-exec/experiment"
-	podMemoryHog "github.com/litmuschaos/litmus-go/experiments/generic/pod-memory-hog/experiment"
-	podNetworkCorruption "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-corruption/experiment"
-	podNetworkDuplication "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-duplication/experiment"
-	podNetworkLatency "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-latency/experiment"
-	podNetworkLoss "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-loss/experiment"
-	podNetworkPartition "github.com/litmuschaos/litmus-go/experiments/generic/pod-network-partition/experiment"
-	kafkaBrokerPodFailure "github.com/litmuschaos/litmus-go/experiments/kafka/kafka-broker-pod-failure/experiment"
-	ebsLossByID "github.com/litmuschaos/litmus-go/experiments/kube-aws/ebs-loss-by-id/experiment"
-	ebsLossByTag "github.com/litmuschaos/litmus-go/experiments/kube-aws/ebs-loss-by-tag/experiment"
-	ec2TerminateByID "github.com/litmuschaos/litmus-go/experiments/kube-aws/ec2-terminate-by-id/experiment"
-	ec2TerminateByTag "github.com/litmuschaos/litmus-go/experiments/kube-aws/ec2-terminate-by-tag/experiment"
 	viraNodeRestart "github.com/litmuschaos/litmus-go/experiments/kubernetes/node-restart/experiment"
-	springBootFaults "github.com/litmuschaos/litmus-go/experiments/spring-boot/spring-boot-faults/experiment"
-	vmpoweroff "github.com/litmuschaos/litmus-go/experiments/vmware/vm-poweroff/experiment"
 
 	"github.com/litmuschaos/litmus-go/pkg/clients"
 	"github.com/litmuschaos/litmus-go/pkg/log"
@@ -92,102 +46,8 @@ func main() {
 
 	// invoke the corresponding experiment based on the (-name) flag
 	switch *experimentName {
-	case "container-kill":
-		containerKill.ContainerKill(clients)
-	case "disk-fill":
-		diskFill.DiskFill(clients)
-	case "kafka-broker-pod-failure":
-		kafkaBrokerPodFailure.KafkaBrokerPodFailure(clients)
-	case "kubelet-service-kill":
-		kubeletServiceKill.KubeletServiceKill(clients)
-	case "docker-service-kill":
-		dockerServiceKill.DockerServiceKill(clients)
-	case "node-cpu-hog":
-		nodeCPUHog.NodeCPUHog(clients)
-	case "node-drain":
-		nodeDrain.NodeDrain(clients)
-	case "node-io-stress":
-		nodeIOStress.NodeIOStress(clients)
-	case "node-memory-hog":
-		nodeMemoryHog.NodeMemoryHog(clients)
-	case "node-taint":
-		nodeTaint.NodeTaint(clients)
-	case "pod-autoscaler":
-		podAutoscaler.PodAutoscaler(clients)
-	case "pod-cpu-hog-exec":
-		podCPUHogExec.PodCPUHogExec(clients)
-	case "pod-delete":
-		podDelete.PodDelete(clients)
-	case "pod-io-stress":
-		podIOStress.PodIOStress(clients)
-	case "pod-memory-hog-exec":
-		podMemoryHogExec.PodMemoryHogExec(clients)
-	case "pod-network-corruption":
-		podNetworkCorruption.PodNetworkCorruption(clients)
-	case "pod-network-duplication":
-		podNetworkDuplication.PodNetworkDuplication(clients)
-	case "pod-network-latency":
-		podNetworkLatency.PodNetworkLatency(clients)
-	case "pod-network-loss":
-		podNetworkLoss.PodNetworkLoss(clients)
-	case "pod-network-partition":
-		podNetworkPartition.PodNetworkPartition(clients)
-	case "pod-memory-hog":
-		podMemoryHog.PodMemoryHog(clients)
-	case "pod-cpu-hog":
-		podCPUHog.PodCPUHog(clients)
-	case "cassandra-pod-delete":
-		cassandraPodDelete.CasssandraPodDelete(clients)
-	case "aws-ssm-chaos-by-id":
-		awsSSMChaosByID.AWSSSMChaosByID(clients)
-	case "aws-ssm-chaos-by-tag":
-		awsSSMChaosByTag.AWSSSMChaosByTag(clients)
-	case "ec2-terminate-by-id":
-		ec2TerminateByID.EC2TerminateByID(clients)
-	case "ec2-terminate-by-tag":
-		ec2TerminateByTag.EC2TerminateByTag(clients)
-	case "ebs-loss-by-id":
-		ebsLossByID.EBSLossByID(clients)
-	case "ebs-loss-by-tag":
-		ebsLossByTag.EBSLossByTag(clients)
-	// case "node-restart":
-	// 	nodeRestart.NodeRestart(clients)
 	case "vira-node-restart":
 		viraNodeRestart.NodeRestart(clients)
-	case "pod-dns-error":
-		podDNSError.PodDNSError(clients)
-	case "pod-dns-spoof":
-		podDNSSpoof.PodDNSSpoof(clients)
-	case "pod-http-latency":
-		podHttpLatency.PodHttpLatency(clients)
-	case "pod-http-status-code":
-		podHttpStatusCode.PodHttpStatusCode(clients)
-	case "pod-http-modify-header":
-		podHttpModifyHeader.PodHttpModifyHeader(clients)
-	case "pod-http-modify-body":
-		podHttpModifyBody.PodHttpModifyBody(clients)
-	case "pod-http-reset-peer":
-		podHttpResetPeer.PodHttpResetPeer(clients)
-	case "vm-poweroff":
-		vmpoweroff.VMPoweroff(clients)
-	case "azure-instance-stop":
-		azureInstanceStop.AzureInstanceStop(clients)
-	case "azure-disk-loss":
-		azureDiskLoss.AzureDiskLoss(clients)
-	case "gcp-vm-disk-loss":
-		gcpVMDiskLoss.VMDiskLoss(clients)
-	case "pod-fio-stress":
-		podFioStress.PodFioStress(clients)
-	case "gcp-vm-instance-stop":
-		gcpVMInstanceStop.VMInstanceStop(clients)
-	case "redfish-node-restart":
-		redfishNodeRestart.NodeRestart(clients)
-	case "gcp-vm-instance-stop-by-label":
-		gcpVMInstanceStopByLabel.GCPVMInstanceStopByLabel(clients)
-	case "gcp-vm-disk-loss-by-label":
-		gcpVMDiskLossByLabel.GCPVMDiskLossByLabel(clients)
-	case "spring-boot-cpu-stress", "spring-boot-memory-stress", "spring-boot-exceptions", "spring-boot-app-kill", "spring-boot-faults", "spring-boot-latency":
-		springBootFaults.Experiment(clients, *experimentName)
 	default:
 		log.Errorf("Unsupported -name %v, please provide the correct value of -name args", *experimentName)
 		return
diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go
deleted file mode 100644
index 9205e7f..0000000
--- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package lib
-
-import (
-	"os"
-	"strings"
-	"time"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-// InjectChaosInSerialMode will inject the aws ssm chaos in serial mode that is one after other
-func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Running SSM command on the instance
-			for i, ec2ID := range instanceIDList {
-
-				//Sending AWS SSM command
-				log.Info("[Chaos]: Starting the ssm command")
-				ec2IDList := strings.Fields(ec2ID)
-				commandId, err := ssm.SendSSMCommand(experimentsDetails, ec2IDList)
-				if err != nil {
-					return stacktrace.Propagate(err, "failed to send ssm command")
-				}
-				//prepare commands for abort recovery
-				experimentsDetails.CommandIDs = append(experimentsDetails.CommandIDs, commandId)
-
-				//wait for the ssm command to get in running state
-				log.Info("[Wait]: Waiting for the ssm command to get in InProgress state")
-				if err := ssm.WaitForCommandStatus("InProgress", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil {
-					return stacktrace.Propagate(err, "failed to start ssm command")
-				}
-				common.SetTargets(ec2ID, "injected", "EC2", chaosDetails)
-
-				// run the probes during chaos
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				//wait for the ssm command to get succeeded in the given chaos duration
-				log.Info("[Wait]: Waiting for the ssm command to get completed")
-				if err := ssm.WaitForCommandStatus("Success", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil {
-					return stacktrace.Propagate(err, "failed to send ssm command")
-				}
-				common.SetTargets(ec2ID, "reverted", "EC2", chaosDetails)
-
-				//Wait for chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-
-	}
-	return nil
-}
-
-// InjectChaosInParallelMode will inject the aws ssm chaos in parallel mode that is all at once
-func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Sending AWS SSM command
-			log.Info("[Chaos]: Starting the ssm command")
-			commandId, err := ssm.SendSSMCommand(experimentsDetails, instanceIDList)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to send ssm command")
-			}
-			//prepare commands for abort recovery
-			experimentsDetails.CommandIDs = append(experimentsDetails.CommandIDs, commandId)
-
-			for _, ec2ID := range instanceIDList {
-				//wait for the ssm command to get in running state
-				log.Info("[Wait]: Waiting for the ssm command to get in InProgress state")
-				if err := ssm.WaitForCommandStatus("InProgress", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil {
-					return stacktrace.Propagate(err, "failed to start ssm command")
-				}
-			}
-
-			// run the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			for _, ec2ID := range instanceIDList {
-				//wait for the ssm command to get succeeded in the given chaos duration
-				log.Info("[Wait]: Waiting for the ssm command to get completed")
-				if err := ssm.WaitForCommandStatus("Success", commandId, ec2ID, experimentsDetails.Region, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.Delay); err != nil {
-					return stacktrace.Propagate(err, "failed to send ssm command")
-				}
-			}
-
-			//Wait for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-
-	}
-	return nil
-}
-
-// AbortWatcher will be watching for the abort signal and revert the chaos
-func AbortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, abort chan os.Signal) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	switch {
-	case len(experimentsDetails.CommandIDs) != 0:
-		for _, commandId := range experimentsDetails.CommandIDs {
-			if err := ssm.CancelCommand(commandId, experimentsDetails.Region); err != nil {
-				log.Errorf("[Abort]: Failed to cancel command, recovery failed: %v", err)
-			}
-		}
-	default:
-		log.Info("[Abort]: No SSM Command found to cancel")
-	}
-	if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil {
-		log.Errorf("Failed to delete ssm document: %v", err)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go
deleted file mode 100644
index 0eb99d1..0000000
--- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package ssm
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-
-	"github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareAWSSSMChaosByID contains the prepration and injection steps for the experiment
-func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//create and upload the ssm document on the given aws service monitoring docs
-	if err = ssm.CreateAndUploadDocument(experimentsDetails.DocumentName, experimentsDetails.DocumentType, experimentsDetails.DocumentFormat, experimentsDetails.DocumentPath, experimentsDetails.Region); err != nil {
-		return stacktrace.Propagate(err, "could not create and upload the ssm document")
-	}
-	experimentsDetails.IsDocsUploaded = true
-	log.Info("[Info]: SSM docs uploaded successfully")
-
-	// watching for the abort signal and revert the chaos
-	go lib.AbortWatcher(experimentsDetails, abort)
-
-	//get the instance id or list of instance ids
-	instanceIDList := strings.Split(experimentsDetails.EC2InstanceID, ",")
-	if experimentsDetails.EC2InstanceID == "" || len(instanceIDList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance id found for chaos injection"}
-	}
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Delete the ssm document on the given aws service monitoring docs
-	err = ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region)
-	if err != nil {
-		return stacktrace.Propagate(err, "failed to delete ssm doc")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go
deleted file mode 100644
index 99884e6..0000000
--- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package ssm
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-
-	"github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-// PrepareAWSSSMChaosByTag contains the prepration and injection steps for the experiment
-func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// create and upload the SSM document to the given AWS region
-	if err = ssm.CreateAndUploadDocument(experimentsDetails.DocumentName, experimentsDetails.DocumentType, experimentsDetails.DocumentFormat, experimentsDetails.DocumentPath, experimentsDetails.Region); err != nil {
-		return stacktrace.Propagate(err, "could not create and upload the ssm document")
-	}
-	experimentsDetails.IsDocsUploaded = true
-	log.Info("[Info]: SSM docs uploaded successfully")
-
-	// watch for the abort signal and revert the chaos
-	go lib.AbortWatcher(experimentsDetails, abort)
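-	// narrow the tag-matched instance list down to the configured instance-affected percentage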
-	instanceIDList := common.FilterBasedOnPercentage(experimentsDetails.InstanceAffectedPerc, experimentsDetails.TargetInstanceIDList)
-	log.Infof("[Chaos]:Number of Instance targeted: %v", len(instanceIDList))
-
-	if len(instanceIDList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance id found for chaos injection"}
-	}
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	// delete the SSM document from the given AWS region
-	err = ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region)
-	if err != nil {
-		return stacktrace.Propagate(err, "failed to delete ssm doc")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go
deleted file mode 100644
index aa2c16e..0000000
--- a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	diskStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/disk"
-	instanceStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareChaos contains the preparation and injection steps for the experiment
-func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//get the disk name or list of disk names
-	diskNameList := strings.Split(experimentsDetails.VirtualDiskNames, ",")
-	if experimentsDetails.VirtualDiskNames == "" || len(diskNameList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no volume names found to detach"}
-	}
-	instanceNamesWithDiskNames, err := diskStatus.GetInstanceNameForDisks(diskNameList, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup)
-	if err != nil {
-		return stacktrace.Propagate(err, "error fetching attached instances for disks")
-	}
-
-	// Map each instance to the disks currently attached to it
-	attachedDisksWithInstance := make(map[string]*[]compute.DataDisk)
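-	// snapshot the disks currently attached to each instance so they can be re-attached on revert or abort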
-
-	for instanceName := range instanceNamesWithDiskNames {
-		attachedDisksWithInstance[instanceName], err = diskStatus.GetInstanceDiskList(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, experimentsDetails.ScaleSet, instanceName)
-		if err != nil {
-			return stacktrace.Propagate(err, "error fetching virtual disks")
-		}
-	}
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-
-		// watch for the abort signal and revert the chaos
-		go abortWatcher(experimentsDetails, attachedDisksWithInstance, instanceNamesWithDiskNames, chaosDetails)
-
-		switch strings.ToLower(experimentsDetails.Sequence) {
-		case "serial":
-			if err = injectChaosInSerialMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in serial mode")
-			}
-		case "parallel":
-			if err = injectChaosInParallelMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-			}
-		default:
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-		}
-
-		//Waiting for the ramp time after chaos injection
-		if experimentsDetails.RampTime != 0 {
-			log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-			common.WaitForDuration(experimentsDetails.RampTime)
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode will inject the Azure disk loss chaos in parallel mode, i.e. on all targets at once
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure virtual disk"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		// Detaching the virtual disks
-		log.Info("[Chaos]: Detaching the virtual disks from the instances")
-		for instanceName, diskNameList := range instanceNamesWithDiskNames {
-			if err = diskStatus.DetachDisks(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameList); err != nil {
-				return stacktrace.Propagate(err, "failed to detach disks")
-			}
-		}
-		// Waiting for disk to be detached
-		for _, diskNameList := range instanceNamesWithDiskNames {
-			for _, diskName := range diskNameList {
-				log.Infof("[Wait]: Waiting for Disk '%v' to detach", diskName)
-				if err := diskStatus.WaitForDiskToDetach(experimentsDetails, diskName); err != nil {
-					return stacktrace.Propagate(err, "disk detachment check failed")
-				}
-			}
-		}
-
-		// Updating the result details
-		for _, diskNameList := range instanceNamesWithDiskNames {
-			for _, diskName := range diskNameList {
-				common.SetTargets(diskName, "detached", "VirtualDisk", chaosDetails)
-			}
-		}
-		// run the probes during chaos
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-				return stacktrace.Propagate(err, "failed to run probes")
-			}
-		}
-
-		//Wait for chaos duration
-		log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-		common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-		//Attaching the virtual disks to the instances
-		log.Info("[Chaos]: Attaching the Virtual disks back to the instances")
-		for instanceName, diskNameList := range attachedDisksWithInstance {
-			if err = diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameList); err != nil {
-				return stacktrace.Propagate(err, "virtual disk attachment failed")
-			}
-		}
-
-		// Wait for the disks to be attached; this runs once for all instances
-		// so that each disk is checked exactly once, rather than once per instance
-		for _, diskNameList := range instanceNamesWithDiskNames {
-			for _, diskName := range diskNameList {
-				log.Infof("[Wait]: Waiting for Disk '%v' to attach", diskName)
-				if err := diskStatus.WaitForDiskToAttach(experimentsDetails, diskName); err != nil {
-					return stacktrace.Propagate(err, "disk attachment check failed")
-				}
-			}
-		}
-
-		// Updating the result details
-		for _, diskNameList := range instanceNamesWithDiskNames {
-			for _, diskName := range diskNameList {
-				common.SetTargets(diskName, "re-attached", "VirtualDisk", chaosDetails)
-			}
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// injectChaosInSerialMode will inject the Azure disk loss chaos in serial mode, i.e. one target after the other
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure virtual disks"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		for instanceName, diskNameList := range instanceNamesWithDiskNames {
-			for i, diskName := range diskNameList {
-				// Wrapping diskName in a slice because DetachDisks() expects a slice
-				diskNameToList := []string{diskName}
-
-				// Detaching the virtual disks
-				log.Infof("[Chaos]: Detaching %v from the instance", diskName)
-				if err = diskStatus.DetachDisks(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskNameToList); err != nil {
-					return stacktrace.Propagate(err, "failed to detach disks")
-				}
-
-				// Waiting for disk to be detached
-				log.Infof("[Wait]: Waiting for Disk '%v' to detach", diskName)
-				if err := diskStatus.WaitForDiskToDetach(experimentsDetails, diskName); err != nil {
-					return stacktrace.Propagate(err, "disk detachment check failed")
-				}
-
-				common.SetTargets(diskName, "detached", "VirtualDisk", chaosDetails)
-
-				// run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				//Wait for chaos duration
-				log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-				common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-				//Attaching the virtual disks to the instance
-				log.Infof("[Chaos]: Attaching %v back to the instance", diskName)
-				if err = diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, attachedDisksWithInstance[instanceName]); err != nil {
-					return stacktrace.Propagate(err, "disk attachment failed")
-				}
-
-				// Waiting for disk to be attached
-				log.Infof("[Wait]: Waiting for Disk '%v' to attach", diskName)
-				if err := diskStatus.WaitForDiskToAttach(experimentsDetails, diskName); err != nil {
-					return stacktrace.Propagate(err, "disk attachment check failed")
-				}
-
-				common.SetTargets(diskName, "re-attached", "VirtualDisk", chaosDetails)
-			}
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, attachedDisksWithInstance map[string]*[]compute.DataDisk, instanceNamesWithDiskNames map[string][]string, chaosDetails *types.ChaosDetails) {
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-
-	log.Info("[Abort]: Attaching disk(s) as abort signal received")
-
-	for instanceName, diskList := range attachedDisksWithInstance {
-		// Check the provisioning state of the VM instance
-		err = retry.
-			Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
-			Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-			Try(func(attempt uint) error {
-				status, err := instanceStatus.GetAzureInstanceProvisionStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet)
-				if err != nil {
-					return stacktrace.Propagate(err, "failed to get instance")
-				}
-				if status != "Provisioning succeeded" {
-					return stacktrace.Propagate(err, "instance is updating, waiting for instance to finish update")
-				}
-				return nil
-			})
-		if err != nil {
-			log.Errorf("[Error]: Instance is still in 'updating' state after timeout, re-attach might fail")
-		}
-		log.Infof("[Abort]: Attaching disk(s) to instance: %v", instanceName)
-		for _, disk := range *diskList {
-			diskStatusString, err := diskStatus.GetDiskStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, *disk.Name)
-			if err != nil {
-				log.Errorf("Failed to get disk status: %v", err)
-			}
-			if diskStatusString != "Attached" {
-				if err := diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskList); err != nil {
-					log.Errorf("Failed to attach disk, manual revert required: %v", err)
-				} else {
-					common.SetTargets(*disk.Name, "re-attached", "VirtualDisk", chaosDetails)
-				}
-			}
-		}
-	}
-
-	log.Infof("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go b/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go
deleted file mode 100644
index 8b3950d..0000000
--- a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go
+++ /dev/null
@@ -1,282 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common"
-	azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareAzureStop will initialize instanceNameList and start chaos injection based on the selected sequence
-func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	// Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// get the instance name or list of instance names
-	instanceNameList := strings.Split(experimentsDetails.AzureInstanceNames, ",")
-	if experimentsDetails.AzureInstanceNames == "" || len(instanceNameList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no instance name found to stop"}
-	}
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, instanceNameList)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	// Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode will inject the Azure instance stop chaos in serial mode, i.e. one instance after the other
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		// ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceName list, %v", instanceNameList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			// Power off the instances serially, one at a time
-			for i, vmName := range instanceNameList {
-
-				// Stopping the Azure instance
-				log.Infof("[Chaos]: Stopping the Azure instance: %v", vmName)
-				if experimentsDetails.ScaleSet == "enable" {
-					if err := azureStatus.AzureScaleSetInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to stop the Azure instance")
-					}
-				} else {
-					if err := azureStatus.AzureInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to stop the Azure instance")
-					}
-				}
-
-				// Wait for Azure instance to completely stop
-				log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the stopped state", vmName)
-				if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					return stacktrace.Propagate(err, "instance poweroff status check failed")
-				}
-
-				// Run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				// Wait for Chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-				// Starting the Azure instance
-				log.Info("[Chaos]: Starting back the Azure instance")
-				if experimentsDetails.ScaleSet == "enable" {
-					if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to start the Azure instance")
-					}
-				} else {
-					if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to start the Azure instance")
-					}
-				}
-
-				// Wait for Azure instance to get in running state
-				log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the running state", vmName)
-				if err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					return stacktrace.Propagate(err, "instance power on status check failed")
-				}
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode will inject the Azure instance stop chaos in parallel mode, i.e. on all instances at once
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	select {
-	case <-inject:
-		// Stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		// ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceName list, %v", instanceNameList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on Azure instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			// Power off the instances in parallel
-			for _, vmName := range instanceNameList {
-				// Stopping the Azure instance
-				log.Infof("[Chaos]: Stopping the Azure instance: %v", vmName)
-				if experimentsDetails.ScaleSet == "enable" {
-					if err := azureStatus.AzureScaleSetInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to stop Azure instance")
-					}
-				} else {
-					if err := azureStatus.AzureInstanceStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to stop Azure instance")
-					}
-				}
-			}
-
-			// Wait for all Azure instances to completely stop
-			for _, vmName := range instanceNameList {
-				log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the stopped state", vmName)
-				if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					return stacktrace.Propagate(err, "instance poweroff status check failed")
-				}
-			}
-
-			// Run probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			// Wait for Chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			// Starting the Azure instance
-			for _, vmName := range instanceNameList {
-				log.Infof("[Chaos]: Starting back the Azure instance: %v", vmName)
-				if experimentsDetails.ScaleSet == "enable" {
-					if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to start the Azure instance")
-					}
-				} else {
-					if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-						return stacktrace.Propagate(err, "unable to start the Azure instance")
-					}
-				}
-			}
-
-			// Wait for Azure instance to get in running state
-			for _, vmName := range instanceNameList {
-				log.Infof("[Wait]: Waiting for Azure instance '%v' to get in the running state", vmName)
-				if err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					return stacktrace.Propagate(err, "instance power on status check failed")
-				}
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string) {
-	<-abort
-
-	var instanceState string
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for _, vmName := range instanceNameList {
-		if experimentsDetails.ScaleSet == "enable" {
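-			// a scale set VM name carries both the scale set name and the instance id; split them for the status lookup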
-			scaleSetName, vmId := azureCommon.GetScaleSetNameAndInstanceId(vmName)
-			instanceState, err = azureStatus.GetAzureScaleSetInstanceStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, scaleSetName, vmId)
-		} else {
-			instanceState, err = azureStatus.GetAzureInstanceStatus(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName)
-		}
-		if err != nil {
-			log.Errorf("[Abort]: Failed to get instance status when an abort signal is received: %v", err)
-		}
-		if instanceState != "VM running" && instanceState != "VM starting" {
-			log.Info("[Abort]: Waiting for the Azure instance to get down")
-			if err := azureStatus.WaitForAzureComputeDown(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-				log.Errorf("[Abort]: Instance power off status check failed: %v", err)
-			}
-
-			log.Info("[Abort]: Starting Azure instance as abort signal received")
-			if experimentsDetails.ScaleSet == "enable" {
-				if err := azureStatus.AzureScaleSetInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					log.Errorf("[Abort]: Unable to start the Azure instance: %v", err)
-				}
-			} else {
-				if err := azureStatus.AzureInstanceStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName); err != nil {
-					log.Errorf("[Abort]: Unable to start the Azure instance: %v", err)
-				}
-			}
-		}
-
-		log.Info("[Abort]: Waiting for the Azure instance to start")
-		err := azureStatus.WaitForAzureComputeUp(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, vmName)
-		if err != nil {
-			log.Errorf("[Abort]: Instance power on status check failed: %v", err)
-			log.Errorf("[Abort]: Azure instance %v failed to start after an abort signal is received", vmName)
-		}
-	}
-	log.Infof("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/container-kill/helper/container-kill.go b/chaoslib/litmus/container-kill/helper/container-kill.go
deleted file mode 100644
index d92b159..0000000
--- a/chaoslib/litmus/container-kill/helper/container-kill.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package helper
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/palantir/stacktrace"
-	"github.com/sirupsen/logrus"
-	"os/exec"
-	"strconv"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-var err error
-
-// Helper injects the container-kill chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	//Fetching all the ENV passed in the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if err := killContainer(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-}
-
-// killContainer kills the target application container
-// it keeps killing the container for the entire chaos duration
-// the execution stops once the elapsed time passes the given chaos duration
-func killContainer(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-	targetList, err := common.ParseTargets(chaosDetails.ChaosPodName)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not parse targets")
-	}
-
-	var targets []targetDetails
-
-	for _, t := range targetList.Target {
-		td := targetDetails{
-			Name:            t.Name,
-			Namespace:       t.Namespace,
-			TargetContainer: t.TargetContainer,
-			Source:          chaosDetails.ChaosPodName,
-		}
-		targets = append(targets, td)
-		log.Infof("Injecting chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-	}
-
-	if err := killIterations(targets, experimentsDetails, clients, eventsDetails, chaosDetails, resultDetails); err != nil {
-		return err
-	}
-
-	log.Infof("[Completion]: %v chaos has been completed", experimentsDetails.ExperimentName)
-	return nil
-}
-
-func killIterations(targets []targetDetails, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		var containerIds []string
-
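-		// capture each target's container id and pre-chaos restart count; the count is compared after the kill to confirm a restart happened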
-		for _, t := range targets {
-			t.RestartCountBefore, err = getRestartCount(t, clients)
-			if err != nil {
-				return stacktrace.Propagate(err, "could get container restart count")
-			}
-
-			containerId, err := common.GetContainerID(t.Namespace, t.Name, t.TargetContainer, clients, t.Source)
-			if err != nil {
-				return stacktrace.Propagate(err, "could not get container id")
-			}
-
-			log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{
-				"PodName":            t.Name,
-				"ContainerName":      t.TargetContainer,
-				"RestartCountBefore": t.RestartCountBefore,
-			})
-
-			containerIds = append(containerIds, containerId)
-		}
-
-		if err := kill(experimentsDetails, containerIds, clients, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not kill target container")
-		}
-
-		//Waiting for the chaos interval after chaos injection
-		if experimentsDetails.ChaosInterval != 0 {
-			log.Infof("[Wait]: Wait for the chaos interval %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-		}
-
-		for _, t := range targets {
-			if err := validate(t, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				return stacktrace.Propagate(err, "could not verify restart count")
-			}
-			if err := result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "targeted", "pod", t.Name); err != nil {
-				return stacktrace.Propagate(err, "could not annotate chaosresult")
-			}
-		}
-
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-func kill(experimentsDetails *experimentTypes.ExperimentDetails, containerIds []string, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// record the event inside chaosengine
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	switch experimentsDetails.ContainerRuntime {
-	case "docker":
-		if err := stopDockerContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal, experimentsDetails.ChaosPodName); err != nil {
-			return stacktrace.Propagate(err, "could not stop container")
-		}
-	case "containerd", "crio":
-		if err := stopContainerdContainer(containerIds, experimentsDetails.SocketPath, experimentsDetails.Signal, experimentsDetails.ChaosPodName); err != nil {
-			return stacktrace.Propagate(err, "could not stop container")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: fmt.Sprintf("unsupported container runtime %s", experimentsDetails.ContainerRuntime)}
-	}
-	return nil
-}
-
-func validate(t targetDetails, timeout, delay int, clients clients.ClientSets) error {
-	//Check the status of restarted container
-	if err := common.CheckContainerStatus(t.Namespace, t.Name, timeout, delay, clients, t.Source); err != nil {
-		return err
-	}
-
-	// It will verify that the restart count of container should increase after chaos injection
-	return verifyRestartCount(t, timeout, delay, clients, t.RestartCountBefore)
-}
-
-// stopContainerdContainer kills the application container
-func stopContainerdContainer(containerIDs []string, socketPath, signal, source string) error {
-	if signal != "SIGKILL" && signal != "SIGTERM" {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported signal %s, use either SIGTERM or SIGKILL", signal)}
-	}
-
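-	// point crictl at the mounted CRI socket for both the image (-i) and runtime (-r) endpoints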
-	cmd := exec.Command("sudo", "crictl", "-i", fmt.Sprintf("unix://%s", socketPath), "-r", fmt.Sprintf("unix://%s", socketPath), "stop")
-	if signal == "SIGKILL" {
-		cmd.Args = append(cmd.Args, "--timeout=0")
-	}
-	cmd.Args = append(cmd.Args, containerIDs...)
-
-	var errOut, out bytes.Buffer
-	cmd.Stderr = &errOut
-	cmd.Stdout = &out
-	if err := cmd.Run(); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: source, Reason: fmt.Sprintf("failed to stop container: %s", errOut.String())}
-	}
-	return nil
-}
-
-// stopDockerContainer kills the application container
-func stopDockerContainer(containerIDs []string, socketPath, signal, source string) error {
-	var errOut, out bytes.Buffer
-	cmd := exec.Command("sudo", "docker", "--host", fmt.Sprintf("unix://%s", socketPath), "kill", "--signal", signal)
-	cmd.Args = append(cmd.Args, containerIDs...)
-	cmd.Stderr = &errOut
-	cmd.Stdout = &out
-	if err := cmd.Run(); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: source, Reason: fmt.Sprintf("failed to stop container: %s", errOut.String())}
-	}
-	return nil
-}
-
-// getRestartCount returns the restart count of the target container
-func getRestartCount(target targetDetails, clients clients.ClientSets) (int, error) {
-	pod, err := clients.KubeClient.CoreV1().Pods(target.Namespace).Get(context.Background(), target.Name, v1.GetOptions{})
-	if err != nil {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: target.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", target.Name, target.Namespace), Reason: err.Error()}
-	}
-	restartCount := 0
-	for _, container := range pod.Status.ContainerStatuses {
-		if container.Name == target.TargetContainer {
-			restartCount = int(container.RestartCount)
-			break
-		}
-	}
-	return restartCount, nil
-}
-
-// verifyRestartCount verifies that the restart count of the target container has increased after chaos injection
-func verifyRestartCount(t targetDetails, timeout, delay int, clients clients.ClientSets, restartCountBefore int) error {
-
-	restartCountAfter := 0
-	return retry.
-		Times(uint(timeout / delay)).
-		Wait(time.Duration(delay) * time.Second).
-		Try(func(attempt uint) error {
-			pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: err.Error()}
-			}
-			for _, container := range pod.Status.ContainerStatuses {
-				if container.Name == t.TargetContainer {
-					restartCountAfter = int(container.RestartCount)
-					break
-				}
-			}
-			if restartCountAfter <= restartCountBefore {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: "target container is not restarted after kill"}
-			}
-			log.Infof("restartCount of target container after chaos injection: %v", strconv.Itoa(restartCountAfter))
-			return nil
-		})
-}
-
-//getENV fetches all the env variables from the runner pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10"))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.Signal = types.Getenv("SIGNAL", "SIGKILL")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-}
-
-type targetDetails struct {
-	Name               string
-	Namespace          string
-	TargetContainer    string
-	RestartCountBefore int
-	Source             string
-}
diff --git a/chaoslib/litmus/container-kill/lib/container-kill.go b/chaoslib/litmus/container-kill/lib/container-kill.go
deleted file mode 100644
index 79c92b7..0000000
--- a/chaoslib/litmus/container-kill/lib/container-kill.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-//PrepareContainerKill contains the preparation steps before chaos injection
-func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var err error
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-	//Set up the tunables if provided as a range
-	SetChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The tunables are:", logrus.Fields{
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Sequence":         experimentsDetails.Sequence,
-	})
-
-	targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName, need permission inside helper pod to create the events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not  experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
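-	// when no target container is provided, the first container of each target pod is used as the target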
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode kills the containers of all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform container kill chaos
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		runID := stringutils.GetRunID()
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		//check the status of the helper pods and wait until they reach the running state, else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true)
-		}
-
-		//Deleting all the helper pods for container-kill chaos
-		log.Info("[Cleanup]: Deleting all the helper pods")
-		if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode kills the containers of all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	runID := stringutils.GetRunID()
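-	// group the target pods by node so that a single helper pod per node can handle all targets scheduled on it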
-	targets := common.FilterPodsForNodes(targetPodList, experimentsDetails.TargetContainer)
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	//check the status of the helper pods and wait until they reach the running state, else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true)
-	}
-
-	//Deleting all the helper pods for container-kill chaos
-	log.Info("[Cleanup]: Deleting all the helper pods")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error {
-
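-	// the helper runs privileged only for the CRI-O runtime (presumably because CRI-O needs elevated access to the runtime socket)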
-	privilegedEnable := false
-	if experimentsDetails.ContainerRuntime == "crio" {
-		privilegedEnable = true
-	}
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			NodeName:                      nodeName,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "cri-socket",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-			},
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name container-kill",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "cri-socket",
-							MountPath: experimentsDetails.SocketPath,
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the env variables required by the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("CHAOS_INTERVAL", strconv.Itoa(experimentsDetails.ChaosInterval)).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnv("SIGNAL", experimentsDetails.Signal).
-		SetEnv("STATUS_CHECK_DELAY", strconv.Itoa(experimentsDetails.Delay)).
-		SetEnv("STATUS_CHECK_TIMEOUT", strconv.Itoa(experimentsDetails.Timeout)).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("INSTANCE_ID", experimentsDetails.InstanceID).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
-
-//SetChaosTunables picks a random value within the given range of values
-//If a range is not provided, it keeps the initially provided value.
-func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
diff --git a/chaoslib/litmus/disk-fill/helper/disk-fill.go b/chaoslib/litmus/disk-fill/helper/disk-fill.go
deleted file mode 100644
index 474f93f..0000000
--- a/chaoslib/litmus/disk-fill/helper/disk-fill.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package helper
-
-import (
-	"context"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/exec"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"k8s.io/apimachinery/pkg/api/resource"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-var inject, abort chan os.Signal
-
-// Helper injects the disk-fill chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Fetching all the ENVs passed to the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	if err := diskFill(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-}
-
-// diskFill contains steps to inject disk-fill chaos
-func diskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-
-	targetList, err := common.ParseTargets(chaosDetails.ChaosPodName)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not parse targets")
-	}
-
-	var targets []targetDetails
-
-	for _, t := range targetList.Target {
-		td := targetDetails{
-			Name:            t.Name,
-			Namespace:       t.Namespace,
-			TargetContainer: t.TargetContainer,
-			Source:          chaosDetails.ChaosPodName,
-		}
-
-		// Derive the container id of the target container
-		td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, chaosDetails.ChaosPodName)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container id")
-		}
-
-		// extract the pid of the target container
-		td.TargetPID, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source)
-		if err != nil {
-			return err
-		}
-
-		td.SizeToFill, err = getDiskSizeToFill(td, experimentsDetails, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get disk size to fill")
-		}
-
-		log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{
-			"PodName":         td.Name,
-			"Namespace":       td.Namespace,
-			"SizeToFill(KB)":  td.SizeToFill,
-			"TargetContainer": td.TargetContainer,
-		})
-
-		targets = append(targets, td)
-	}
-
-	// record the event inside chaosengine
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	// watch for the abort signal and revert the chaos
-	go abortWatcher(targets, experimentsDetails, clients, resultDetails.Name)
-
-	select {
-	case <-inject:
-		// stop the chaos execution if an abort signal is received
-		os.Exit(1)
-	default:
-	}
-
-	for _, t := range targets {
-		if t.SizeToFill > 0 {
-			if err := fillDisk(t, experimentsDetails.DataBlockSize); err != nil {
-				return stacktrace.Propagate(err, "could not fill ephemeral storage")
-			}
-			log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-			if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil {
-				if revertErr := revertDiskFill(t, clients); revertErr != nil {
-					return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())}
-				}
-				return stacktrace.Propagate(err, "could not annotate chaosresult")
-			}
-		} else {
-			log.Warn("No required free space found!")
-		}
-	}
-
-	log.Infof("[Chaos]: Waiting for %vs", experimentsDetails.ChaosDuration)
-
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	log.Info("[Chaos]: Stopping the experiment")
-
-	var errList []string
-
-	for _, t := range targets {
-		// It will delete the target pod if the target pod is evicted
-		// if the target pod is still running then it will delete all the files which were created earlier during chaos execution
-		if err = revertDiskFill(t, clients); err != nil {
-			errList = append(errList, err.Error())
-			continue
-		}
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-			errList = append(errList, err.Error())
-		}
-	}
-
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
-
-// fillDisk fills the ephemeral disk by creating files
-func fillDisk(t targetDetails, bs int) error {
-
-	// Creating a file to fill up the required ephemeral storage size using the configured block size
-	log.Infof("[Fill]: Filling ephemeral storage, size: %vKB", t.SizeToFill)
-	dd := fmt.Sprintf("sudo dd if=/dev/urandom of=/proc/%v/root/home/diskfill bs=%vK count=%v", t.TargetPID, bs, t.SizeToFill/bs)
-	log.Infof("dd: {%v}", dd)
-	cmd := exec.Command("/bin/bash", "-c", dd)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		log.Error(err.Error())
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: string(out)}
-	}
-	return nil
-}
-
-// getEphemeralStorageAttributes derives the ephemeral storage attributes from the target pod
-func getEphemeralStorageAttributes(t targetDetails, clients clients.ClientSets) (int64, error) {
-
-	pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{})
-	if err != nil {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: err.Error()}
-	}
-
-	var ephemeralStorageLimit int64
-	containers := pod.Spec.Containers
-
-	// Extracting the ephemeral storage limit from the target container
-	// The value is scaled to KB
-	for _, container := range containers {
-		if container.Name == t.TargetContainer {
-			ephemeralStorageLimit = container.Resources.Limits.StorageEphemeral().ToDec().ScaledValue(resource.Kilo)
-			break
-		}
-	}
-
-	return ephemeralStorageLimit, nil
-}
-
-// filterUsedEphemeralStorage filters out the used ephemeral storage from the given string
-func filterUsedEphemeralStorage(ephemeralStorageDetails string) (int, error) {
-
-	// Filtering out the ephemeral storage size from the output of du command
-	// It contains details of all subdirectories of target container
-	ephemeralStorageAll := strings.Split(ephemeralStorageDetails, "\n")
-	// The second-to-last line holds the total for the main (root) directory
-	ephemeralStorageAllDiskFill := strings.Split(ephemeralStorageAll[len(ephemeralStorageAll)-2], "\t")[0]
-	// parsing the string size into an integer
-	ephemeralStorageSize, err := strconv.Atoi(ephemeralStorageAllDiskFill)
-	return ephemeralStorageSize, err
-}
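
To make the indexing above concrete, a self-contained sketch run on hypothetical `du` output (sizes in KB). The output ends with a trailing newline, so the last element after splitting on "\n" is empty and the second-to-last element carries the total for the root path:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    func main() {
    	// hypothetical `du` output: one line per subdirectory,
    	// ending with the total for the root path
    	out := "1024\t/proc/42/root/var\n2048\t/proc/42/root/home\n4096\t/proc/42/root\n"
    	lines := strings.Split(out, "\n") // last element is ""
    	total := strings.Split(lines[len(lines)-2], "\t")[0]
    	size, _ := strconv.Atoi(total)
    	fmt.Println(size) // 4096
    }
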
-
-// getSizeToBeFilled generates the ephemeral storage size that needs to be filled
-func getSizeToBeFilled(experimentsDetails *experimentTypes.ExperimentDetails, usedEphemeralStorageSize int, ephemeralStorageLimit int) int {
-	var requirementToBeFill int
-
-	switch ephemeralStorageLimit {
-	case 0:
-		ephemeralStorageMebibytes, _ := strconv.Atoi(experimentsDetails.EphemeralStorageMebibytes)
-		requirementToBeFill = ephemeralStorageMebibytes * 1024
-	default:
-		// deriving the size to be filled from the storage limit and the fill percentage
-		fillPercentage, _ := strconv.Atoi(experimentsDetails.FillPercentage)
-		requirementToBeFill = (ephemeralStorageLimit * fillPercentage) / 100
-	}
-
-	needToBeFilled := requirementToBeFill - usedEphemeralStorageSize
-	return needToBeFilled
-}
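
A quick worked example of this computation, with hypothetical numbers: given an ephemeral storage limit of 2097152 KB (2 GiB), FILL_PERCENTAGE=80, and 409600 KB already in use, the helper needs to create 2097152 * 80 / 100 - 409600 = 1268121 KB of files. A zero or negative result means the target already sits at or above the requested fill level, which is why diskFill only acts when SizeToFill > 0.
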
-
-// revertDiskFill will delete the target pod if the target pod is evicted
-// if the target pod is still running then it will delete the files which were created during chaos execution
-func revertDiskFill(t targetDetails, clients clients.ClientSets) error {
-	pod, err := clients.KubeClient.CoreV1().Pods(t.Namespace).Get(context.Background(), t.Name, v1.GetOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: err.Error()}
-	}
-	podReason := pod.Status.Reason
-	if podReason == "Evicted" {
-		// Deleting the pod as pod is already evicted
-		log.Warn("Target pod is evicted, deleting the pod")
-		if err := clients.KubeClient.CoreV1().Pods(t.Namespace).Delete(context.Background(), t.Name, v1.DeleteOptions{}); err != nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to delete target pod after eviction :%s", err.Error())}
-		}
-	} else {
-		// deleting the files after chaos execution
-		rm := fmt.Sprintf("sudo rm -rf /proc/%v/root/home/diskfill", t.TargetPID)
-		cmd := exec.Command("/bin/bash", "-c", rm)
-		out, err := cmd.CombinedOutput()
-		if err != nil {
-			log.Error(err.Error())
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s,namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to cleanup ephemeral storage: %s", string(out))}
-		}
-	}
-	log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-	return nil
-}
-
-// getENV fetches all the env variables passed to the helper pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.FillPercentage = types.Getenv("FILL_PERCENTAGE", "")
-	experimentDetails.EphemeralStorageMebibytes = types.Getenv("EPHEMERAL_STORAGE_MEBIBYTES", "")
-	experimentDetails.DataBlockSize, _ = strconv.Atoi(types.Getenv("DATA_BLOCK_SIZE", "256"))
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-}
-
-// abortWatcher continuously watches for the abort signals
-func abortWatcher(targets []targetDetails, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultName string) {
-	// waiting till the abort signal is received
-	<-abort
-
-	log.Info("[Chaos]: Revert started because a termination signal was received")
-	log.Info("Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		for _, t := range targets {
-			err := revertDiskFill(t, clients)
-			if err != nil {
-				log.Errorf("unable to revert disk-fill chaos, err: %v", err)
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultName, experimentsDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-				log.Errorf("unable to annotate the chaosresult, err: %v", err)
-			}
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("Chaos Revert Completed")
-	os.Exit(1)
-}
-
-func getDiskSizeToFill(t targetDetails, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (int, error) {
-
-	usedEphemeralStorageSize, err := getUsedEphemeralStorage(t)
-	if err != nil {
-		return 0, stacktrace.Propagate(err, "could not get used ephemeral storage")
-	}
-
-	// getEphemeralStorageAttributes derives the ephemeral storage attributes from the target container
-	ephemeralStorageLimit, err := getEphemeralStorageAttributes(t, clients)
-	if err != nil {
-		return 0, stacktrace.Propagate(err, "could not get ephemeral storage attributes")
-	}
-
-	if ephemeralStorageLimit == 0 && experimentsDetails.EphemeralStorageMebibytes == "0" {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: "either provide ephemeral storage limit inside target container or define EPHEMERAL_STORAGE_MEBIBYTES ENV"}
-	}
-
-	// deriving the ephemeral storage size to be filled
-	sizeTobeFilled := getSizeToBeFilled(experimentsDetails, usedEphemeralStorageSize, int(ephemeralStorageLimit))
-
-	return sizeTobeFilled, nil
-}
-
-func getUsedEphemeralStorage(t targetDetails) (int, error) {
-	// derive the used ephemeral storage size from the target container
-	du := fmt.Sprintf("sudo du /proc/%v/root", t.TargetPID)
-	cmd := exec.Command("/bin/bash", "-c", du)
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		log.Error(err.Error())
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to get used ephemeral storage size: %s", string(out))}
-	}
-	ephemeralStorageDetails := string(out)
-
-	// filtering out the used ephemeral storage from the output of du command
-	usedEphemeralStorageSize, err := filterUsedEphemeralStorage(ephemeralStorageDetails)
-	if err != nil {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to get used ephemeral storage size: %s", err.Error())}
-	}
-	log.Infof("used ephemeral storage space: %vKB", usedEphemeralStorageSize)
-	return usedEphemeralStorageSize, nil
-}
-
-type targetDetails struct {
-	Name            string
-	Namespace       string
-	TargetContainer string
-	ContainerId     string
-	SizeToFill      int
-	TargetPID       int
-	Source          string
-}
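
The helper above reads and writes the target's filesystem through /proc/<pid>/root, which resolves to the target container's root filesystem because the pod runs with hostPID enabled. A minimal sketch of that path construction (the pid and file name below are hypothetical):

    package main

    import "fmt"

    // containerPath builds the host-side path to a file inside a
    // target container, given the container's PID as seen on the host
    func containerPath(pid int, file string) string {
    	return fmt.Sprintf("/proc/%d/root%s", pid, file)
    }

    func main() {
    	fmt.Println(containerPath(42, "/home/diskfill")) // /proc/42/root/home/diskfill
    }
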
diff --git a/chaoslib/litmus/disk-fill/lib/disk-fill.go b/chaoslib/litmus/disk-fill/lib/disk-fill.go
deleted file mode 100644
index cf0192a..0000000
--- a/chaoslib/litmus/disk-fill/lib/disk-fill.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-//PrepareDiskFill contains the preparation steps before chaos injection
-func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var err error
-	// It will contain all the pod & container details required for exec command
-	execCommandDetails := exec.PodDetails{}
-	// Get the target pod details for the chaos execution
-	// if the target pods are not defined, it will derive a random target pod list using the pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-	//set up the tunables if provided in range
-	setChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"FillPercentage":            experimentsDetails.FillPercentage,
-		"EphemeralStorageMebibytes": experimentsDetails.EphemeralStorageMebibytes,
-		"PodsAffectedPerc":          experimentsDetails.PodsAffectedPerc,
-		"Sequence":                  experimentsDetails.Sequence,
-	})
-
-	targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName, as the helper pod needs its permissions to create events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode fills the ephemeral storage of all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform disk-fill chaos
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		runID := stringutils.GetRunID()
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		//checking the status of the helper pods; wait till the pods reach the running state, else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-		}
-
-		//Deleting the helper pod for disk-fill chaos
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-
-	return nil
-
-}
-
-// injectChaosInParallelMode fills the ephemeral storage of all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	var err error
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	runID := stringutils.GetRunID()
-	targets := common.FilterPodsForNodes(targetPodList, experimentsDetails.TargetContainer)
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	//checking the status of the helper pods; wait till the pods reach the running state, else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-	}
-
-	//Deleting all the helper pods for disk-fill chaos
-	log.Info("[Cleanup]: Deleting all the helper pods")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, appNodeName, runID string) error {
-
-	privilegedEnable := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			HostPID:                       true,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNodeName,
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-
-			Volumes: []apiv1.Volume{
-				{
-					Name: "socket-path",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-			},
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name disk-fill",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "socket-path",
-							MountPath: experimentsDetails.SocketPath,
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the env variables required for the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("APP_CONTAINER", experimentsDetails.TargetContainer).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("FILL_PERCENTAGE", experimentsDetails.FillPercentage).
-		SetEnv("EPHEMERAL_STORAGE_MEBIBYTES", experimentsDetails.EphemeralStorageMebibytes).
-		SetEnv("DATA_BLOCK_SIZE", strconv.Itoa(experimentsDetails.DataBlockSize)).
-		SetEnv("INSTANCE_ID", experimentsDetails.InstanceID).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
-
-// setChaosTunables will set up a random value within the given range of values
-// If the value is not provided as a range, it keeps the initially provided value.
-func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.FillPercentage = common.ValidateRange(experimentsDetails.FillPercentage)
-	experimentsDetails.EphemeralStorageMebibytes = common.ValidateRange(experimentsDetails.EphemeralStorageMebibytes)
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
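
In parallel mode the lib schedules one helper per node and packs every target on that node into a single TARGETS value. A sketch of the assumed encoding, "name:namespace:container" triples joined by ";", which the helper's common.ParseTargets is expected to split back apart (the pod names are hypothetical):

    package main

    import (
    	"fmt"
    	"strings"
    )

    type target struct{ Name, Namespace, Container string }

    func main() {
    	// hypothetical targets scheduled on the same node
    	targets := []target{
    		{"nginx-a", "default", "nginx"},
    		{"nginx-b", "default", "nginx"},
    	}
    	var enc []string
    	for _, t := range targets {
    		enc = append(enc, fmt.Sprintf("%s:%s:%s", t.Name, t.Namespace, t.Container))
    	}
    	fmt.Println(strings.Join(enc, ";")) // nginx-a:default:nginx;nginx-b:default:nginx
    }
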
diff --git a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go b/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go
deleted file mode 100644
index 6dd00f3..0000000
--- a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PrepareDockerServiceKill contains preparation steps before chaos injection
-func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var err error
-	if experimentsDetails.TargetNode == "" {
-		//Select node for docker-service-kill
-		experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node name")
-		}
-	}
-
-	log.InfoWithValues("[Info]: Details of node under chaos injection", logrus.Fields{
-		"NodeName": experimentsDetails.TargetNode,
-	})
-
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + experimentsDetails.TargetNode + " node"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	// Creating the helper pod to perform docker-service-kill
-	if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil {
-		return stacktrace.Propagate(err, "could not create helper pod")
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-	//Checking the status of helper pod
-	log.Info("[Status]: Checking the status of the helper pod")
-	if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return err
-		}
-	}
-
-	// Checking for the node to be in not-ready state
-	log.Info("[Status]: Check for the node to be in NotReady state")
-	if err = status.CheckNodeNotReadyState(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check for NOT READY state")
-	}
-
-	// Wait till the completion of helper pod
-	log.Info("[Wait]: Waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-	}
-
-	//Deleting the helper pod
-	log.Info("[Cleanup]: Deleting the helper pod")
-	if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error {
-
-	privileged := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			Name:        experimentsDetails.ExperimentName + "-helper-" + experimentsDetails.RunID,
-			Namespace:   experimentsDetails.ChaosNamespace,
-			Labels:      common.GetHelperLabels(chaosDetails.Labels, experimentsDetails.RunID, experimentsDetails.ExperimentName),
-			Annotations: chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNodeName,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "bus",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: "/var/run",
-						},
-					},
-				},
-				{
-					Name: "root",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: "/",
-						},
-					},
-				},
-			},
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"sleep 10 && systemctl stop docker && sleep " + strconv.Itoa(experimentsDetails.ChaosDuration) + " && systemctl start docker",
-					},
-					Resources: chaosDetails.Resources,
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "bus",
-							MountPath: "/var/run",
-						},
-						{
-							Name:      "root",
-							MountPath: "/node",
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privileged,
-					},
-					TTY: true,
-				},
-			},
-			Tolerations: []apiv1.Toleration{
-				{
-					Key:               "node.kubernetes.io/not-ready",
-					Operator:          apiv1.TolerationOperator("Exists"),
-					Effect:            apiv1.TaintEffect("NoExecute"),
-					TolerationSeconds: ptrint64(int64(experimentsDetails.ChaosDuration) + 60),
-				},
-				{
-					Key:               "node.kubernetes.io/unreachable",
-					Operator:          apiv1.TolerationOperator("Exists"),
-					Effect:            apiv1.TaintEffect("NoExecute"),
-					TolerationSeconds: ptrint64(int64(experimentsDetails.ChaosDuration) + 60),
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-func ptrint64(p int64) *int64 {
-	return &p
-}
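
A design note on the spec above: stopping docker drives the helper's own node into NotReady, so the pod tolerates the not-ready and unreachable taints for ChaosDuration+60s; without that, taint-based eviction could remove the helper before it restarts the service. A sketch of how the injected one-liner is assembled (the duration is hypothetical):

    package main

    import (
    	"fmt"
    	"strconv"
    )

    func main() {
    	// a short grace sleep, stop the docker service for the chaos
    	// duration, then bring it back up
    	chaosDuration := 60 // seconds
    	cmd := "sleep 10 && systemctl stop docker && sleep " +
    		strconv.Itoa(chaosDuration) + " && systemctl start docker"
    	fmt.Println(cmd)
    }
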
diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go
deleted file mode 100644
index b21a24d..0000000
--- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-
-	ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareEBSLossByID contains the preparation and injection steps for the experiment
-func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	select {
-	case <-inject:
-		// stop the chaos execution if an abort signal is received
-		os.Exit(0)
-	default:
-
-		//get the volume id or list of volume ids
-		volumeIDList := strings.Split(experimentsDetails.EBSVolumeID, ",")
-		if experimentsDetails.EBSVolumeID == "" || len(volumeIDList) == 0 {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no volume id found to detach"}
-		}
-		// watch for the abort signal and revert the chaos
-		go ebsloss.AbortWatcher(experimentsDetails, volumeIDList, abort, chaosDetails)
-
-		switch strings.ToLower(experimentsDetails.Sequence) {
-		case "serial":
-			if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in serial mode")
-			}
-		case "parallel":
-			if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-			}
-		default:
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-		}
-
-		//Waiting for the ramp time after chaos injection
-		if experimentsDetails.RampTime != 0 {
-			log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-			common.WaitForDuration(experimentsDetails.RampTime)
-		}
-	}
-	return nil
-}
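
The explicit empty-string check above matters because strings.Split never returns an empty slice: splitting "" yields a single empty element, so a len() == 0 test alone cannot catch a missing EBS volume ID. A self-contained demonstration:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	fmt.Println(len(strings.Split("", ",")))       // 1, not 0
    	fmt.Println(strings.Split("vol-a,vol-b", ",")) // [vol-a vol-b]
    }
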
diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go
deleted file mode 100644
index 0b2039c..0000000
--- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-
-	ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareEBSLossByTag contains the preparation and injection steps for the experiment
-func PrepareEBSLossByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	select {
-	case <-inject:
-		// stop the chaos execution if an abort signal is received
-		os.Exit(0)
-	default:
-
-		targetEBSVolumeIDList := common.FilterBasedOnPercentage(experimentsDetails.VolumeAffectedPerc, experimentsDetails.TargetVolumeIDList)
-		log.Infof("[Chaos]: Number of volumes targeted: %v", len(targetEBSVolumeIDList))
-
-		// watch for the abort signal and revert the chaos
-		go ebsloss.AbortWatcher(experimentsDetails, targetEBSVolumeIDList, abort, chaosDetails)
-
-		switch strings.ToLower(experimentsDetails.Sequence) {
-		case "serial":
-			if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in serial mode")
-			}
-		case "parallel":
-			if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-			}
-		default:
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-		}
-		//Waiting for the ramp time after chaos injection
-		if experimentsDetails.RampTime != 0 {
-			log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-			common.WaitForDuration(experimentsDetails.RampTime)
-		}
-	}
-	return nil
-}
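
For reference, a sketch of the behaviour assumed of common.FilterBasedOnPercentage: select an integer percentage of the candidate list, with a floor of one target when the list is non-empty. The function below is hypothetical and shown only to illustrate the selection; the real implementation may also shuffle the list:

    package main

    import "fmt"

    func filterByPercentage(perc int, ids []string) []string {
    	n := len(ids) * perc / 100
    	if n == 0 && len(ids) > 0 {
    		n = 1 // always target at least one volume
    	}
    	return ids[:n]
    }

    func main() {
    	ids := []string{"vol-a", "vol-b", "vol-c", "vol-d"}
    	fmt.Println(filterByPercentage(50, ids)) // [vol-a vol-b]
    }
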
diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss.go
deleted file mode 100644
index 8fd39c0..0000000
--- a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	ebs "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-// InjectChaosInSerialMode will inject the ebs loss chaos in serial mode, which means one after the other
-func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp of the chaos injection
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-		for i, volumeID := range targetEBSVolumeIDList {
-
-			//Get volume attachment details
-			ec2InstanceID, device, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the attachment info")
-			}
-
-			//Detaching the ebs volume from the instance
-			log.Info("[Chaos]: Detaching the EBS volume from the instance")
-			if err = ebs.EBSVolumeDetach(volumeID, experimentsDetails.Region); err != nil {
-				return stacktrace.Propagate(err, "ebs detachment failed")
-			}
-
-			common.SetTargets(volumeID, "injected", "EBS", chaosDetails)
-
-			//Wait for ebs volume detachment
-			log.Infof("[Wait]: Wait for EBS volume detachment for volume %v", volumeID)
-			if err = ebs.WaitForVolumeDetachment(volumeID, ec2InstanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "ebs detachment failed")
-			}
-
-			// run the probes during chaos
-			// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-			if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			//Wait for the chaos interval
-			log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			//Getting the EBS volume attachment status
-			ebsState, err := ebs.GetEBSStatus(volumeID, ec2InstanceID, experimentsDetails.Region)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the ebs status")
-			}
-
-			switch ebsState {
-			case "attached":
-				log.Info("[Skip]: The EBS volume is already attached")
-			default:
-				//Attaching the ebs volume back to the instance
-				log.Info("[Chaos]: Attaching the EBS volume back to the instance")
-				if err = ebs.EBSVolumeAttach(volumeID, ec2InstanceID, device, experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ebs attachment failed")
-				}
-
-				//Wait for ebs volume attachment
-				log.Infof("[Wait]: Wait for EBS volume attachment for %v volume", volumeID)
-				if err = ebs.WaitForVolumeAttachment(volumeID, ec2InstanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "ebs attachment failed")
-				}
-			}
-			common.SetTargets(volumeID, "reverted", "EBS", chaosDetails)
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// InjectChaosInParallelMode will inject the chaos in parallel mode, which means all at once
-func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var ec2InstanceIDList, deviceList []string
-
-	//ChaosStartTimeStamp contains the start timestamp of the chaos injection
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		//prepare the instance IDs and device names for all the given volumes
-		for _, volumeID := range targetEBSVolumeIDList {
-			ec2InstanceID, device, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the attachment info")
-			}
-			if ec2InstanceID == "" || device == "" {
-				return cerrors.Error{
-					ErrorCode: cerrors.ErrorTypeChaosInject,
-					Reason:    "Volume not attached to any instance",
-					Target:    fmt.Sprintf("EBS Volume ID: %v", volumeID),
-				}
-			}
-			ec2InstanceIDList = append(ec2InstanceIDList, ec2InstanceID)
-			deviceList = append(deviceList, device)
-		}
-
-		for _, volumeID := range targetEBSVolumeIDList {
-			//Detaching the ebs volume from the instance
-			log.Info("[Chaos]: Detaching the EBS volume from the instance")
-			if err := ebs.EBSVolumeDetach(volumeID, experimentsDetails.Region); err != nil {
-				return stacktrace.Propagate(err, "ebs detachment failed")
-			}
-			common.SetTargets(volumeID, "injected", "EBS", chaosDetails)
-		}
-
-		log.Info("[Info]: Checking if the detachment process has been initiated")
-		if err := ebs.CheckEBSDetachmentInitialisation(targetEBSVolumeIDList, ec2InstanceIDList, experimentsDetails.Region); err != nil {
-			return stacktrace.Propagate(err, "failed to initialise the detachment")
-		}
-
-		for i, volumeID := range targetEBSVolumeIDList {
-			//Wait for ebs volume detachment
-			log.Infof("[Wait]: Wait for EBS volume detachment for volume %v", volumeID)
-			if err := ebs.WaitForVolumeDetachment(volumeID, ec2InstanceIDList[i], experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "ebs detachment failed")
-			}
-		}
-
-		// run the probes during chaos
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-				return stacktrace.Propagate(err, "failed to run probes")
-			}
-		}
-
-		//Wait for chaos interval
-		log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-		common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-		for i, volumeID := range targetEBSVolumeIDList {
-
-			//Getting the EBS volume attachment status
-			ebsState, err := ebs.GetEBSStatus(volumeID, ec2InstanceIDList[i], experimentsDetails.Region)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the ebs status")
-			}
-
-			switch ebsState {
-			case "attached":
-				log.Info("[Skip]: The EBS volume is already attached")
-			default:
-				//Attaching the ebs volume back to the instance
-				log.Info("[Chaos]: Attaching the EBS volume back to the instance")
-				if err = ebs.EBSVolumeAttach(volumeID, ec2InstanceIDList[i], deviceList[i], experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ebs attachment failed")
-				}
-
-				//Wait for ebs volume attachment
-				log.Infof("[Wait]: Wait for EBS volume attachment for volume %v", volumeID)
-				if err = ebs.WaitForVolumeAttachment(volumeID, ec2InstanceIDList[i], experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "ebs attachment failed")
-				}
-			}
-			common.SetTargets(volumeID, "reverted", "EBS", chaosDetails)
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// AbortWatcher watches for the abort signal and reverts the chaos
-func AbortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, volumeIDList []string, abort chan os.Signal, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for _, volumeID := range volumeIDList {
-		//Get volume attachment details
-		instanceID, deviceName, err := ebs.GetVolumeAttachmentDetails(volumeID, experimentsDetails.VolumeTag, experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Failed to get the attachment info: %v", err)
-		}
-
-		//Getting the EBS volume attachment status
-		ebsState, err := ebs.GetEBSStatus(volumeID, instanceID, experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Failed to get the ebs status when an abort signal is received: %v", err)
-		}
-		if ebsState != "attached" {
-
-			//Wait for ebs volume detachment
-			//We first wait for the volume to reach the detached state, then attach it back.
-			log.Info("[Abort]: Wait for EBS complete volume detachment")
-			if err = ebs.WaitForVolumeDetachment(volumeID, instanceID, experimentsDetails.Region, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				log.Errorf("Unable to detach the ebs volume: %v", err)
-			}
-			//Attaching the ebs volume back to the instance
-			log.Info("[Chaos]: Attaching the EBS volume back to the instance")
-			err = ebs.EBSVolumeAttach(volumeID, instanceID, deviceName, experimentsDetails.Region)
-			if err != nil {
-				log.Errorf("EBS attachment failed when an abort signal is received: %v", err)
-			}
-		}
-		common.SetTargets(volumeID, "reverted", "EBS", chaosDetails)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
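
Both injection modes share the same outer loop: keep running detach/wait/attach rounds until the wall-clock chaos duration elapses, re-reading the elapsed time at the end of each round. A compact, runnable sketch of the pattern with shortened, hypothetical durations:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	chaosDuration := 3 * time.Second
    	chaosInterval := time.Second

    	start := time.Now()
    	for time.Since(start) < chaosDuration {
    		fmt.Println("inject chaos, wait the interval, revert")
    		time.Sleep(chaosInterval)
    	}
    }
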
diff --git a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go b/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go
deleted file mode 100644
index 2472b94..0000000
--- a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-id/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareEC2TerminateByID contains the preparation and injection steps for the experiment
-func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//get the instance id or list of instance ids
-	instanceIDList := strings.Split(experimentsDetails.Ec2InstanceID, ",")
-	if experimentsDetails.Ec2InstanceID == "" || len(instanceIDList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "no EC2 instance ID found to terminate"}
-	}
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, instanceIDList, chaosDetails)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode will inject the ec2 instance termination in serial mode i.e. one after the other
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//PowerOff the instance
-			for i, id := range instanceIDList {
-
-				//Stopping the EC2 instance
-				log.Info("[Chaos]: Stopping the desired EC2 instance")
-				if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-
-				common.SetTargets(id, "injected", "EC2", chaosDetails)
-
-				//Wait for ec2 instance to completely stop
-				log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id)
-				if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-
-				// run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				//Wait for chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-				//Starting the EC2 instance
-				if experimentsDetails.ManagedNodegroup != "enable" {
-					log.Info("[Chaos]: Starting back the EC2 instance")
-					if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-
-					//Wait for ec2 instance to get in running state
-					log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id)
-					if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-				common.SetTargets(id, "reverted", "EC2", chaosDetails)
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode will inject the ec2 instance termination in parallel mode i.e. all at once
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//PowerOff the instance
-			for _, id := range instanceIDList {
-				//Stopping the EC2 instance
-				log.Info("[Chaos]: Stopping the desired EC2 instance")
-				if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-				common.SetTargets(id, "injected", "EC2", chaosDetails)
-			}
-
-			for _, id := range instanceIDList {
-				//Wait for ec2 instance to completely stop
-				log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id)
-				if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-				common.SetTargets(id, "reverted", "EC2 Instance ID", chaosDetails)
-			}
-
-			// run the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			//Wait for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-			//Starting the EC2 instance
-			if experimentsDetails.ManagedNodegroup != "enable" {
-
-				for _, id := range instanceIDList {
-					log.Info("[Chaos]: Starting back the EC2 instance")
-					if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-
-				for _, id := range instanceIDList {
-					//Wait for ec2 instance to get in running state
-					log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id)
-					if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-			}
-			for _, id := range instanceIDList {
-				common.SetTargets(id, "reverted", "EC2", chaosDetails)
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for _, id := range instanceIDList {
-		instanceState, err := awslib.GetEC2InstanceStatus(id, experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Failed to get instance status when an abort signal is received: %v", err)
-		}
-		if instanceState != "running" && experimentsDetails.ManagedNodegroup != "enable" {
-
-			log.Info("[Abort]: Waiting for the EC2 instance to get down")
-			if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-				log.Errorf("Unable to wait till stop of the instance: %v", err)
-			}
-
-			log.Info("[Abort]: Starting EC2 instance as abort signal received")
-			err := awslib.EC2Start(id, experimentsDetails.Region)
-			if err != nil {
-				log.Errorf("EC2 instance failed to start when an abort signal is received: %v", err)
-			}
-		}
-		common.SetTargets(id, "reverted", "EC2", chaosDetails)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
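
Stripped of probes, events, and the managed-nodegroup special case, the serial-mode loop above reduces to a duration-bounded stop/wait/start cycle. The sketch below shows that shape; stopInstance and startInstance are hypothetical stubs standing in for the awslib.EC2Stop and awslib.EC2Start calls, not the real AWS helpers.

package main

import (
	"fmt"
	"time"
)

// stopInstance and startInstance are hypothetical stubs standing in for
// awslib.EC2Stop and awslib.EC2Start.
func stopInstance(id string) error  { fmt.Println("stopping", id); return nil }
func startInstance(id string) error { fmt.Println("starting", id); return nil }

// runSerialChaos repeats stop -> sleep -> start across all targets,
// re-checking the elapsed time after each full pass until the total
// chaos duration has elapsed, the same shape as injectChaosInSerialMode.
func runSerialChaos(ids []string, chaosDuration, chaosInterval time.Duration) error {
	start := time.Now()
	for time.Since(start) < chaosDuration {
		for _, id := range ids {
			if err := stopInstance(id); err != nil {
				return err
			}
			// The chaos interval is how long each instance stays down.
			time.Sleep(chaosInterval)
			if err := startInstance(id); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	_ = runSerialChaos([]string{"i-0abc", "i-0def"}, 3*time.Second, time.Second)
}
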
diff --git a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go b/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go
deleted file mode 100644
index 25e1b3a..0000000
--- a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-tag/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-	"github.com/sirupsen/logrus"
-)
-
-var inject, abort chan os.Signal
-
-// PrepareEC2TerminateByTag contains the preparation and injection steps for the experiment
-func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	instanceIDList := common.FilterBasedOnPercentage(experimentsDetails.InstanceAffectedPerc, experimentsDetails.TargetInstanceIDList)
-	log.Infof("[Chaos]:Number of Instance targeted: %v", len(instanceIDList))
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, instanceIDList, chaosDetails)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode will inject the ec2 instance termination in serial mode i.e. one after the other
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//PowerOff the instance
-			for i, id := range instanceIDList {
-
-				//Stopping the EC2 instance
-				log.Info("[Chaos]: Stopping the desired EC2 instance")
-				if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-
-				common.SetTargets(id, "injected", "EC2", chaosDetails)
-
-				//Wait for ec2 instance to completely stop
-				log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id)
-				if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-
-				// run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				//Wait for chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-				//Starting the EC2 instance
-				if experimentsDetails.ManagedNodegroup != "enable" {
-					log.Info("[Chaos]: Starting back the EC2 instance")
-					if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-
-					//Wait for ec2 instance to get in running state
-					log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id)
-					if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-				common.SetTargets(id, "reverted", "EC2", chaosDetails)
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode will inject the ec2 instance termination in parallel mode i.e. all at once
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-			log.Infof("[Info]: Target instanceID list, %v", instanceIDList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on ec2 instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//PowerOff the instance
-			for _, id := range instanceIDList {
-				//Stopping the EC2 instance
-				log.Info("[Chaos]: Stopping the desired EC2 instance")
-				if err := awslib.EC2Stop(id, experimentsDetails.Region); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-				common.SetTargets(id, "injected", "EC2", chaosDetails)
-			}
-
-			for _, id := range instanceIDList {
-				//Wait for ec2 instance to completely stop
-				log.Infof("[Wait]: Wait for EC2 instance '%v' to get in stopped state", id)
-				if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-					return stacktrace.Propagate(err, "ec2 instance failed to stop")
-				}
-			}
-
-			// run the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			//Wait for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-			//Starting the EC2 instance
-			if experimentsDetails.ManagedNodegroup != "enable" {
-
-				for _, id := range instanceIDList {
-					log.Info("[Chaos]: Starting back the EC2 instance")
-					if err := awslib.EC2Start(id, experimentsDetails.Region); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-
-				for _, id := range instanceIDList {
-					//Wait for ec2 instance to get in running state
-					log.Infof("[Wait]: Wait for EC2 instance '%v' to get in running state", id)
-					if err := awslib.WaitForEC2Up(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-						return stacktrace.Propagate(err, "ec2 instance failed to start")
-					}
-				}
-			}
-			for _, id := range instanceIDList {
-				common.SetTargets(id, "reverted", "EC2", chaosDetails)
-			}
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-	return nil
-}
-
-// SetTargetInstance selects the target instances that are in the running state, filtered from the given instance tag
-func SetTargetInstance(experimentsDetails *experimentTypes.ExperimentDetails) error {
-
-	instanceIDList, err := awslib.GetInstanceList(experimentsDetails.InstanceTag, experimentsDetails.Region)
-	if err != nil {
-		return stacktrace.Propagate(err, "failed to get the instance id list")
-	}
-	if len(instanceIDList) == 0 {
-		return cerrors.Error{
-			ErrorCode: cerrors.ErrorTypeTargetSelection,
-			Reason:    fmt.Sprintf("no instance found with the given tag %v, in region %v", experimentsDetails.InstanceTag, experimentsDetails.Region),
-		}
-	}
-
-	for _, id := range instanceIDList {
-		instanceState, err := awslib.GetEC2InstanceStatus(id, experimentsDetails.Region)
-		if err != nil {
-			return stacktrace.Propagate(err, "failed to get the instance status while selecting the target instances")
-		}
-		if instanceState == "running" {
-			experimentsDetails.TargetInstanceIDList = append(experimentsDetails.TargetInstanceIDList, id)
-		}
-	}
-
-	if len(experimentsDetails.TargetInstanceIDList) == 0 {
-		return cerrors.Error{
-			ErrorCode: cerrors.ErrorTypeChaosInject,
-			Reason:    "failed to get any running instance",
-			Target:    fmt.Sprintf("EC2 Instance Tag: %v", experimentsDetails.InstanceTag)}
-	}
-
-	log.InfoWithValues("[Info]: Targeting the running instances filtered from instance tag", logrus.Fields{
-		"Total number of instances filtered":   len(instanceIDList),
-		"Number of running instances filtered": len(experimentsDetails.TargetInstanceIDList),
-	})
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for _, id := range instanceIDList {
-		instanceState, err := awslib.GetEC2InstanceStatus(id, experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Failed to get instance status when an abort signal is received: %v", err)
-		}
-		if instanceState != "running" && experimentsDetails.ManagedNodegroup != "enable" {
-
-			log.Info("[Abort]: Waiting for the EC2 instance to get down")
-			if err := awslib.WaitForEC2Down(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.ManagedNodegroup, experimentsDetails.Region, id); err != nil {
-				log.Errorf("Unable to wait till stop of the instance: %v", err)
-			}
-
-			log.Info("[Abort]: Starting EC2 instance as abort signal received")
-			err := awslib.EC2Start(id, experimentsDetails.Region)
-			if err != nil {
-				log.Errorf("EC2 instance failed to start when an abort signal is received: %v", err)
-			}
-		}
-		common.SetTargets(id, "reverted", "EC2", chaosDetails)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
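
PrepareEC2TerminateByTag narrows the tag-filtered candidates with common.FilterBasedOnPercentage before injecting chaos. A plausible reading of that helper, assuming it simply truncates the list to the requested percentage with a floor of one target, is sketched below; this is an assumption for illustration, not the actual litmus-go implementation.

package main

import "fmt"

// filterByPercentage keeps roughly perc percent of the candidate
// targets, always at least one when the list is non-empty. It is a
// sketch of the behaviour assumed of common.FilterBasedOnPercentage.
func filterByPercentage(perc int, targets []string) []string {
	if len(targets) == 0 {
		return nil
	}
	n := len(targets) * perc / 100
	if n < 1 {
		n = 1
	}
	if n > len(targets) {
		n = len(targets)
	}
	return targets[:n]
}

func main() {
	ids := []string{"i-1", "i-2", "i-3", "i-4"}
	fmt.Println(filterByPercentage(50, ids)) // prints [i-1 i-2]
}
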
diff --git a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go b/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go
deleted file mode 100644
index fb6dff6..0000000
--- a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go
+++ /dev/null
@@ -1,303 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-	"google.golang.org/api/compute/v1"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareDiskVolumeLossByLabel contains the preparation and injection steps for the experiment
-func PrepareDiskVolumeLossByLabel(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	diskVolumeNamesList := common.FilterBasedOnPercentage(experimentsDetails.DiskAffectedPerc, experimentsDetails.TargetDiskVolumeNamesList)
-
-	if err := getDeviceNamesAndVMInstanceNames(diskVolumeNamesList, computeService, experimentsDetails); err != nil {
-		return err
-	}
-
-	select {
-
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-
-	default:
-		// watching for the abort signal and revert the chaos
-		go abortWatcher(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, abort, chaosDetails)
-
-		switch strings.ToLower(experimentsDetails.Sequence) {
-		case "serial":
-			if err = injectChaosInSerialMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in serial mode")
-			}
-		case "parallel":
-			if err = injectChaosInParallelMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-			}
-		default:
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-		}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode will inject the disk loss chaos in serial mode which means one after the other
-func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on VM instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Detaching the disk volume from the instance
-			log.Info("[Chaos]: Detaching the disk volume from the instance")
-			if err = gcp.DiskVolumeDetach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i]); err != nil {
-				return stacktrace.Propagate(err, "disk detachment failed")
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails)
-
-			//Wait for disk volume detachment
-			log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i])
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "unable to detach the disk volume from the vm instance")
-			}
-
-			// run the probes during chaos
-			// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-			if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return err
-				}
-			}
-
-			//Wait for chaos interval
-			log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			//Getting the disk volume attachment status
-			diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the disk volume status")
-			}
-
-			switch diskState {
-			case "attached":
-				log.Info("[Skip]: The disk volume is already attached")
-			default:
-				//Attaching the disk volume to the instance
-				log.Info("[Chaos]: Attaching the disk volume back to the instance")
-				if err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil {
-					return stacktrace.Propagate(err, "disk attachment failed")
-				}
-
-				//Wait for disk volume attachment
-				log.Infof("[Wait]: Wait for disk volume attachment for %v volume", targetDiskVolumeNamesList[i])
-				if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "unable to attach the disk volume to the vm instance")
-				}
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-		}
-
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode will inject the disk loss chaos in parallel mode i.e. all at once
-func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on vm instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Detaching the disk volume from the instance
-			log.Info("[Chaos]: Detaching the disk volume from the instance")
-			if err = gcp.DiskVolumeDetach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i]); err != nil {
-				return stacktrace.Propagate(err, "disk detachment failed")
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails)
-		}
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Wait for disk volume detachment
-			log.Infof("[Wait]: Wait for disk volume detachment for volume %v", targetDiskVolumeNamesList[i])
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "unable to detach the disk volume from the vm instance")
-			}
-		}
-
-		// run the probes during chaos
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-				return err
-			}
-		}
-
-		//Wait for chaos interval
-		log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-		common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Getting the disk volume attachment status
-			diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone)
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the disk status")
-			}
-
-			switch diskState {
-			case "attached":
-				log.Info("[Skip]: The disk volume is already attached")
-			default:
-				//Attaching the disk volume to the instance
-				log.Info("[Chaos]: Attaching the disk volume to the instance")
-				if err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil {
-					return stacktrace.Propagate(err, "disk attachment failed")
-				}
-
-				//Wait for disk volume attachment
-				log.Infof("[Wait]: Wait for disk volume attachment for volume %v", targetDiskVolumeNamesList[i])
-				if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "unable to attach the disk volume to the vm instance")
-				}
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-		}
-
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, abort chan os.Signal, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-
-	for i := range targetDiskVolumeNamesList {
-
-		//Getting the disk volume attachment status
-		diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone)
-		if err != nil {
-			log.Errorf("Failed to get %s disk state when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err)
-		}
-
-		if diskState != "attached" {
-
-			//Wait for disk volume detachment
-			//We first wait for the volume to get in detached state then we are attaching it.
-			log.Infof("[Abort]: Wait for %s complete disk volume detachment", targetDiskVolumeNamesList[i])
-
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, instanceNamesList[i], zone, experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				log.Errorf("Unable to detach %s disk volume, err: %v", targetDiskVolumeNamesList[i], err)
-			}
-
-			//Attaching the disk volume back to the instance
-			log.Infof("[Chaos]: Attaching %s disk volume to the instance", targetDiskVolumeNamesList[i])
-
-			err = gcp.DiskVolumeAttach(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zone, experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i])
-			if err != nil {
-				log.Errorf("%s disk attachment failed when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err)
-			}
-		}
-
-		common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-	}
-
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
-
-// getDeviceNamesAndVMInstanceNames fetches the device name and attached VM instance name for each target disk
-func getDeviceNamesAndVMInstanceNames(diskVolumeNamesList []string, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails) error {
-
-	for i := range diskVolumeNamesList {
-
-		instanceName, err := gcp.GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, experimentsDetails.Zones, diskVolumeNamesList[i])
-		if err != nil {
-			return stacktrace.Propagate(err, "failed to get the disk attachment info")
-		}
-		// stacktrace.Propagate returns nil for a nil cause, so an empty instance name must be checked explicitly
-		if instanceName == "" {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("disk %s is not attached to any vm instance", diskVolumeNamesList[i])}
-		}
-
-		deviceName, err := gcp.GetDiskDeviceNameForVM(computeService, diskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones, instanceName)
-		if err != nil {
-			return stacktrace.Propagate(err, "failed to fetch the disk device name")
-		}
-
-		experimentsDetails.TargetDiskInstanceNamesList = append(experimentsDetails.TargetDiskInstanceNamesList, instanceName)
-		experimentsDetails.DeviceNamesList = append(experimentsDetails.DeviceNamesList, deviceName)
-	}
-
-	return nil
-}
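
The inject/abort pair set up in PrepareDiskVolumeLossByLabel can look redundant, since both channels are notified of the same signals. The split matters because of when the signal lands: before injection there is nothing to revert, so the select on inject exits cleanly, while after injection the abort watcher must re-attach disks first. A compact, self-contained sketch of that two-channel pattern, with the chaos and revert steps reduced to placeholder prints:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// signal.Notify delivers each incoming signal to every registered
	// channel, so both inject and abort see the same SIGINT/SIGTERM.
	inject := make(chan os.Signal, 1)
	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
	abort := make(chan os.Signal, 1)
	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)

	select {
	case <-inject:
		// The signal landed before any chaos was injected: there is
		// nothing to revert, so exit cleanly.
		os.Exit(0)
	default:
		go func() {
			<-abort // chaos is underway; revert before exiting
			fmt.Println("[Abort]: reverting chaos")
			os.Exit(1)
		}()
		fmt.Println("[Chaos]: injecting chaos")
		time.Sleep(2 * time.Second)
		fmt.Println("[Chaos]: done")
	}
}
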
diff --git a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go b/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go
deleted file mode 100644
index 38f0690..0000000
--- a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	gcp "github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-	"github.com/pkg/errors"
-	"google.golang.org/api/compute/v1"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareDiskVolumeLoss contains the preparation and injection steps for the experiment
-func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//get the disk volume names list
-	diskNamesList := strings.Split(experimentsDetails.DiskVolumeNames, ",")
-
-	//get the disk zones list
-	diskZonesList := strings.Split(experimentsDetails.Zones, ",")
-
-	//get the device names for the given disks
-	if err := getDeviceNamesList(computeService, experimentsDetails, diskNamesList, diskZonesList); err != nil {
-		return stacktrace.Propagate(err, "failed to fetch the disk device names")
-	}
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-
-		// watching for the abort signal and revert the chaos
-		go abortWatcher(computeService, experimentsDetails, diskNamesList, diskZonesList, abort, chaosDetails)
-
-		switch strings.ToLower(experimentsDetails.Sequence) {
-		case "serial":
-			if err = injectChaosInSerialMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in serial mode")
-			}
-		case "parallel":
-			if err = injectChaosInParallelMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-			}
-		default:
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-		}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode will inject the disk loss chaos in serial mode which means one after the other
-func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on VM instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-		for i := range targetDiskVolumeNamesList {
-
-			//Detaching the disk volume from the instance
-			log.Infof("[Chaos]: Detaching %s disk volume from the instance", targetDiskVolumeNamesList[i])
-			if err = gcp.DiskVolumeDetach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i]); err != nil {
-				return stacktrace.Propagate(err, "disk detachment failed")
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails)
-
-			//Wait for disk volume detachment
-			log.Infof("[Wait]: Wait for %s disk volume detachment", targetDiskVolumeNamesList[i])
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "unable to detach disk volume from the vm instance")
-			}
-
-			// run the probes during chaos
-			// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-			if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return err
-				}
-			}
-
-			//Wait for chaos interval
-			log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			//Getting the disk volume attachment status
-			diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i])
-			if err != nil {
-				return stacktrace.Propagate(err, fmt.Sprintf("failed to get %s disk volume status", targetDiskVolumeNamesList[i]))
-			}
-
-			switch diskState {
-			case "attached":
-				log.Infof("[Skip]: %s disk volume is already attached", targetDiskVolumeNamesList[i])
-			default:
-				//Attaching the disk volume to the instance
-				log.Infof("[Chaos]: Attaching %s disk volume back to the instance", targetDiskVolumeNamesList[i])
-				if err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil {
-					return stacktrace.Propagate(err, "disk attachment failed")
-				}
-
-				//Wait for disk volume attachment
-				log.Infof("[Wait]: Wait for %s disk volume attachment", targetDiskVolumeNamesList[i])
-				if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "unable to attach disk volume to the vm instance")
-				}
-			}
-			common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// injectChaosInParallelMode will inject the disk loss chaos in parallel mode i.e. all at once
-func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on vm instance"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Detaching the disk volume from the instance
-			log.Infof("[Chaos]: Detaching %s disk volume from the instance", targetDiskVolumeNamesList[i])
-			if err = gcp.DiskVolumeDetach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i]); err != nil {
-				return stacktrace.Propagate(err, "disk detachment failed")
-			}
-
-			common.SetTargets(targetDiskVolumeNamesList[i], "injected", "DiskVolume", chaosDetails)
-		}
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Wait for disk volume detachment
-			log.Infof("[Wait]: Wait for %s disk volume detachment", targetDiskVolumeNamesList[i])
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				return stacktrace.Propagate(err, "unable to detach disk volume from the vm instance")
-			}
-		}
-
-		// run the probes during chaos
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-				return err
-			}
-		}
-
-		//Wait for chaos interval
-		log.Infof("[Wait]: Waiting for the chaos interval of %vs", experimentsDetails.ChaosInterval)
-		common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-		for i := range targetDiskVolumeNamesList {
-
-			//Getting the disk volume attachment status
-			diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i])
-			if err != nil {
-				return stacktrace.Propagate(err, "failed to get the disk status")
-			}
-
-			switch diskState {
-			case "attached":
-				log.Infof("[Skip]: %s disk volume is already attached", targetDiskVolumeNamesList[i])
-			default:
-				//Attaching the disk volume to the instance
-				log.Infof("[Chaos]: Attaching %s disk volume to the instance", targetDiskVolumeNamesList[i])
-				if err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i]); err != nil {
-					return stacktrace.Propagate(err, "disk attachment failed")
-				}
-
-				//Wait for disk volume attachment
-				log.Infof("[Wait]: Wait for %s disk volume attachment", targetDiskVolumeNamesList[i])
-				if err = gcp.WaitForVolumeAttachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-					return stacktrace.Propagate(err, "unable to attach disk volume to the vm instance")
-				}
-			}
-			common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, abort chan os.Signal, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-
-	for i := range targetDiskVolumeNamesList {
-
-		//Getting the disk volume attachment status
-		diskState, err := gcp.GetDiskVolumeState(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i])
-		if err != nil {
-			log.Errorf("Failed to get %s disk state when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err)
-		}
-
-		if diskState != "attached" {
-
-			//Wait for disk volume detachment
-			//We first wait for the volume to get in detached state then we are attaching it.
-			log.Infof("[Abort]: Wait for complete disk volume detachment for %s", targetDiskVolumeNamesList[i])
-
-			if err = gcp.WaitForVolumeDetachment(computeService, targetDiskVolumeNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.TargetDiskInstanceNamesList[i], diskZonesList[i], experimentsDetails.Delay, experimentsDetails.Timeout); err != nil {
-				log.Errorf("Unable to detach %s disk volume, err: %v", targetDiskVolumeNamesList[i], err)
-			}
-
-			//Attaching the disk volume back to the instance
-			log.Infof("[Chaos]: Attaching %s disk volume back to the instance", targetDiskVolumeNamesList[i])
-
-			err = gcp.DiskVolumeAttach(computeService, experimentsDetails.TargetDiskInstanceNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.DeviceNamesList[i], targetDiskVolumeNamesList[i])
-			if err != nil {
-				log.Errorf("%s disk attachment failed when an abort signal is received, err: %v", targetDiskVolumeNamesList[i], err)
-			}
-		}
-
-		common.SetTargets(targetDiskVolumeNamesList[i], "reverted", "DiskVolume", chaosDetails)
-	}
-
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
-
-// getDeviceNamesList fetches the device names for the target disks
-func getDeviceNamesList(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, diskNamesList, diskZonesList []string) error {
-
-	for i := range diskNamesList {
-
-		deviceName, err := gcp.GetDiskDeviceNameForVM(computeService, diskNamesList[i], experimentsDetails.GCPProjectID, diskZonesList[i], experimentsDetails.TargetDiskInstanceNamesList[i])
-		if err != nil {
-			return err
-		}
-
-		experimentsDetails.DeviceNamesList = append(experimentsDetails.DeviceNamesList, deviceName)
-	}
-
-	return nil
-}
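
PrepareDiskVolumeLoss splits DiskVolumeNames and Zones on commas and then indexes the two lists in lockstep, so a mismatched pair of env values would panic with an index out of range. Below is a defensive sketch of that pairing; the explicit length check is an addition for illustration, not part of the original lib.

package main

import (
	"fmt"
	"strings"
)

// pairDisksWithZones splits the comma-separated inputs and checks that
// the two lists line up before they are indexed in lockstep.
func pairDisksWithZones(diskNames, zones string) ([][2]string, error) {
	disks := strings.Split(diskNames, ",")
	zoneList := strings.Split(zones, ",")
	if len(disks) != len(zoneList) {
		return nil, fmt.Errorf("got %d disks but %d zones; the lists must be the same length", len(disks), len(zoneList))
	}
	pairs := make([][2]string, len(disks))
	for i := range disks {
		pairs[i] = [2]string{disks[i], zoneList[i]}
	}
	return pairs, nil
}

func main() {
	pairs, err := pairDisksWithZones("disk-a,disk-b", "us-central1-a,us-central1-b")
	if err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println(pairs) // [[disk-a us-central1-a] [disk-b us-central1-b]]
}
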
diff --git a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go b/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go
deleted file mode 100644
index ca5eb4c..0000000
--- a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-	"google.golang.org/api/compute/v1"
-)
-
-var inject, abort chan os.Signal
-
-// PrepareVMStopByLabel executes the experiment steps by injecting chaos into target VM instances
-func PrepareVMStopByLabel(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	instanceNamesList := common.FilterBasedOnPercentage(experimentsDetails.InstanceAffectedPerc, experimentsDetails.TargetVMInstanceNameList)
-	log.Infof("[Chaos]:Number of Instance targeted: %v", len(instanceNamesList))
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(computeService, experimentsDetails, instanceNamesList, chaosDetails)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stops VM instances in serial mode i.e. one after the other
-func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
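-	// the select below is non-blocking: if an interrupt was already delivered on
-	// the inject channel the run exits immediately; otherwise the default branch
-	// starts the chaos loop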
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target VM instance list, %v", instanceNamesList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Stop the instance
-			for i := range instanceNamesList {
-
-				//Stopping the VM instance
-				log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i])
-				if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-					return stacktrace.Propagate(err, "VM instance failed to stop")
-				}
-
-				common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails)
-
-				//Wait for VM instance to completely stop
-				log.Infof("[Wait]: Wait for VM instance %s to stop", instanceNamesList[i])
-				if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to fully shutdown")
-				}
-
-				// run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return err
-					}
-				}
-
-				// wait for the chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-				switch experimentsDetails.ManagedInstanceGroup {
-				case "enable":
-
-					// wait for VM instance to get in running state
-					log.Infof("[Wait]: Wait for VM instance %s to get in RUNNING state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "unable to start %s vm instance")
-					}
-
-				default:
-
-					// starting the VM instance
-					log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i])
-					if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "vm instance failed to start")
-					}
-
-					// wait for VM instance to get in running state
-					log.Infof("[Wait]: Wait for VM instance %s to get in RUNNING state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "unable to start %s vm instance")
-					}
-				}
-
-				common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode stops VM instances in parallel mode i.e. all at once
-func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target VM instance list, %v", instanceNamesList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			// power-off the instance
-			for i := range instanceNamesList {
-
-				// stopping the VM instance
-				log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i])
-				if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to stop")
-				}
-
-				common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails)
-			}
-
-			for i := range instanceNamesList {
-
-				// wait for VM instance to completely stop
-				log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i])
-				if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to fully shutdown")
-				}
-			}
-
-			// run the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return err
-				}
-			}
-
-			// wait for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			switch experimentsDetails.ManagedInstanceGroup {
-			case "enable":
-
-				// wait for VM instance to get in running state
-				for i := range instanceNamesList {
-
-					log.Infof("[Wait]: Wait for VM instance '%v' to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "unable to start the vm instance")
-					}
-
-					common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-				}
-
-			default:
-
-				// starting the VM instance
-				for i := range instanceNamesList {
-
-					log.Info("[Chaos]: Starting back the VM instance")
-					if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "vm instance failed to start")
-					}
-				}
-
-				// wait for VM instance to get in running state
-				for i := range instanceNamesList {
-
-					log.Infof("[Wait]: Wait for VM instance '%v' to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-						return stacktrace.Propagate(err, "unable to start the vm instance")
-					}
-
-					common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-				}
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, chaosDetails *types.ChaosDetails) {
-
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for i := range instanceNamesList {
-		instanceState, err := gcplib.GetVMInstanceStatus(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones)
-		if err != nil {
-			log.Errorf("Failed to get %s instance status when an abort signal is received, err: %v", instanceNamesList[i], err)
-		}
-		if instanceState != "RUNNING" && experimentsDetails.ManagedInstanceGroup != "enable" {
-
-			log.Info("[Abort]: Waiting for the VM instance to shut down")
-			if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-				log.Errorf("Unable to wait till stop of %s instance, err: %v", instanceNamesList[i], err)
-			}
-
-			log.Info("[Abort]: Starting VM instance as abort signal received")
-			err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, experimentsDetails.Zones)
-			if err != nil {
-				log.Errorf("%s instance failed to start when an abort signal is received, err: %v", instanceNamesList[i], err)
-			}
-		}
-		common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-	}
-
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go b/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go
deleted file mode 100644
index 2cbcfdb..0000000
--- a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-	"google.golang.org/api/compute/v1"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// PrepareVMStop contains the preparation and injection steps for the experiment
-func PrepareVMStop(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	// waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// get the instance name or list of instance names
-	instanceNamesList := strings.Split(experimentsDetails.VMInstanceName, ",")
-
-	// get the zone name or list of corresponding zones for the instances
-	instanceZonesList := strings.Split(experimentsDetails.Zones, ",")
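-
-	// defensive check (editorial addition, not in the original code): the two
-	// lists are indexed one-to-one below, so a length mismatch would otherwise
-	// panic with an index out of range
-	if len(instanceNamesList) != len(instanceZonesList) {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "unequal number of instance names and zones provided"}
-	}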
-
-	go abortWatcher(computeService, experimentsDetails, instanceNamesList, instanceZonesList, chaosDetails)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	// wait for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stops VM instances in serial mode i.e. one after the other
-func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target instance list, %v", instanceNamesList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Stop the instance
-			for i := range instanceNamesList {
-
-				//Stopping the VM instance
-				log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i])
-				if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to stop")
-				}
-
-				common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails)
-
-				//Wait for VM instance to completely stop
-				log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i])
-				if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to fully shutdown")
-				}
-
-				// run the probes during chaos
-				// the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return err
-					}
-				}
-
-				// wait for the chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-				switch experimentsDetails.ManagedInstanceGroup {
-				case "disable":
-
-					// starting the VM instance
-					log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i])
-					if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "vm instance failed to start")
-					}
-
-					// wait for VM instance to get in running state
-					log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "unable to start vm instance")
-					}
-
-				default:
-
-					// wait for VM instance to get in running state
-					log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "unable to start vm instance")
-					}
-				}
-
-				common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode stops VM instances in parallel mode i.e. all at once
-func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp when the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target VM instance list, %v", instanceNamesList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM instance"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			// power-off the instance
-			for i := range instanceNamesList {
-
-				// stopping the VM instance
-				log.Infof("[Chaos]: Stopping %s VM instance", instanceNamesList[i])
-				if err := gcplib.VMInstanceStop(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to stop")
-				}
-
-				common.SetTargets(instanceNamesList[i], "injected", "VM", chaosDetails)
-			}
-
-			for i := range instanceNamesList {
-
-				// wait for VM instance to completely stop
-				log.Infof("[Wait]: Wait for VM instance %s to get in stopped state", instanceNamesList[i])
-				if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-					return stacktrace.Propagate(err, "vm instance failed to fully shutdown")
-				}
-			}
-
-			// run the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return err
-				}
-			}
-
-			// wait for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			switch experimentsDetails.ManagedInstanceGroup {
-			case "disable":
-
-				// starting the VM instance
-				for i := range instanceNamesList {
-					log.Infof("[Chaos]: Starting back %s VM instance", instanceNamesList[i])
-					if err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "vm instance failed to start")
-					}
-				}
-
-				// wait for VM instance to get in running state
-				for i := range instanceNamesList {
-
-					log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "unable to start vm instance")
-					}
-
-					common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-				}
-
-			default:
-
-				// wait for VM instance to get in running state
-				for i := range instanceNamesList {
-
-					log.Infof("[Wait]: Wait for VM instance %s to get in running state", instanceNamesList[i])
-					if err := gcplib.WaitForVMInstanceUp(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, instanceZonesList[i]); err != nil {
-						return stacktrace.Propagate(err, "unable to start vm instance")
-					}
-
-					common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-				}
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, zonesList []string, chaosDetails *types.ChaosDetails) {
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-
-	if experimentsDetails.ManagedInstanceGroup != "enable" {
-
-		for i := range instanceNamesList {
-
-			instanceState, err := gcplib.GetVMInstanceStatus(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i])
-			if err != nil {
-				log.Errorf("Failed to get %s vm instance status when an abort signal is received, err: %v", instanceNamesList[i], err)
-			}
-
-			if instanceState != "RUNNING" {
-
-				log.Infof("[Abort]: Waiting for %s VM instance to shut down", instanceNamesList[i])
-				if err := gcplib.WaitForVMInstanceDown(computeService, experimentsDetails.Timeout, experimentsDetails.Delay, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i]); err != nil {
-					log.Errorf("Unable to wait till stop of %s instance, err: %v", instanceNamesList[i], err)
-				}
-
-				log.Infof("[Abort]: Starting %s VM instance as abort signal is received", instanceNamesList[i])
-				err := gcplib.VMInstanceStart(computeService, instanceNamesList[i], experimentsDetails.GCPProjectID, zonesList[i])
-				if err != nil {
-					log.Errorf("%s VM instance failed to start when an abort signal is received, err: %v", instanceNamesList[i], err)
-				}
-			}
-
-			common.SetTargets(instanceNamesList[i], "reverted", "VM", chaosDetails)
-		}
-	}
-
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/http-chaos/helper/http-helper.go b/chaoslib/litmus/http-chaos/helper/http-helper.go
deleted file mode 100644
index 43f339d..0000000
--- a/chaoslib/litmus/http-chaos/helper/http-helper.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package helper
-
-import (
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-// Helper injects the http chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Fetching all the ENV passed for the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	err := prepareK8sHttpChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails)
-	if err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-}
-
-// prepareK8sHttpChaos contains the preparation steps before chaos injection
-func prepareK8sHttpChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-
-	targetList, err := common.ParseTargets(chaosDetails.ChaosPodName)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not parse targets")
-	}
-
-	var targets []targetDetails
-
-	for _, t := range targetList.Target {
-		td := targetDetails{
-			Name:            t.Name,
-			Namespace:       t.Namespace,
-			TargetContainer: t.TargetContainer,
-			Source:          chaosDetails.ChaosPodName,
-		}
-
-		td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container id")
-		}
-
-		// extract out the pid of the target container
-		td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container pid")
-		}
-		targets = append(targets, td)
-	}
-
-	// watch for the abort signal and revert the chaos
-	go abortWatcher(targets, resultDetails.Name, chaosDetails.ChaosNamespace, experimentsDetails)
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(1)
-	default:
-	}
-
-	for _, t := range targets {
-		// injecting http chaos inside target container
-		if err = injectChaos(experimentsDetails, t); err != nil {
-			return stacktrace.Propagate(err, "could not inject chaos")
-		}
-		log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil {
-			if revertErr := revertChaos(experimentsDetails, t); revertErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())}
-			}
-			return stacktrace.Propagate(err, "could not annotate chaosresult")
-		}
-	}
-
-	// record the event inside chaosengine
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	log.Infof("[Chaos]: Waiting for %vs", experimentsDetails.ChaosDuration)
-
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	log.Info("[Chaos]: chaos duration is over, reverting chaos")
-
-	var errList []string
-	for _, t := range targets {
-		// clean up the ip rules and proxy process after chaos injection
-		err := revertChaos(experimentsDetails, t)
-		if err != nil {
-			errList = append(errList, err.Error())
-			continue
-		}
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-			errList = append(errList, err.Error())
-		}
-	}
-
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
-
-// injectChaos injects the http chaos in the target container and adds a ruleset to iptables to redirect the target port
-func injectChaos(experimentDetails *experimentTypes.ExperimentDetails, t targetDetails) error {
-	if err := startProxy(experimentDetails, t.Pid); err != nil {
-		killErr := killProxy(t.Pid, t.Source)
-		if killErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(killErr).Error())}
-		}
-		return stacktrace.Propagate(err, "could not start proxy server")
-	}
-	if err := addIPRuleSet(experimentDetails, t.Pid); err != nil {
-		killErr := killProxy(t.Pid, t.Source)
-		if killErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(killErr).Error())}
-		}
-		return stacktrace.Propagate(err, "could not add ip rules")
-	}
-	return nil
-}
-
-// revertChaos reverts the http chaos in the target container
-func revertChaos(experimentDetails *experimentTypes.ExperimentDetails, t targetDetails) error {
-
-	var errList []string
-
-	if err := removeIPRuleSet(experimentDetails, t.Pid); err != nil {
-		errList = append(errList, err.Error())
-	}
-
-	if err := killProxy(t.Pid, t.Source); err != nil {
-		errList = append(errList, err.Error())
-	}
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-	return nil
-}
-
-// startProxy starts the proxy process inside the target container
-// it uses the nsenter command to enter the network namespace of the target container
-// and executes the proxy related commands inside it.
-func startProxy(experimentDetails *experimentTypes.ExperimentDetails, pid int) error {
-
-	toxics := os.Getenv("TOXIC_COMMAND")
-
-	// starting toxiproxy server inside the target container
-	startProxyServerCommand := fmt.Sprintf("(sudo nsenter -t %d -n toxiproxy-server -host=0.0.0.0 > /dev/null 2>&1 &)", pid)
-	// Creating a proxy for the targeted service in the target container
-	createProxyCommand := fmt.Sprintf("(sudo nsenter -t %d -n toxiproxy-cli create -l 0.0.0.0:%d -u 0.0.0.0:%d proxy)", pid, experimentDetails.ProxyPort, experimentDetails.TargetServicePort)
-	createToxicCommand := fmt.Sprintf("(sudo nsenter -t %d -n toxiproxy-cli toxic add %s --toxicity %f proxy)", pid, toxics, float32(experimentDetails.Toxicity)/100.0)
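-	// TOXICITY is read as a percentage (0-100, default 100) in getENV, while
-	// toxiproxy-cli expects a probability in [0.0, 1.0], hence the division by 100 above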
-
-	// sleep 2 gives the proxy server time to become ready before the proxy is created and toxics are added
-	chaosCommand := fmt.Sprintf("%s && sleep 2 && %s && %s", startProxyServerCommand, createProxyCommand, createToxicCommand)
-
-	log.Infof("[Chaos]: Starting proxy server")
-
-	if err := common.RunBashCommand(chaosCommand, "failed to start proxy server", experimentDetails.ChaosPodName); err != nil {
-		return err
-	}
-
-	log.Info("[Info]: Proxy started successfully")
-	return nil
-}
-
-const NoProxyToKill = "you need to specify whom to kill"
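-
-// kill(1) prints the message above when its pid argument expands to nothing,
-// i.e. no toxiproxy process was found; the abort watcher matches on this
-// substring to treat a target as already reverted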
-
-// killProxy kills the proxy process inside the target container
-// it uses the nsenter command to enter the network namespace of the target container
-// and executes the proxy related commands inside it.
-func killProxy(pid int, source string) error {
-	stopProxyServerCommand := fmt.Sprintf("sudo nsenter -t %d -n sudo kill -9 $(ps aux | grep [t]oxiproxy | awk 'FNR==1{print $1}')", pid)
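-	// the [t]oxiproxy bracket pattern prevents grep from matching its own
-	// command line; note that awk's $1 assumes a ps whose first column is the
-	// PID (as in BusyBox ps), whereas GNU `ps aux` prints USER first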
-	log.Infof("[Chaos]: Stopping proxy server")
-
-	if err := common.RunBashCommand(stopProxyServerCommand, "failed to stop proxy server", source); err != nil {
-		return err
-	}
-
-	log.Info("[Info]: Proxy stopped successfully")
-	return nil
-}
-
-// addIPRuleSet adds the ip rule set to iptables in target container
-// it uses the nsenter command to enter the network namespace of the target container
-// and executes the iptables related commands inside it.
-func addIPRuleSet(experimentDetails *experimentTypes.ExperimentDetails, pid int) error {
-	// it inserts the proxy port REDIRECT rule at the beginning of the PREROUTING chain
-	// so that it matches all incoming packets on the target service port and
-	// redirects them to the proxy port
-	addIPRuleSetCommand := fmt.Sprintf("(sudo nsenter -t %d -n iptables -t nat -I PREROUTING -i %v -p tcp --dport %d -j REDIRECT --to-port %d)", pid, experimentDetails.NetworkInterface, experimentDetails.TargetServicePort, experimentDetails.ProxyPort)
-	log.Infof("[Chaos]: Adding IPtables ruleset")
-
-	if err := common.RunBashCommand(addIPRuleSetCommand, "failed to add ip rules", experimentDetails.ChaosPodName); err != nil {
-		return err
-	}
-
-	log.Info("[Info]: IP rule set added successfully")
-	return nil
-}
-
-const NoIPRulesetToRemove = "No chain/target/match by that name"
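-
-// iptables prints the message above when asked to delete a rule that does not
-// exist; the abort watcher matches on this substring to treat a target as
-// already reverted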
-
-// removeIPRuleSet removes the ip rule set from iptables in target container
-// it uses the nsenter command to enter the network namespace of the target container
-// and executes the iptables related commands inside it.
-func removeIPRuleSet(experimentDetails *experimentTypes.ExperimentDetails, pid int) error {
-	removeIPRuleSetCommand := fmt.Sprintf("sudo nsenter -t %d -n iptables -t nat -D PREROUTING -i %v -p tcp --dport %d -j REDIRECT --to-port %d", pid, experimentDetails.NetworkInterface, experimentDetails.TargetServicePort, experimentDetails.ProxyPort)
-	log.Infof("[Chaos]: Removing IPtables ruleset")
-
-	if err := common.RunBashCommand(removeIPRuleSetCommand, "failed to remove ip rules", experimentDetails.ChaosPodName); err != nil {
-		return err
-	}
-
-	log.Info("[Info]: IP rule set removed successfully")
-	return nil
-}
-
-// getENV fetches all the env variables from the runner pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", ""))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-	experimentDetails.NetworkInterface = types.Getenv("NETWORK_INTERFACE", "")
-	experimentDetails.TargetServicePort, _ = strconv.Atoi(types.Getenv("TARGET_SERVICE_PORT", ""))
-	experimentDetails.ProxyPort, _ = strconv.Atoi(types.Getenv("PROXY_PORT", ""))
-	experimentDetails.Toxicity, _ = strconv.Atoi(types.Getenv("TOXICITY", "100"))
-}
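-
-// note that the strconv.Atoi errors in getENV are discarded, so unset or
-// malformed numeric env vars silently fall back to 0 (TOXICITY alone defaults
-// to "100")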
-
-// abortWatcher continuously watches for the abort signal
-func abortWatcher(targets []targetDetails, resultName, chaosNS string, experimentDetails *experimentTypes.ExperimentDetails) {
-
-	<-abort
-	log.Info("[Abort]: Killing process started because of terminated signal received")
-	log.Info("[Abort]: Chaos Revert Started")
-
-	retry := 3
-	for retry > 0 {
-		for _, t := range targets {
-			if err = revertChaos(experimentDetails, t); err != nil {
-				if strings.Contains(err.Error(), NoIPRulesetToRemove) && strings.Contains(err.Error(), NoProxyToKill) {
-					continue
-				}
-				log.Errorf("unable to revert for %v pod, err :%v", t.Name, err)
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultName, chaosNS, "reverted", "pod", t.Name); err != nil {
-				log.Errorf("unable to annotate the chaosresult for %v pod, err :%v", t.Name, err)
-			}
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-
-	log.Info("Chaos Revert Completed")
-	os.Exit(1)
-}
-
-type targetDetails struct {
-	Name            string
-	Namespace       string
-	TargetContainer string
-	ContainerId     string
-	Pid             int
-	Source          string
-}
diff --git a/chaoslib/litmus/http-chaos/lib/header/header.go b/chaoslib/litmus/http-chaos/lib/header/header.go
deleted file mode 100644
index 737efb2..0000000
--- a/chaoslib/litmus/http-chaos/lib/header/header.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package header
-
-import (
-	http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/sirupsen/logrus"
-)
-
-//PodHttpModifyHeaderChaos contains the steps to prepare and inject http modify header chaos
-func PodHttpModifyHeaderChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Target Port":      experimentsDetails.TargetServicePort,
-		"Listen Port":      experimentsDetails.ProxyPort,
-		"Sequence":         experimentsDetails.Sequence,
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Toxicity":         experimentsDetails.Toxicity,
-		"Headers":          experimentsDetails.HeadersMap,
-		"Header Mode":      experimentsDetails.HeaderMode,
-	})
-
-	stream := "downstream"
-	if experimentsDetails.HeaderMode == "request" {
-		stream = "upstream"
-	}
-	args := "-t header --" + stream + " -a headers='" + (experimentsDetails.HeadersMap) + "' -a mode=" + experimentsDetails.HeaderMode
-	return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
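-
-// For illustration (values are hypothetical): with HeaderMode "response" and a
-// HeadersMap of {"X-Litmus-Test": "1"}, the args built above would be:
-//   -t header --downstream -a headers='{"X-Litmus-Test": "1"}' -a mode=response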
diff --git a/chaoslib/litmus/http-chaos/lib/http-chaos.go b/chaoslib/litmus/http-chaos/lib/http-chaos.go
deleted file mode 100644
index 3d2b116..0000000
--- a/chaoslib/litmus/http-chaos/lib/http-chaos.go
+++ /dev/null
@@ -1,292 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-//PrepareAndInjectChaos contains the preparation & injection steps
-func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error {
-
-	var err error
-	// Get the target pod details for the chaos execution
-	// if the target pods are not defined it will derive a random target pod list using the pods-affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-	//set up the tunables if provided in range
-	SetChaosTunables(experimentsDetails)
-
-	targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName; the helper pod needs its permissions to create events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not  experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode injects the http chaos in all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform http chaos
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{
-			"PodName":       pod.Name,
-			"NodeName":      pod.Spec.NodeName,
-			"ContainerName": experimentsDetails.TargetContainer,
-		})
-
-		runID := stringutils.GetRunID()
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID, args); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-		}
-
-		//Deleting all the helper pod for http chaos
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode injects the http chaos in all target applications in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	runID := stringutils.GetRunID()
-	targets := common.FilterPodsForNodes(targetPodList, experimentsDetails.TargetContainer)
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-	}
-
-	// Deleting all the helper pod for http chaos
-	log.Info("[Cleanup]: Deleting all the helper pod")
-	if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID, args string) error {
-
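-	// the helper must run privileged with NET_ADMIN and SYS_ADMIN (set in the
-	// security context below) so that nsenter can join the target container's
-	// network namespace and modify its iptables rules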
-	privilegedEnable := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			HostPID:                       true,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			NodeName:                      nodeName,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "cri-socket",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-			},
-
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name http-chaos",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets, args),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "cri-socket",
-							MountPath: experimentsDetails.SocketPath,
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-						Capabilities: &apiv1.Capabilities{
-							Add: []apiv1.Capability{
-								"NET_ADMIN",
-								"SYS_ADMIN",
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the env variables required for the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets, args string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("TOXIC_COMMAND", args).
-		SetEnv("NETWORK_INTERFACE", experimentsDetails.NetworkInterface).
-		SetEnv("TARGET_SERVICE_PORT", strconv.Itoa(experimentsDetails.TargetServicePort)).
-		SetEnv("PROXY_PORT", strconv.Itoa(experimentsDetails.ProxyPort)).
-		SetEnv("TOXICITY", strconv.Itoa(experimentsDetails.Toxicity)).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
-
-//SetChaosTunables will set up a random value within the given range of values
-//If the value is not provided as a range, it'll set up the initially provided value.
-func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
diff --git a/chaoslib/litmus/http-chaos/lib/latency/latency.go b/chaoslib/litmus/http-chaos/lib/latency/latency.go
deleted file mode 100644
index df9c112..0000000
--- a/chaoslib/litmus/http-chaos/lib/latency/latency.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package latency
-
-import (
-	"strconv"
-
-	http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/sirupsen/logrus"
-)
-
-//PodHttpLatencyChaos contains the steps to prepare and inject http latency chaos
-func PodHttpLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Target Port":      experimentsDetails.TargetServicePort,
-		"Listen Port":      experimentsDetails.ProxyPort,
-		"Sequence":         experimentsDetails.Sequence,
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Toxicity":         experimentsDetails.Toxicity,
-		"Latency":          experimentsDetails.Latency,
-	})
-
-	args := "-t latency -a latency=" + strconv.Itoa(experimentsDetails.Latency)
-	return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
diff --git a/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go b/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go
deleted file mode 100644
index bce1d26..0000000
--- a/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package modifybody
-
-import (
-	"fmt"
-	"math"
-	"strings"
-
-	http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpModifyBodyChaos contains the steps to prepare and inject http modify body chaos
-func PodHttpModifyBodyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// responseBodyMaxLength defines the max length of response body string to be printed. It is taken as
-	// the min of length of body and 120 characters to avoid printing large response body.
-	responseBodyMaxLength := int(math.Min(float64(len(experimentsDetails.ResponseBody)), 120))
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Target Port":      experimentsDetails.TargetServicePort,
-		"Listen Port":      experimentsDetails.ProxyPort,
-		"Sequence":         experimentsDetails.Sequence,
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Toxicity":         experimentsDetails.Toxicity,
-		"ResponseBody":     experimentsDetails.ResponseBody[0:responseBodyMaxLength],
-		"Content Type":     experimentsDetails.ContentType,
-		"Content Encoding": experimentsDetails.ContentEncoding,
-	})
-
-	args := fmt.Sprintf(
-		`-t modify_body -a body="%v" -a content_type=%v -a content_encoding=%v`,
-		EscapeQuotes(experimentsDetails.ResponseBody), experimentsDetails.ContentType, experimentsDetails.ContentEncoding)
-	return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
-
-// EscapeQuotes escapes the quotes in the given string
-func EscapeQuotes(input string) string {
-	output := strings.ReplaceAll(input, `\`, `\\`)
-	output = strings.ReplaceAll(output, `"`, `\"`)
-	return output
-}
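-
-// For example, EscapeQuotes(`say "hi"`) returns `say \"hi\"`, keeping the
-// -a body="..." argument intact when the command is passed through the shell.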
diff --git a/chaoslib/litmus/http-chaos/lib/reset/reset.go b/chaoslib/litmus/http-chaos/lib/reset/reset.go
deleted file mode 100644
index 20838ca..0000000
--- a/chaoslib/litmus/http-chaos/lib/reset/reset.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package reset
-
-import (
-	"strconv"
-
-	http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpResetPeerChaos contains the steps to prepare and inject http reset peer chaos
-func PodHttpResetPeerChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Target Port":      experimentsDetails.TargetServicePort,
-		"Listen Port":      experimentsDetails.ProxyPort,
-		"Sequence":         experimentsDetails.Sequence,
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Toxicity":         experimentsDetails.Toxicity,
-		"Reset Timeout":    experimentsDetails.ResetTimeout,
-	})
-
-	args := "-t reset_peer -a timeout=" + strconv.Itoa(experimentsDetails.ResetTimeout)
-	return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
diff --git a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go b/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go
deleted file mode 100644
index be541f7..0000000
--- a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package statuscode
-
-import (
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"math"
-	"math/rand"
-	"strconv"
-	"strings"
-	"time"
-
-	http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib"
-	body "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/modify-body"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/sirupsen/logrus"
-)
-
-var acceptedStatusCodes = []string{
-	"200", "201", "202", "204",
-	"300", "301", "302", "304", "307",
-	"400", "401", "403", "404",
-	"500", "501", "502", "503", "504",
-}
-
-// PodHttpStatusCodeChaos contains the steps to prepare and inject http status code chaos
-func PodHttpStatusCodeChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// responseBodyMaxLength defines the max length of the response body string to be printed. It is taken as
-	// the min of the body length and 120 characters, to avoid printing a large response body.
-	responseBodyMaxLength := int(math.Min(float64(len(experimentsDetails.ResponseBody)), 120))
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Target Port":        experimentsDetails.TargetServicePort,
-		"Listen Port":        experimentsDetails.ProxyPort,
-		"Sequence":           experimentsDetails.Sequence,
-		"PodsAffectedPerc":   experimentsDetails.PodsAffectedPerc,
-		"Toxicity":           experimentsDetails.Toxicity,
-		"StatusCode":         experimentsDetails.StatusCode,
-		"ModifyResponseBody": experimentsDetails.ModifyResponseBody,
-		"ResponseBody":       experimentsDetails.ResponseBody[0:responseBodyMaxLength],
-		"Content Type":       experimentsDetails.ContentType,
-		"Content Encoding":   experimentsDetails.ContentEncoding,
-	})
-
-	args := fmt.Sprintf(
-		`-t status_code -a status_code=%s -a modify_response_body=%d -a response_body="%v" -a content_type=%s -a content_encoding=%s`,
-		experimentsDetails.StatusCode, stringBoolToInt(experimentsDetails.ModifyResponseBody), body.EscapeQuotes(experimentsDetails.ResponseBody),
-		experimentsDetails.ContentType, experimentsDetails.ContentEncoding)
-	return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
-
-// GetStatusCode performs two functions:
-// 1. It checks whether a status code is provided; if not, it selects a random status code from the supported list
-// 2. It checks whether the provided status code is valid
-func GetStatusCode(statusCode string) (string, error) {
-
-	rand.Seed(time.Now().Unix())
-
-	if statusCode == "" {
-		log.Info("[Info]: No status code provided. Selecting a status code randomly from the supported status codes")
-		return acceptedStatusCodes[rand.Intn(len(acceptedStatusCodes))], nil
-	}
-
-	statusCodeList := strings.Split(statusCode, ",")
-	if len(statusCodeList) == 1 {
-		if checkStatusCode(statusCodeList[0], acceptedStatusCodes) {
-			return statusCodeList[0], nil
-		}
-	} else {
-		acceptedCodes := getAcceptedCodesInList(statusCodeList, acceptedStatusCodes)
-		if len(acceptedCodes) == 0 {
-			return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("invalid status code: %s", statusCode)}
-		}
-		return acceptedCodes[rand.Intn(len(acceptedCodes))], nil
-	}
-	return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("status code '%s' is not supported. Supported status codes are: %v", statusCode, acceptedStatusCodes)}
-}
-
-// getAcceptedCodesInList returns the list of accepted status codes from a list of status codes
-func getAcceptedCodesInList(statusCodeList []string, acceptedStatusCodes []string) []string {
-	var acceptedCodes []string
-	for _, statusCode := range statusCodeList {
-		if checkStatusCode(statusCode, acceptedStatusCodes) {
-			acceptedCodes = append(acceptedCodes, statusCode)
-		}
-	}
-	return acceptedCodes
-}
-
-// checkStatusCode checks if the provided status code is present in acceptedStatusCode list
-func checkStatusCode(statusCode string, acceptedStatusCodes []string) bool {
-	for _, code := range acceptedStatusCodes {
-		if code == statusCode {
-			return true
-		}
-	}
-	return false
-}
-
-// stringBoolToInt converts a boolean string to an int
-func stringBoolToInt(b string) int {
-	parsedBool, err := strconv.ParseBool(b)
-	if err != nil {
-		return 0
-	}
-	if parsedBool {
-		return 1
-	}
-	return 0
-}
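
A compressed standalone sketch of the selection logic above: the comma-separated input is filtered against the supported codes and one survivor is chosen at random. The supported list here is abbreviated to four codes for brevity; the real list above carries 18.

package main

import (
	"fmt"
	"math/rand"
	"strings"
)

var supported = []string{"200", "404", "500", "503"} // abbreviated

func pick(input string) (string, error) {
	var valid []string
	for _, c := range strings.Split(input, ",") {
		for _, s := range supported {
			if c == s {
				valid = append(valid, c)
			}
		}
	}
	if len(valid) == 0 {
		return "", fmt.Errorf("invalid status code: %s", input)
	}
	return valid[rand.Intn(len(valid))], nil
}

func main() {
	code, err := pick("404,418,503") // 418 is filtered out as unsupported
	fmt.Println(code, err)           // prints 404 or 503, <nil>
}
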
diff --git a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go b/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go
deleted file mode 100644
index 9a78b18..0000000
--- a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/workloads"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PreparePodDelete contains the preparation steps before chaos injection
-func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.ChaoslibDetail.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.ChaoslibDetail.RampTime)
-		common.WaitForDuration(experimentsDetails.ChaoslibDetail.RampTime)
-	}
-
-	switch strings.ToLower(experimentsDetails.ChaoslibDetail.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.ChaoslibDetail.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.ChaoslibDetail.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.ChaoslibDetail.RampTime)
-		common.WaitForDuration(experimentsDetails.ChaoslibDetail.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode deletes the kafka broker pods in serial mode (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	GracePeriod := int64(0)
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaoslibDetail.ChaosDuration {
-		// Get the target pod details for the chaos execution
-		// if the target pods are not defined, it derives a random target pod list using the pod-affected percentage
-		if experimentsDetails.KafkaBroker == "" && chaosDetails.AppDetail == nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or KAFKA_BROKER"}
-		}
-
-		podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.PodsAffectedPerc)
-		targetPodList, err := common.GetPodList(experimentsDetails.KafkaBroker, podsAffectedPerc, clients, chaosDetails)
-		if err != nil {
-			return err
-		}
-
-		// deriving the parent name of the target resources
-		for _, pod := range targetPodList.Items {
-			kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient)
-			if err != nil {
-				return err
-			}
-			common.SetParentName(parentName, kind, pod.Namespace, chaosDetails)
-		}
-		for _, target := range chaosDetails.ParentsResources {
-			common.SetTargets(target.Name, "targeted", target.Kind, chaosDetails)
-		}
-
-		if experimentsDetails.ChaoslibDetail.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		//Deleting the application pod
-		for _, pod := range targetPodList.Items {
-
-			log.InfoWithValues("[Info]: Killing the following pods", logrus.Fields{
-				"PodName": pod.Name})
-
-			if experimentsDetails.ChaoslibDetail.Force {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{GracePeriodSeconds: &GracePeriod})
-			} else {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
-			}
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())}
-			}
-
-			switch chaosDetails.Randomness {
-			case true:
-				if err := common.RandomInterval(experimentsDetails.ChaoslibDetail.ChaosInterval); err != nil {
-					return stacktrace.Propagate(err, "could not get random chaos interval")
-				}
-			default:
-				//Waiting for the chaos interval after chaos injection
-				if experimentsDetails.ChaoslibDetail.ChaosInterval != "" {
-					log.Infof("[Wait]: Wait for the chaos interval %vs", experimentsDetails.ChaoslibDetail.ChaosInterval)
-					waitTime, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.ChaosInterval)
-					common.WaitForDuration(waitTime)
-				}
-			}
-
-			//Verify the status of pod after the chaos injection
-			log.Info("[Status]: Verification for the recreation of application pod")
-			for _, parent := range chaosDetails.ParentsResources {
-				target := types.AppDetails{
-					Names:     []string{parent.Name},
-					Kind:      parent.Kind,
-					Namespace: parent.Namespace,
-				}
-				if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-					return stacktrace.Propagate(err, "could not check pod statuses by workload names")
-				}
-			}
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-	log.Infof("[Completion]: %v chaos is done", experimentsDetails.ExperimentName)
-
-	return nil
-}
-
-// injectChaosInParallelMode deletes the kafka broker pods in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	GracePeriod := int64(0)
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begins
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaoslibDetail.ChaosDuration {
-		// Get the target pod details for the chaos execution
-		// if the target pods are not defined, it derives a random target pod list using the pod-affected percentage
-		if experimentsDetails.KafkaBroker == "" && chaosDetails.AppDetail == nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or KAFKA_BROKER"}
-		}
-		podsAffectedPerc, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.PodsAffectedPerc)
-		targetPodList, err := common.GetPodList(experimentsDetails.KafkaBroker, podsAffectedPerc, clients, chaosDetails)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get target pods")
-		}
-
-		// deriving the parent name of the target resources
-		for _, pod := range targetPodList.Items {
-			kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient)
-			if err != nil {
-				return stacktrace.Propagate(err, "could not get pod owner name and kind")
-			}
-			common.SetParentName(parentName, kind, pod.Namespace, chaosDetails)
-		}
-		for _, target := range chaosDetails.ParentsResources {
-			common.SetTargets(target.Name, "targeted", target.Kind, chaosDetails)
-		}
-
-		if experimentsDetails.ChaoslibDetail.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		//Deleting the application pod
-		for _, pod := range targetPodList.Items {
-
-			log.InfoWithValues("[Info]: Killing the following pods", logrus.Fields{
-				"PodName": pod.Name})
-
-			if experimentsDetails.ChaoslibDetail.Force {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{GracePeriodSeconds: &GracePeriod})
-			} else {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
-			}
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())}
-			}
-		}
-
-		switch chaosDetails.Randomness {
-		case true:
-			if err := common.RandomInterval(experimentsDetails.ChaoslibDetail.ChaosInterval); err != nil {
-				return stacktrace.Propagate(err, "could not get random chaos interval")
-			}
-		default:
-			//Waiting for the chaos interval after chaos injection
-			if experimentsDetails.ChaoslibDetail.ChaosInterval != "" {
-				log.Infof("[Wait]: Wait for the chaos interval %vs", experimentsDetails.ChaoslibDetail.ChaosInterval)
-				waitTime, _ := strconv.Atoi(experimentsDetails.ChaoslibDetail.ChaosInterval)
-				common.WaitForDuration(waitTime)
-			}
-		}
-
-		//Verify the status of pod after the chaos injection
-		log.Info("[Status]: Verification for the recreation of application pod")
-		for _, parent := range chaosDetails.ParentsResources {
-			target := types.AppDetails{
-				Names:     []string{parent.Name},
-				Kind:      parent.Kind,
-				Namespace: parent.Namespace,
-			}
-			if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-				return stacktrace.Propagate(err, "could not check pod statuses by workload names")
-			}
-		}
-
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-
-	log.Infof("[Completion]: %v chaos is done", experimentsDetails.ExperimentName)
-
-	return nil
-}
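
Both modes above share the same outer control flow: a loop that keeps selecting and deleting target pods, waiting a chaos interval between rounds, until the total chaos duration has elapsed. A minimal sketch of that pattern, with hypothetical durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	chaosDuration := 5 * time.Second // hypothetical TOTAL_CHAOS_DURATION
	chaosInterval := 1 * time.Second // hypothetical CHAOS_INTERVAL
	start := time.Now()

	// keep injecting rounds of pod deletion until the duration elapses
	for time.Since(start) < chaosDuration {
		// in the real library: select targets, delete them, verify recreation
		fmt.Println("deleting one round of target pods")
		time.Sleep(chaosInterval)
	}
	fmt.Println("chaos duration over")
}
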
diff --git a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go b/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go
deleted file mode 100644
index 8de74c5..0000000
--- a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PrepareKubeletKill contains the preparation steps before chaos injection
-func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var err error
-	if experimentsDetails.TargetNode == "" {
-		//Select node for kubelet-service-kill
-		experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node name")
-		}
-	}
-
-	log.InfoWithValues("[Info]: Details of node under chaos injection", logrus.Fields{
-		"NodeName": experimentsDetails.TargetNode,
-	})
-
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + experimentsDetails.TargetNode + " node"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	// Creating the helper pod to perform the kubelet-service-kill chaos
-	if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil {
-		return stacktrace.Propagate(err, "could not create helper pod")
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-	//Checking the status of helper pod
-	log.Info("[Status]: Checking the status of the helper pod")
-	if err = status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	common.SetTargets(experimentsDetails.TargetNode, "targeted", "node", chaosDetails)
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return err
-		}
-	}
-
-	// Checking for the node to be in not-ready state
-	log.Info("[Status]: Check for the node to be in NotReady state")
-	if err = status.CheckNodeNotReadyState(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check for NOT READY state")
-	}
-
-	// Wait till the completion of helper pod
-	log.Info("[Wait]: Waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteHelperPodBasedOnJobCleanupPolicy(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-	}
-
-	//Deleting the helper pod
-	log.Info("[Cleanup]: Deleting the helper pod")
-	if err = common.DeletePod(experimentsDetails.ExperimentName+"-helper-"+experimentsDetails.RunID, appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error {
-
-	privileged := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			Name:        experimentsDetails.ExperimentName + "-helper-" + experimentsDetails.RunID,
-			Namespace:   experimentsDetails.ChaosNamespace,
-			Labels:      common.GetHelperLabels(chaosDetails.Labels, experimentsDetails.RunID, experimentsDetails.ExperimentName),
-			Annotations: chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNodeName,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "bus",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: "/var/run",
-						},
-					},
-				},
-				{
-					Name: "root",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: "/",
-						},
-					},
-				},
-			},
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"sleep 10 && systemctl stop kubelet && sleep " + strconv.Itoa(experimentsDetails.ChaosDuration) + " && systemctl start kubelet",
-					},
-					Resources: chaosDetails.Resources,
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "bus",
-							MountPath: "/var/run",
-						},
-						{
-							Name:      "root",
-							MountPath: "/node",
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privileged,
-					},
-					TTY: true,
-				},
-			},
-			Tolerations: []apiv1.Toleration{
-				{
-					Key:               "node.kubernetes.io/not-ready",
-					Operator:          apiv1.TolerationOperator("Exists"),
-					Effect:            apiv1.TaintEffect("NoExecute"),
-					TolerationSeconds: ptrint64(int64(experimentsDetails.ChaosDuration) + 60),
-				},
-				{
-					Key:               "node.kubernetes.io/unreachable",
-					Operator:          apiv1.TolerationOperator("Exists"),
-					Effect:            apiv1.TaintEffect("NoExecute"),
-					TolerationSeconds: ptrint64(int64(experimentsDetails.ChaosDuration) + 60),
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-func ptrint64(p int64) *int64 {
-	return &p
-}
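
The core of the helper pod above is the single shell command handed to /bin/bash -c: stop the kubelet, hold for the chaos duration, then restart it. The tolerations exist so the helper survives the NotReady/Unreachable taints it causes on its own node. A standalone sketch of the command assembly, with a hypothetical duration:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	chaosDuration := 60 // hypothetical TOTAL_CHAOS_DURATION, in seconds
	cmd := "sleep 10 && systemctl stop kubelet && sleep " +
		strconv.Itoa(chaosDuration) + " && systemctl start kubelet"
	fmt.Println(cmd) // the string handed to /bin/bash -c in the helper pod
}
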
diff --git a/chaoslib/litmus/network-chaos/helper/netem.go b/chaoslib/litmus/network-chaos/helper/netem.go
deleted file mode 100644
index c0193f5..0000000
--- a/chaoslib/litmus/network-chaos/helper/netem.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package helper
-
-import (
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/exec"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-const (
-	qdiscNotFound    = "Cannot delete qdisc with handle of zero"
-	qdiscNoFileFound = "RTNETLINK answers: No such file or directory"
-)
-
-var (
-	err                                              error
-	inject, abort                                    chan os.Signal
-	sPorts, dPorts, whitelistDPorts, whitelistSPorts []string
-)
-
-// Helper injects the network chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Fetching all the ENV passed for the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	err := preparePodNetworkChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails)
-	if err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-
-}
-
-// preparePodNetworkChaos contains the preparation steps before chaos injection
-func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-
-	targetEnv := os.Getenv("TARGETS")
-	if targetEnv == "" {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: "no target found, provide at least one target"}
-	}
-
-	var targets []targetDetails
-
-	for _, t := range strings.Split(targetEnv, ";") {
-		target := strings.Split(t, ":")
-		if len(target) != 4 {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: fmt.Sprintf("unsupported target format: '%v'", t)}
-		}
-		td := targetDetails{
-			Name:            target[0],
-			Namespace:       target[1],
-			TargetContainer: target[2],
-			DestinationIps:  getDestIps(target[3]),
-			Source:          chaosDetails.ChaosPodName,
-		}
-
-		td.ContainerId, err = common.GetRuntimeBasedContainerID(experimentsDetails.ContainerRuntime, experimentsDetails.SocketPath, td.Name, td.Namespace, td.TargetContainer, clients, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container id")
-		}
-
-		// extract out the pid of the target container
-		td.Pid, err = common.GetPauseAndSandboxPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container pid")
-		}
-
-		targets = append(targets, td)
-	}
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(targets, experimentsDetails.NetworkInterface, resultDetails.Name, chaosDetails.ChaosNamespace)
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(1)
-	default:
-	}
-
-	for _, t := range targets {
-		// injecting network chaos inside target container
-		if err = injectChaos(experimentsDetails.NetworkInterface, t); err != nil {
-			return stacktrace.Propagate(err, "could not inject chaos")
-		}
-		log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil {
-			if _, revertErr := killnetem(t, experimentsDetails.NetworkInterface); revertErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())}
-			}
-			return stacktrace.Propagate(err, "could not annotate chaosresult")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injected " + experimentsDetails.ExperimentName + " chaos on application pods"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	log.Infof("[Chaos]: Waiting for %vs", experimentsDetails.ChaosDuration)
-
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	log.Info("[Chaos]: duration is over, reverting chaos")
-
-	var errList []string
-	for _, t := range targets {
-		// cleaning the netem process after chaos injection
-		killed, err := killnetem(t, experimentsDetails.NetworkInterface)
-		if !killed && err != nil {
-			errList = append(errList, err.Error())
-			continue
-		}
-		if killed && err == nil {
-			if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-				errList = append(errList, err.Error())
-			}
-		}
-	}
-
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
-
-// injectChaos injects the network chaos in the target container.
-// It uses the nsenter command to enter the network namespace of the target
-// container and executes the netem command inside it.
-func injectChaos(netInterface string, target targetDetails) error {
-
-	netemCommands := os.Getenv("NETEM_COMMAND")
-
-	if len(target.DestinationIps) == 0 && len(sPorts) == 0 && len(dPorts) == 0 && len(whitelistDPorts) == 0 && len(whitelistSPorts) == 0 {
-		tc := fmt.Sprintf("sudo nsenter -t %d -n tc qdisc replace dev %s root netem %v", target.Pid, netInterface, netemCommands)
-		log.Info(tc)
-		if err := common.RunBashCommand(tc, "failed to create tc rules", target.Source); err != nil {
-			return err
-		}
-	} else {
-
-		// Create a priority-based queue
-		// This instantly creates classes 1:1, 1:2, 1:3
-		priority := fmt.Sprintf("sudo nsenter -t %v -n tc qdisc replace dev %v root handle 1: prio", target.Pid, netInterface)
-		log.Info(priority)
-		if err := common.RunBashCommand(priority, "failed to create priority-based queue", target.Source); err != nil {
-			return err
-		}
-
-		// Add queueing discipline for 1:3 class.
-		// No traffic is going through 1:3 yet
-		traffic := fmt.Sprintf("sudo nsenter -t %v -n tc qdisc replace dev %v parent 1:3 netem %v", target.Pid, netInterface, netemCommands)
-		log.Info(traffic)
-		if err := common.RunBashCommand(traffic, "failed to create netem queueing discipline", target.Source); err != nil {
-			return err
-		}
-
-		if len(whitelistDPorts) != 0 || len(whitelistSPorts) != 0 {
-			for _, port := range whitelistDPorts {
-				//redirect traffic to specific dport through band 2
-				tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 2 u32 match ip dport %v 0xffff flowid 1:2", target.Pid, netInterface, port)
-				log.Info(tc)
-				if err := common.RunBashCommand(tc, "failed to create whitelist dport match filters", target.Source); err != nil {
-					return err
-				}
-			}
-
-			for _, port := range whitelistSPorts {
-				//redirect traffic to specific sport through band 2
-				tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 2 u32 match ip sport %v 0xffff flowid 1:2", target.Pid, netInterface, port)
-				log.Info(tc)
-				if err := common.RunBashCommand(tc, "failed to create whitelist sport match filters", target.Source); err != nil {
-					return err
-				}
-			}
-
-			tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip dst 0.0.0.0/0 flowid 1:3", target.Pid, netInterface)
-			log.Info(tc)
-			if err := common.RunBashCommand(tc, "failed to create rule for all ports match filters", target.Source); err != nil {
-				return err
-			}
-		} else {
-
-			for _, ip := range target.DestinationIps {
-				// redirect traffic to specific IP through band 3
-				tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip dst %v flowid 1:3", target.Pid, netInterface, ip)
-				if strings.Contains(ip, ":") {
-					tc = fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip6 dst %v flowid 1:3", target.Pid, netInterface, ip)
-				}
-				log.Info(tc)
-				if err := common.RunBashCommand(tc, "failed to create destination ips match filters", target.Source); err != nil {
-					return err
-				}
-			}
-
-			for _, port := range sPorts {
-				//redirect traffic to specific sport through band 3
-				tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip sport %v 0xffff flowid 1:3", target.Pid, netInterface, port)
-				log.Info(tc)
-				if err := common.RunBashCommand(tc, "failed to create source ports match filters", target.Source); err != nil {
-					return err
-				}
-			}
-
-			for _, port := range dPorts {
-				//redirect traffic to specific dport through band 3
-				tc := fmt.Sprintf("sudo nsenter -t %v -n tc filter add dev %v protocol ip parent 1:0 prio 3 u32 match ip dport %v 0xffff flowid 1:3", target.Pid, netInterface, port)
-				log.Info(tc)
-				if err := common.RunBashCommand(tc, "failed to create destination ports match filters", target.Source); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	log.Infof("chaos injected successfully on {pod: %v, container: %v}", target.Name, target.TargetContainer)
-	return nil
-}
-
-// killnetem kills the netem process for the given target container
-func killnetem(target targetDetails, networkInterface string) (bool, error) {
-
-	tc := fmt.Sprintf("sudo nsenter -t %d -n tc qdisc delete dev %s root", target.Pid, networkInterface)
-	cmd := exec.Command("/bin/bash", "-c", tc)
-	out, err := cmd.CombinedOutput()
-
-	if err != nil {
-		log.Info(cmd.String())
-		// ignore the error if the qdisc doesn't exist inside the target container
-		if strings.Contains(string(out), qdiscNotFound) || strings.Contains(string(out), qdiscNoFileFound) {
-			log.Warn("The network chaos process has already been removed")
-			return true, nil
-		}
-		log.Error(err.Error())
-		return false, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: target.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", target.Name, target.Namespace, target.TargetContainer), Reason: fmt.Sprintf("failed to revert network faults: %s", string(out))}
-	}
-	log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", target.Name, target.Namespace, target.TargetContainer)
-	return true, nil
-}
-
-type targetDetails struct {
-	Name            string
-	Namespace       string
-	ServiceMesh     string
-	DestinationIps  []string
-	TargetContainer string
-	ContainerId     string
-	Pid             int
-	Source          string
-}
-
-// getENV fetches all the env variables from the runner pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", ""))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.NetworkInterface = types.Getenv("NETWORK_INTERFACE", "")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-	experimentDetails.DestinationIPs = types.Getenv("DESTINATION_IPS", "")
-	experimentDetails.SourcePorts = types.Getenv("SOURCE_PORTS", "")
-	experimentDetails.DestinationPorts = types.Getenv("DESTINATION_PORTS", "")
-
-	if strings.TrimSpace(experimentDetails.DestinationPorts) != "" {
-		if strings.HasPrefix(strings.TrimSpace(experimentDetails.DestinationPorts), "!") {
-			whitelistDPorts = strings.Split(strings.TrimPrefix(strings.TrimSpace(experimentDetails.DestinationPorts), "!"), ",")
-		} else {
-			dPorts = strings.Split(strings.TrimSpace(experimentDetails.DestinationPorts), ",")
-		}
-	}
-	if strings.TrimSpace(experimentDetails.SourcePorts) != "" {
-		if strings.HasPrefix(strings.TrimSpace(experimentDetails.SourcePorts), "!") {
-			whitelistSPorts = strings.Split(strings.TrimPrefix(strings.TrimSpace(experimentDetails.SourcePorts), "!"), ",")
-		} else {
-			sPorts = strings.Split(strings.TrimSpace(experimentDetails.SourcePorts), ",")
-		}
-	}
-}
-
-// abortWatcher continuously watches for the abort signals
-func abortWatcher(targets []targetDetails, networkInterface, resultName, chaosNS string) {
-
-	<-abort
-	log.Info("[Chaos]: Killing process started because of terminated signal received")
-	log.Info("Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		for _, t := range targets {
-			killed, err := killnetem(t, networkInterface)
-			if err != nil && !killed {
-				log.Errorf("unable to kill netem process, err :%v", err)
-				continue
-			}
-			if killed && err == nil {
-				if err = result.AnnotateChaosResult(resultName, chaosNS, "reverted", "pod", t.Name); err != nil {
-					log.Errorf("unable to annotate the chaosresult, err :%v", err)
-				}
-			}
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("Chaos Revert Completed")
-	os.Exit(1)
-}
-
-func getDestIps(serviceMesh string) []string {
-	var (
-		destIps   = os.Getenv("DESTINATION_IPS")
-		uniqueIps []string
-	)
-
-	if serviceMesh == "true" {
-		destIps = os.Getenv("DESTINATION_IPS_SERVICE_MESH")
-	}
-
-	if strings.TrimSpace(destIps) == "" {
-		return nil
-	}
-
-	ips := strings.Split(strings.TrimSpace(destIps), ",")
-
-	// removing duplicate IPs from the list, if any
-	for i := range ips {
-		if !common.Contains(ips[i], uniqueIps) {
-			uniqueIps = append(uniqueIps, ips[i])
-		}
-	}
-
-	return uniqueIps
-}
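
The filtered path in injectChaos above boils down to three kinds of tc invocations run via nsenter: create a prio qdisc at the root, attach netem to band 1:3, and add u32 filters that steer matching traffic into that band. A sketch that prints the generated commands instead of executing them; the PID, interface, netem spec, and IP are hypothetical:

package main

import "fmt"

func main() {
	pid, iface, netem := 12345, "eth0", "delay 2000ms" // hypothetical values

	// 1. priority qdisc at the root; bands 1:1-1:3 are created implicitly
	fmt.Printf("sudo nsenter -t %d -n tc qdisc replace dev %s root handle 1: prio\n", pid, iface)
	// 2. attach netem to band 1:3; nothing flows through it yet
	fmt.Printf("sudo nsenter -t %d -n tc qdisc replace dev %s parent 1:3 netem %s\n", pid, iface, netem)
	// 3. steer traffic for each destination IP into band 1:3
	for _, ip := range []string{"10.0.0.5"} {
		fmt.Printf("sudo nsenter -t %d -n tc filter add dev %s protocol ip parent 1:0 prio 3 u32 match ip dst %s flowid 1:3\n", pid, iface, ip)
	}
}
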
diff --git a/chaoslib/litmus/network-chaos/lib/corruption/corruption.go b/chaoslib/litmus/network-chaos/lib/corruption/corruption.go
deleted file mode 100644
index 78974f0..0000000
--- a/chaoslib/litmus/network-chaos/lib/corruption/corruption.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package corruption
-
-import (
-	network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// PodNetworkCorruptionChaos contains the steps to prepare and inject chaos
-func PodNetworkCorruptionChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	args := "corrupt " + experimentsDetails.NetworkPacketCorruptionPercentage
-	return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
diff --git a/chaoslib/litmus/network-chaos/lib/duplication/duplication.go b/chaoslib/litmus/network-chaos/lib/duplication/duplication.go
deleted file mode 100644
index a5705f0..0000000
--- a/chaoslib/litmus/network-chaos/lib/duplication/duplication.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package duplication
-
-import (
-	network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// PodNetworkDuplicationChaos contains the steps to prepare and inject chaos
-func PodNetworkDuplicationChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	args := "duplicate " + experimentsDetails.NetworkPacketDuplicationPercentage
-	return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
diff --git a/chaoslib/litmus/network-chaos/lib/latency/latency.go b/chaoslib/litmus/network-chaos/lib/latency/latency.go
deleted file mode 100644
index c5482f1..0000000
--- a/chaoslib/litmus/network-chaos/lib/latency/latency.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package latency
-
-import (
-	"strconv"
-
-	network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// PodNetworkLatencyChaos contains the steps to prepare and inject chaos
-func PodNetworkLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	args := "delay " + strconv.Itoa(experimentsDetails.NetworkLatency) + "ms " + strconv.Itoa(experimentsDetails.Jitter) + "ms"
-	return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
diff --git a/chaoslib/litmus/network-chaos/lib/loss/loss.go b/chaoslib/litmus/network-chaos/lib/loss/loss.go
deleted file mode 100644
index 419f37d..0000000
--- a/chaoslib/litmus/network-chaos/lib/loss/loss.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package loss
-
-import (
-	network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// PodNetworkLossChaos contains the steps to prepare and inject chaos
-func PodNetworkLossChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	args := "loss " + experimentsDetails.NetworkPacketLossPercentage
-	return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args)
-}
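
Taken together, the four thin wrappers above differ only in the netem argument string they construct before delegating to PrepareAndInjectChaos. A sketch of the four strings, with hypothetical tunable values:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	corruptionPerc, duplicationPerc, lossPerc := "100", "100", "100" // hypothetical percentages
	latencyMs, jitterMs := 2000, 0                                   // hypothetical values

	fmt.Println("corrupt " + corruptionPerc)
	fmt.Println("duplicate " + duplicationPerc)
	fmt.Println("delay " + strconv.Itoa(latencyMs) + "ms " + strconv.Itoa(jitterMs) + "ms")
	fmt.Println("loss " + lossPerc)
}
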
diff --git a/chaoslib/litmus/network-chaos/lib/network-chaos.go b/chaoslib/litmus/network-chaos/lib/network-chaos.go
deleted file mode 100644
index e956e34..0000000
--- a/chaoslib/litmus/network-chaos/lib/network-chaos.go
+++ /dev/null
@@ -1,514 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var serviceMesh = []string{"istio", "envoy"}
-var destIpsSvcMesh string
-var destIps string
-
-// PrepareAndInjectChaos contains the preparation & injection steps
-func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error {
-
-	var err error
-	// Get the target pod details for the chaos execution
-	// if the target pods are not defined, it derives a random target pod list using the pod-affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-	//set up the tunables if provided in range
-	SetChaosTunables(experimentsDetails)
-	logExperimentFields(experimentsDetails)
-
-	targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName; the helper pod needs this permission to create the events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode injects the network chaos in all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform network chaos
-	for _, pod := range targetPodList.Items {
-
-		serviceMesh, err := setDestIps(pod, experimentsDetails, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not set destination ips")
-		}
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		runID := stringutils.GetRunID()
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer, serviceMesh), pod.Spec.NodeName, runID, args); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		//checking the status of the helper pods; wait till the pods come to running state, else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-		}
-
-		//Deleting all the helper pods for network chaos
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode injects the network chaos in all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-	var err error
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	targets, err := filterPodsForNodes(targetPodList, experimentsDetails, clients)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not filter target pods")
-	}
-
-	runID := stringutils.GetRunID()
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s:%s", k.Name, k.Namespace, k.TargetContainer, k.ServiceMesh))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	//checking the status of the helper pods; wait till the pods come to running state, else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-	}
-
-	//Deleting all the helper pods for network chaos
-	log.Info("[Cleanup]: Deleting all the helper pods")
-	if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets string, nodeName, runID, args string) error {
-
-	privilegedEnable := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
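-	// the helper runs with hostPID, privileged mode, and the CRI socket mounted so it
-	// can reach the target container's network namespace and apply the netem rules there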
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			HostPID:                       true,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			NodeName:                      nodeName,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "cri-socket",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-			},
-
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name network-chaos",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets, args),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "cri-socket",
-							MountPath: experimentsDetails.SocketPath,
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-						Capabilities: &apiv1.Capabilities{
-							Add: []apiv1.Capability{
-								"NET_ADMIN",
-								"SYS_ADMIN",
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the ENV vars required for the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string, args string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
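-	// destIps and destIpsSvcMesh are package-level values that are resolved once in
-	// setDestIps and reused for every helper pod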
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnv("NETEM_COMMAND", args).
-		SetEnv("NETWORK_INTERFACE", experimentsDetails.NetworkInterface).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("INSTANCE_ID", experimentsDetails.InstanceID).
-		SetEnv("DESTINATION_IPS", destIps).
-		SetEnv("DESTINATION_IPS_SERVICE_MESH", destIpsSvcMesh).
-		SetEnv("SOURCE_PORTS", experimentsDetails.SourcePorts).
-		SetEnv("DESTINATION_PORTS", experimentsDetails.DestinationPorts).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
-
-type targetsDetails struct {
-	Target []target
-}
-
-type target struct {
-	Namespace       string
-	Name            string
-	TargetContainer string
-	ServiceMesh     string
-}
-
-// GetTargetIps returns the comma-separated target IPs
-// It takes the IPs provided by the user (if any)
-// and appends the IPs resolved from the target hosts, if provided
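-// e.g. targetIPs "10.0.0.1" plus a host resolving to "10.0.0.2" yields "10.0.0.1,10.0.0.2" (illustrative values)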
-func GetTargetIps(targetIPs, targetHosts string, clients clients.ClientSets, serviceMesh bool) (string, error) {
-
-	ipsFromHost, err := getIpsForTargetHosts(targetHosts, clients, serviceMesh)
-	if err != nil {
-		return "", stacktrace.Propagate(err, "could not get ips from target hosts")
-	}
-	if targetIPs == "" {
-		targetIPs = ipsFromHost
-	} else if ipsFromHost != "" {
-		targetIPs = targetIPs + "," + ipsFromHost
-	}
-	return targetIPs, nil
-}
-
-// getPodIPFromService derives the IPs of the pods backing a kubernetes service
-func getPodIPFromService(host string, clients clients.ClientSets) ([]string, error) {
-	var ips []string
-	svcFields := strings.Split(host, ".")
-	if len(svcFields) != 5 {
-		return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{host: %s}", host), Reason: "provide a valid service FQDN in the '<svc-name>.<namespace>.svc.cluster.local' format"}
-	}
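-	// e.g. "my-svc.my-ns.svc.cluster.local" splits into the 5 expected fields,
-	// giving svcName "my-svc" and svcNs "my-ns" (illustrative names)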
-	svcName, svcNs := svcFields[0], svcFields[1]
-	svc, err := clients.KubeClient.CoreV1().Services(svcNs).Get(context.Background(), svcName, v1.GetOptions{})
-	if err != nil {
-		if k8serrors.IsForbidden(err) {
-			log.Warnf("forbidden - failed to get %v service in %v namespace, err: %v", svcName, svcNs, err)
-			return ips, nil
-		}
-		return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{serviceName: %s, namespace: %s}", svcName, svcNs), Reason: err.Error()}
-	}
-
-	if svc.Spec.Selector == nil {
-		return nil, nil
-	}
-	var svcSelector string
-	for k, v := range svc.Spec.Selector {
-		if svcSelector == "" {
-			svcSelector += fmt.Sprintf("%s=%s", k, v)
-			continue
-		}
-		svcSelector += fmt.Sprintf(",%s=%s", k, v)
-	}
-
-	pods, err := clients.KubeClient.CoreV1().Pods(svcNs).List(context.Background(), v1.ListOptions{LabelSelector: svcSelector})
-	if err != nil {
-		return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{svcName: %s, podLabel: %s, namespace: %s}", svcName, svcSelector, svcNs), Reason: fmt.Sprintf("failed to derive pods from service: %s", err.Error())}
-	}
-	for _, p := range pods.Items {
-		ips = append(ips, p.Status.PodIP)
-	}
-
-	return ips, nil
-}
-
-// getIpsForTargetHosts resolves IP addresses for a comma-separated list of target hosts and returns the comma-separated IPs
-func getIpsForTargetHosts(targetHosts string, clients clients.ClientSets, serviceMesh bool) (string, error) {
-	if targetHosts == "" {
-		return "", nil
-	}
-	hosts := strings.Split(targetHosts, ",")
-	finalHosts := ""
-	var commaSeparatedIPs []string
-	for i := range hosts {
-		hosts[i] = strings.TrimSpace(hosts[i])
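-		// service FQDNs are resolved to pod IPs via the Kubernetes API when service mesh
-		// is enabled; all other hosts are resolved through a plain DNS lookup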
-		if strings.Contains(hosts[i], "svc.cluster.local") && serviceMesh {
-			ips, err := getPodIPFromService(hosts[i], clients)
-			if err != nil {
-				return "", stacktrace.Propagate(err, "could not get pod ips from service")
-			}
-			log.Infof("Host: {%v}, IP address: {%v}", hosts[i], ips)
-			commaSeparatedIPs = append(commaSeparatedIPs, ips...)
-			if finalHosts == "" {
-				finalHosts = hosts[i]
-			} else {
-				finalHosts = finalHosts + "," + hosts[i]
-			}
-			finalHosts = finalHosts + "," + hosts[i]
-			continue
-		}
-		ips, err := net.LookupIP(hosts[i])
-		if err != nil {
-			log.Warnf("Unknown host: {%v}, it won't be included in the scope of chaos", hosts[i])
-		} else {
-			for j := range ips {
-				log.Infof("Host: {%v}, IP address: {%v}", hosts[i], ips[j])
-				commaSeparatedIPs = append(commaSeparatedIPs, ips[j].String())
-			}
-			if finalHosts == "" {
-				finalHosts = hosts[i]
-			} else {
-				finalHosts = finalHosts + "," + hosts[i]
-			}
-		}
-	}
-	if len(commaSeparatedIPs) == 0 {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("hosts: %s", targetHosts), Reason: "provided hosts are invalid, unable to resolve"}
-	}
-	log.Infof("Injecting chaos on {%v} hosts", finalHosts)
-	return strings.Join(commaSeparatedIPs, ","), nil
-}
-
-//SetChaosTunables picks a random value within the provided range of values
-//If a single value is provided instead of a range, it is used as-is.
-func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.NetworkPacketLossPercentage = common.ValidateRange(experimentsDetails.NetworkPacketLossPercentage)
-	experimentsDetails.NetworkPacketCorruptionPercentage = common.ValidateRange(experimentsDetails.NetworkPacketCorruptionPercentage)
-	experimentsDetails.NetworkPacketDuplicationPercentage = common.ValidateRange(experimentsDetails.NetworkPacketDuplicationPercentage)
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
-
-// isServiceMeshEnabledForPod checks whether the pod contains a service mesh sidecar
-func isServiceMeshEnabledForPod(pod apiv1.Pod) bool {
-	for _, c := range pod.Spec.Containers {
-		if common.SubStringExistsInSlice(c.Name, serviceMesh) {
-			return true
-		}
-	}
-	return false
-}
-
-func setDestIps(pod apiv1.Pod, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-	var err error
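-	// the returned string is the target's ServiceMesh flag ("true"/"false"), which is
-	// later encoded into the TARGETS entry consumed by the helper pod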
-	if isServiceMeshEnabledForPod(pod) {
-		if destIpsSvcMesh == "" {
-			destIpsSvcMesh, err = GetTargetIps(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, clients, true)
-			if err != nil {
-				return "false", err
-			}
-		}
-		return "true", nil
-	}
-	if destIps == "" {
-		destIps, err = GetTargetIps(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, clients, false)
-		if err != nil {
-			return "false", err
-		}
-	}
-	return "false", nil
-}
-
-func filterPodsForNodes(targetPodList apiv1.PodList, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (map[string]*targetsDetails, error) {
-	targets := make(map[string]*targetsDetails)
-	targetContainer := experimentsDetails.TargetContainer
-
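-	// group the targets by node name so that a single helper pod per node can inject
-	// chaos into all of the target pods scheduled on it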
-	for _, pod := range targetPodList.Items {
-		serviceMesh, err := setDestIps(pod, experimentsDetails, clients)
-		if err != nil {
-			return targets, stacktrace.Propagate(err, "could not set destination ips")
-		}
-
-		if experimentsDetails.TargetContainer == "" {
-			targetContainer = pod.Spec.Containers[0].Name
-		}
-
-		td := target{
-			Name:            pod.Name,
-			Namespace:       pod.Namespace,
-			TargetContainer: targetContainer,
-			ServiceMesh:     serviceMesh,
-		}
-
-		if targets[pod.Spec.NodeName] == nil {
-			targets[pod.Spec.NodeName] = &targetsDetails{
-				Target: []target{td},
-			}
-		} else {
-			targets[pod.Spec.NodeName].Target = append(targets[pod.Spec.NodeName].Target, td)
-		}
-	}
-	return targets, nil
-}
-
-func logExperimentFields(experimentsDetails *experimentTypes.ExperimentDetails) {
-	switch experimentsDetails.NetworkChaosType {
-	case "network-loss":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"NetworkPacketLossPercentage": experimentsDetails.NetworkPacketLossPercentage,
-			"Sequence":                    experimentsDetails.Sequence,
-			"PodsAffectedPerc":            experimentsDetails.PodsAffectedPerc,
-		})
-	case "network-latency":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"NetworkLatency":   strconv.Itoa(experimentsDetails.NetworkLatency),
-			"Jitter":           experimentsDetails.Jitter,
-			"Sequence":         experimentsDetails.Sequence,
-			"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		})
-	case "network-corruption":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"NetworkPacketCorruptionPercentage": experimentsDetails.NetworkPacketCorruptionPercentage,
-			"Sequence":                          experimentsDetails.Sequence,
-			"PodsAffectedPerc":                  experimentsDetails.PodsAffectedPerc,
-		})
-	case "network-duplication":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"NetworkPacketDuplicationPercentage": experimentsDetails.NetworkPacketDuplicationPercentage,
-			"Sequence":                           experimentsDetails.Sequence,
-			"PodsAffectedPerc":                   experimentsDetails.PodsAffectedPerc,
-		})
-	}
-}
diff --git a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go b/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go
deleted file mode 100644
index 6e9e1f0..0000000
--- a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go
+++ /dev/null
@@ -1,288 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PrepareNodeCPUHog contains preparation steps before chaos injection
-func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//set up the tunables if provided in range
-	setChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"Node CPU Cores":           experimentsDetails.NodeCPUcores,
-		"CPU Load":                 experimentsDetails.CPULoad,
-		"Node Affected Percentage": experimentsDetails.NodesAffectedPerc,
-		"Sequence":                 experimentsDetails.Sequence,
-	})
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//Select node for node-cpu-hog
-	nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc)
-	targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get node list")
-	}
-
-	log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{
-		"No. Of Nodes": len(targetNodeList),
-		"Node Names":   targetNodeList,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode stresses the CPU of all the target nodes serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	nodeCPUCores := experimentsDetails.NodeCPUcores
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		// When the number of CPU cores to hog is not defined, it is taken from the node's capacity
-		if nodeCPUCores == "0" {
-			if err := setCPUCapacity(experimentsDetails, appNode, clients); err != nil {
-				return stacktrace.Propagate(err, "could not get node cpu capacity")
-			}
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":     appNode,
-			"NodeCPUCores": experimentsDetails.NodeCPUcores,
-		})
-
-		experimentsDetails.RunID = stringutils.GetRunID()
-
-		// Creating the helper pod to perform node cpu hog
-		if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-		//Checking the status of helper pod
-		log.Info("[Status]: Checking the status of the helper pod")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		common.SetTargets(appNode, "targeted", "node", chaosDetails)
-
-		// Wait till the completion of helper pod
-		log.Info("[Wait]: Waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-		}
-
-		//Deleting the helper pod
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the CPU of all the target nodes in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	nodeCPUCores := experimentsDetails.NodeCPUcores
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		// When the number of CPU cores to hog is not defined, it is taken from the node's capacity
-		if nodeCPUCores == "0" {
-			if err := setCPUCapacity(experimentsDetails, appNode, clients); err != nil {
-				return stacktrace.Propagate(err, "could not get node cpu capacity")
-			}
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":     appNode,
-			"NodeCPUcores": experimentsDetails.NodeCPUcores,
-		})
-
-		// Creating the helper pod to perform node cpu hog
-		if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-	//Checking the status of helper pod
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	for _, appNode := range targetNodeList {
-		common.SetTargets(appNode, "targeted", "node", chaosDetails)
-	}
-
-	// Wait till the completion of helper pod
-	log.Info("[Wait]: Waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-	}
-
-	//Deleting the helper pod
-	log.Info("[Cleanup]: Deleting the helper pod")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-//setCPUCapacity fetches the node CPU capacity
-func setCPUCapacity(experimentsDetails *experimentTypes.ExperimentDetails, appNode string, clients clients.ClientSets) error {
-	node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), appNode, v1.GetOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNode), Reason: err.Error()}
-	}
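-	// default to stressing every core reported in the node's CPU capacity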
-	experimentsDetails.NodeCPUcores = node.Status.Capacity.Cpu().String()
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error {
-
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, experimentsDetails.RunID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNode,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"stress-ng",
-					},
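-					// e.g. stress-ng --cpu 4 --cpu-load 100 --timeout 60 (illustrative values)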
-					Args: []string{
-						"--cpu",
-						experimentsDetails.NodeCPUcores,
-						"--cpu-load",
-						experimentsDetails.CPULoad,
-						"--timeout",
-						strconv.Itoa(experimentsDetails.ChaosDuration),
-					},
-					Resources: chaosDetails.Resources,
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-//setChaosTunables picks a random value within the provided range of values
-//If a single value is provided instead of a range, it is used as-is.
-func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.NodeCPUcores = common.ValidateRange(experimentsDetails.NodeCPUcores)
-	experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad)
-	experimentsDetails.NodesAffectedPerc = common.ValidateRange(experimentsDetails.NodesAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
diff --git a/chaoslib/litmus/node-drain/lib/node-drain.go b/chaoslib/litmus/node-drain/lib/node-drain.go
deleted file mode 100644
index 860c675..0000000
--- a/chaoslib/litmus/node-drain/lib/node-drain.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/exec"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-drain/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-//PrepareNodeDrain contains the preparation steps before chaos injection
-func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	if experimentsDetails.TargetNode == "" {
-		//Select node for node-drain
-		experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node name")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + experimentsDetails.TargetNode + " node"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, clients, resultDetails, chaosDetails, eventsDetails)
-
-	// Drain the application node
-	if err := drainNode(experimentsDetails, clients, chaosDetails); err != nil {
-		log.Info("[Revert]: Reverting chaos because error during draining of node")
-		if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())}
-		}
-		return stacktrace.Propagate(err, "could not drain node")
-	}
-
-	// Verify the status of AUT after reschedule
-	log.Info("[Status]: Verify the status of AUT after reschedule")
-	if err = status.AUTStatusCheck(clients, chaosDetails); err != nil {
-		log.Info("[Revert]: Reverting chaos because application status check failed")
-		if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())}
-		}
-		return err
-	}
-
-	// Verify the status of Auxiliary Applications after reschedule
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running")
-		if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Info("[Revert]: Reverting chaos because auxiliary application status check failed")
-			if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())}
-			}
-			return err
-		}
-	}
-
-	log.Infof("[Chaos]: Waiting for %vs", experimentsDetails.ChaosDuration)
-
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	log.Info("[Chaos]: Stopping the experiment")
-
-	// Uncordon the application node
-	if err := uncordonNode(experimentsDetails, clients, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not uncordon the target node")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// drainNode drains the target node
-func drainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		log.Infof("[Inject]: Draining the %v node", experimentsDetails.TargetNode)
-
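-		// --ignore-daemonsets skips DaemonSet-managed pods, --delete-emptydir-data allows
-		// evicting pods that use emptyDir volumes, and --force evicts unmanaged pods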
-		command := exec.Command("kubectl", "drain", experimentsDetails.TargetNode, "--ignore-daemonsets", "--delete-emptydir-data", "--force", "--timeout", strconv.Itoa(experimentsDetails.ChaosDuration)+"s")
-		if err := common.RunCLICommands(command, "", fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), "failed to drain the target node", cerrors.ErrorTypeChaosInject); err != nil {
-			return err
-		}
-
-		common.SetTargets(experimentsDetails.TargetNode, "injected", "node", chaosDetails)
-
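-		// poll the node spec until it reports Unschedulable, retrying up to Timeout/Delay times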
-		return retry.
-			Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
-			Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-			Try(func(attempt uint) error {
-				nodeSpec, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{})
-				if err != nil {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), Reason: err.Error()}
-				}
-				if !nodeSpec.Spec.Unschedulable {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{node: %s}", experimentsDetails.TargetNode), Reason: "node is not in unschedulable state"}
-				}
-				return nil
-			})
-	}
-	return nil
-}
-
-// uncordonNode uncordons the application node
-func uncordonNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-
-	targetNodes := strings.Split(experimentsDetails.TargetNode, ",")
-	for _, targetNode := range targetNodes {
-
-		//Check that the node exists before uncordoning it
-		_, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), targetNode, v1.GetOptions{})
-		if err != nil {
-			if apierrors.IsNotFound(err) {
-				log.Infof("[Info]: The %v node is no longer exist, skip uncordon the node", targetNode)
-				common.SetTargets(targetNode, "noLongerExist", "node", chaosDetails)
-				continue
-			} else {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: err.Error()}
-			}
-		}
-
-		log.Infof("[Recover]: Uncordon the %v node", targetNode)
-		command := exec.Command("kubectl", "uncordon", targetNode)
-		if err := common.RunCLICommands(command, "", fmt.Sprintf("{node: %s}", targetNode), "failed to uncordon the target node", cerrors.ErrorTypeChaosRevert); err != nil {
-			return err
-		}
-		common.SetTargets(targetNode, "reverted", "node", chaosDetails)
-	}
-
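-	// verify that every target node is schedulable again before declaring the revert complete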
-	return retry.
-		Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
-		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			targetNodes := strings.Split(experimentsDetails.TargetNode, ",")
-			for _, targetNode := range targetNodes {
-				nodeSpec, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), targetNode, v1.GetOptions{})
-				if err != nil {
-					if apierrors.IsNotFound(err) {
-						continue
-					} else {
-						return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: err.Error()}
-					}
-				}
-				if nodeSpec.Spec.Unschedulable {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{node: %s}", targetNode), Reason: "target node is in unschedulable state"}
-				}
-			}
-			return nil
-		})
-}
-
-// abortWatcher continuously watches for the abort signals
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails) {
-	// waiting till the abort signal is received
-	<-abort
-
-	log.Info("[Chaos]: Killing process started because of terminated signal received")
-	log.Info("Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		if err := uncordonNode(experimentsDetails, clients, chaosDetails); err != nil {
-			log.Errorf("Unable to uncordon the node, err: %v", err)
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("Chaos Revert Completed")
-	os.Exit(0)
-}
diff --git a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go b/chaoslib/litmus/node-io-stress/lib/node-io-stress.go
deleted file mode 100644
index abcc153..0000000
--- a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PrepareNodeIOStress contains preparation steps before chaos injection
-func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//set up the tunables if provided in range
-	setChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The details of chaos tunables are:", logrus.Fields{
-		"FilesystemUtilizationBytes":      experimentsDetails.FilesystemUtilizationBytes,
-		"FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage,
-		"CPU Core":                        experimentsDetails.CPU,
-		"NumberOfWorkers":                 experimentsDetails.NumberOfWorkers,
-		"Node Affected Percentage":        experimentsDetails.NodesAffectedPerc,
-		"Sequence":                        experimentsDetails.Sequence,
-	})
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//Select node for node-io-stress
-	nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc)
-	targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get node list")
-	}
-	log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{
-		"No. Of Nodes": len(targetNodeList),
-		"Node Names":   targetNodeList,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode stresses the IO of all the target nodes serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":                        appNode,
-			"FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage,
-			"NumberOfWorkers":                 experimentsDetails.NumberOfWorkers,
-		})
-
-		experimentsDetails.RunID = stringutils.GetRunID()
-
-		// Creating the helper pod to perform node io stress
-		if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-		//Checking the status of helper pod
-		log.Info("[Status]: Checking the status of the helper pod")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-		common.SetTargets(appNode, "injected", "node", chaosDetails)
-
-		log.Info("[Wait]: Waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName)
-		common.SetTargets(appNode, "reverted", "node", chaosDetails)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-		}
-
-		//Deleting the helper pod
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the IO of all the target nodes in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":                        appNode,
-			"FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage,
-			"NumberOfWorkers":                 experimentsDetails.NumberOfWorkers,
-		})
-
-		// Creating the helper pod to perform node io stress
-		if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-	//Checking the status of helper pod
-	log.Info("[Status]: Checking the status of the helper pod")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	for _, appNode := range targetNodeList {
-		common.SetTargets(appNode, "injected", "node", chaosDetails)
-	}
-
-	log.Info("[Wait]: Waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	for _, appNode := range targetNodeList {
-		common.SetTargets(appNode, "reverted", "node", chaosDetails)
-	}
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-	}
-
-	//Deleting the helper pod
-	log.Info("[Cleanup]: Deleting the helper pod")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error {
-
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, experimentsDetails.RunID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNode,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"stress-ng",
-					},
-					Args:      getContainerArguments(experimentsDetails),
-					Resources: chaosDetails.Resources,
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getContainerArguments derives the stress-ng args for the helper pod
-func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails) []string {
-
-	var hddbytes string
-	if experimentsDetails.FilesystemUtilizationBytes == "0" {
-		if experimentsDetails.FilesystemUtilizationPercentage == "0" {
-			hddbytes = "10%"
-			log.Info("Neither of FilesystemUtilizationPercentage or FilesystemUtilizationBytes provided, proceeding with a default FilesystemUtilizationPercentage value of 10%")
-		} else {
-			hddbytes = experimentsDetails.FilesystemUtilizationPercentage + "%"
-		}
-	} else {
-		if experimentsDetails.FilesystemUtilizationPercentage == "0" {
-			hddbytes = experimentsDetails.FilesystemUtilizationBytes + "G"
-		} else {
-			hddbytes = experimentsDetails.FilesystemUtilizationPercentage + "%"
-			log.Warn("Both FsUtilPercentage & FsUtilBytes provided as inputs, using the FsUtilPercentage value to proceed with stress exp")
-		}
-	}
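-	// e.g. FilesystemUtilizationPercentage "50" yields "--hdd-bytes 50%", while
-	// FilesystemUtilizationBytes "5" yields "--hdd-bytes 5G" (illustrative values)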
-
-	stressArgs := []string{
-		"--cpu",
-		experimentsDetails.CPU,
-		"--vm",
-		experimentsDetails.VMWorkers,
-		"--io",
-		experimentsDetails.NumberOfWorkers,
-		"--hdd",
-		experimentsDetails.NumberOfWorkers,
-		"--hdd-bytes",
-		hddbytes,
-		"--timeout",
-		strconv.Itoa(experimentsDetails.ChaosDuration) + "s",
-		"--temp-path",
-		"/tmp",
-	}
-	return stressArgs
-}
-
-//setChaosTunables picks a random value within the provided range of values
-//If a single value is provided instead of a range, it is used as-is.
-func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.FilesystemUtilizationBytes = common.ValidateRange(experimentsDetails.FilesystemUtilizationBytes)
-	experimentsDetails.FilesystemUtilizationPercentage = common.ValidateRange(experimentsDetails.FilesystemUtilizationPercentage)
-	experimentsDetails.CPU = common.ValidateRange(experimentsDetails.CPU)
-	experimentsDetails.VMWorkers = common.ValidateRange(experimentsDetails.VMWorkers)
-	experimentsDetails.NumberOfWorkers = common.ValidateRange(experimentsDetails.NumberOfWorkers)
-	experimentsDetails.NodesAffectedPerc = common.ValidateRange(experimentsDetails.NodesAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
diff --git a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go
deleted file mode 100644
index 9ab8bc2..0000000
--- a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go
+++ /dev/null
@@ -1,373 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PrepareNodeMemoryHog contains preparation steps before chaos injection
-func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//set up the tunables if provided in range
-	setChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The details of chaos tunables are:", logrus.Fields{
-		"MemoryConsumptionMebibytes":  experimentsDetails.MemoryConsumptionMebibytes,
-		"MemoryConsumptionPercentage": experimentsDetails.MemoryConsumptionPercentage,
-		"NumberOfWorkers":             experimentsDetails.NumberOfWorkers,
-		"Node Affected Percentage":    experimentsDetails.NodesAffectedPerc,
-		"Sequence":                    experimentsDetails.Sequence,
-	})
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//Select node for node-memory-hog
-	nodesAffectedPerc, _ := strconv.Atoi(experimentsDetails.NodesAffectedPerc)
-	targetNodeList, err := common.GetNodeList(experimentsDetails.TargetNodes, experimentsDetails.NodeLabel, nodesAffectedPerc, clients)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get node list")
-	}
-
-	log.InfoWithValues("[Info]: Details of Nodes under chaos injection", logrus.Fields{
-		"No. Of Nodes": len(targetNodeList),
-		"Node Names":   targetNodeList,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode stresses the memory of all the target nodes serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":                      appNode,
-			"Memory Consumption Percentage": experimentsDetails.MemoryConsumptionPercentage,
-			"Memory Consumption Mebibytes":  experimentsDetails.MemoryConsumptionMebibytes,
-		})
-
-		experimentsDetails.RunID = stringutils.GetRunID()
-
-		//Getting node memory details
-		memoryCapacity, memoryAllocatable, err := getNodeMemoryDetails(appNode, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node memory details")
-		}
-
-		//Getting the exact memory value to exhaust
-		MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, memoryCapacity, memoryAllocatable)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not calculate memory consumption value")
-		}
-
-		// Creating the helper pod to perform node memory hog
-		if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-		//Checking the status of helper pod
-		log.Info("[Status]: Checking the status of the helper pod")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		common.SetTargets(appNode, "targeted", "node", chaosDetails)
-
-		// Wait till the completion of helper pod
-		log.Info("[Wait]: Waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, experimentsDetails.ExperimentName)
-		if err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-		} else if podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return errors.Errorf("helper pod status is %v", podStatus)
-		}
-
-		//Deleting the helper pod
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err := common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the memory of all the target nodes in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	for _, appNode := range targetNodeList {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + appNode + " node"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		log.InfoWithValues("[Info]: Details of Node under chaos injection", logrus.Fields{
-			"NodeName":                      appNode,
-			"Memory Consumption Percentage": experimentsDetails.MemoryConsumptionPercentage,
-			"Memory Consumption Mebibytes":  experimentsDetails.MemoryConsumptionMebibytes,
-		})
-
-		//Getting node memory details
-		memoryCapacity, memoryAllocatable, err := getNodeMemoryDetails(appNode, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node memory details")
-		}
-
-		//Getting the exact memory value to exhaust
-		MemoryConsumption, err := calculateMemoryConsumption(experimentsDetails, memoryCapacity, memoryAllocatable)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not calculate memory consumption value")
-		}
-
-		// Creating the helper pod to perform node memory hog
-		if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, experimentsDetails.RunID)
-
-	//Checking the status of helper pod
-	log.Info("[Status]: Checking the status of the helper pod")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	for _, appNode := range targetNodeList {
-		common.SetTargets(appNode, "targeted", "node", chaosDetails)
-	}
-
-	// Wait till the completion of helper pod
-	log.Info("[Wait]: Waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, false)
-	}
-
-	//Deleting the helper pod
-	log.Info("[Cleanup]: Deleting the helper pod")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// getNodeMemoryDetails will return the total memory capacity and memory allocatable of an application node
-func getNodeMemoryDetails(appNodeName string, clients clients.ClientSets) (int, int, error) {
-
-	nodeDetails, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), appNodeName, v1.GetOptions{})
-	if err != nil {
-		return 0, 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNodeName), Reason: err.Error()}
-	}
-
-	memoryCapacity := int(nodeDetails.Status.Capacity.Memory().Value())
-	memoryAllocatable := int(nodeDetails.Status.Allocatable.Memory().Value())
-
-	if memoryCapacity == 0 || memoryAllocatable == 0 {
-		return memoryCapacity, memoryAllocatable, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{nodeName: %s}", appNodeName), Reason: "failed to get memory details of the target node"}
-	}
-
-	return memoryCapacity, memoryAllocatable, nil
-}
-
-// calculateMemoryConsumption will calculate the amount of memory to be consumed for a given unit.
-func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDetails, memoryCapacity, memoryAllocatable int) (string, error) {
-
-	var totalMemoryConsumption int
-	var MemoryConsumption string
-	var selector string
-
-	if experimentsDetails.MemoryConsumptionMebibytes == "0" {
-		if experimentsDetails.MemoryConsumptionPercentage == "0" {
-			log.Info("Neither of MemoryConsumptionPercentage or MemoryConsumptionMebibytes provided, proceeding with a default MemoryConsumptionPercentage value of 30%%")
-			return "30%", nil
-		}
-		selector = "percentage"
-	} else {
-		if experimentsDetails.MemoryConsumptionPercentage == "0" {
-			selector = "mebibytes"
-		} else {
-			log.Warn("Both MemoryConsumptionPercentage & MemoryConsumptionMebibytes provided as inputs, using the MemoryConsumptionPercentage value to proceed with the experiment")
-			selector = "percentage"
-		}
-	}
-
-	switch selector {
-
-	case "percentage":
-
-		//Getting the total memory under chaos
-		memoryConsumptionPercentage, _ := strconv.ParseFloat(experimentsDetails.MemoryConsumptionPercentage, 64)
-		memoryForChaos := (memoryConsumptionPercentage / 100) * float64(memoryCapacity)
-
-		//Get the percentage of memory under chaos wrt allocatable memory
-		totalMemoryConsumption = int((memoryForChaos / float64(memoryAllocatable)) * 100)
-		if totalMemoryConsumption > 100 {
-			log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %v percent, which is more than 100 percent (%d percent) of Allocatable Memory, so the experiment will only consume upto 100 percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption)
-			MemoryConsumption = "100%"
-		} else {
-			log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %v percent, which is %d percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption)
-			MemoryConsumption = strconv.Itoa(totalMemoryConsumption) + "%"
-		}
-		return MemoryConsumption, nil
-
-	case "mebibytes":
-
-		// Bringing all the values to the Ki unit to compare
-		// since 1Mi = 1024Ki
-		memoryConsumptionMebibytes, _ := strconv.ParseFloat(experimentsDetails.MemoryConsumptionMebibytes, 64)
-
-		TotalMemoryConsumption := memoryConsumptionMebibytes * 1024
-		// since 1Ki = 1024 bytes
-		memoryAllocatable := memoryAllocatable / 1024
-
-		if memoryAllocatable < int(TotalMemoryConsumption) {
-			MemoryConsumption = strconv.Itoa(memoryAllocatable) + "k"
-			log.Infof("[Info]: The memory for consumption %vKi is more than the available memory %vKi, so the experiment will hog the memory upto %vKi", int(TotalMemoryConsumption), memoryAllocatable, memoryAllocatable)
-		} else {
-			MemoryConsumption = experimentsDetails.MemoryConsumptionMebibytes + "m"
-		}
-		return MemoryConsumption, nil
-	}
-	return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "specify the memory consumption value either in percentage or mebibytes in a non-decimal format using respective envs"}
-}
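-
-// A worked example of the calculation above, with illustrative values (not experiment
-// defaults): on a node with 8Gi capacity and 7.5Gi allocatable and a
-// MemoryConsumptionPercentage of "50", memoryForChaos = 0.5 * 8Gi = 4Gi, which is
-// int((4/7.5)*100) = 53 percent of allocatable, so stress-ng receives --vm-bytes "53%".
-// In the mebibytes branch, a MemoryConsumptionMebibytes of "500" converts to
-// 500*1024 = 512000Ki and is capped at allocatable/1024 Ki when it exceeds it.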
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets, MemoryConsumption string) error {
-
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, experimentsDetails.RunID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			NodeName:                      appNode,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"stress-ng",
-					},
-					Args: []string{
-						"--vm",
-						experimentsDetails.NumberOfWorkers,
-						"--vm-bytes",
-						MemoryConsumption,
-						"--timeout",
-						strconv.Itoa(experimentsDetails.ChaosDuration) + "s",
-					},
-					Resources: chaosDetails.Resources,
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
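-
-// For illustration, with NumberOfWorkers "1", a computed consumption of "53%", and a
-// chaos duration of 60s (assumed tunable values, not defaults), the helper container
-// above would run:
-//
-//	stress-ng --vm 1 --vm-bytes 53% --timeout 60s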
-
-//setChaosTunables picks a random value within the given range of values.
-//If the value is not provided as a range, it keeps the initially provided value.
-func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.MemoryConsumptionMebibytes = common.ValidateRange(experimentsDetails.MemoryConsumptionMebibytes)
-	experimentsDetails.MemoryConsumptionPercentage = common.ValidateRange(experimentsDetails.MemoryConsumptionPercentage)
-	experimentsDetails.NumberOfWorkers = common.ValidateRange(experimentsDetails.NumberOfWorkers)
-	experimentsDetails.NodesAffectedPerc = common.ValidateRange(experimentsDetails.NodesAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
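-
-// For example, assuming common.ValidateRange follows the usual litmus "min-max" range
-// convention, a MemoryConsumptionPercentage of "40-60" would resolve to a random value
-// such as "52", while a plain "50" is kept as-is; this sketches the expected behaviour
-// rather than the helper's exact implementation.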
diff --git a/chaoslib/litmus/node-restart/lib/node-restart.go b/chaoslib/litmus/node-restart/lib/node-restart.go
deleted file mode 100644
index 11daac7..0000000
--- a/chaoslib/litmus/node-restart/lib/node-restart.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kubernetes/node-restart/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/palantir/stacktrace"
-	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
-)
-
-func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, podName string, clients clients.ClientSets) error {
-	// It contains all the pod & container details required for the exec command
-	execCommandDetails := litmusexec.PodDetails{}
-	command := []string{"/bin/sh", "-c", experimentsDetails.ChaosInjectCmd}
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, experimentsDetails.AppNS)
-	_, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to inject chaos: %s", err.Error())}
-	}
-	return nil
-}
-
-func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// validate that a target selector was provided
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using the pod affected percentage
-	targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	return runChaos(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails)
-}
-
-func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-			"container": experimentsDetails.TargetContainer,
-			"Pod":       pod.Name,
-		})
-
-		go injectChaos(experimentsDetails, pod.Name, clients)
-
-		log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-		// signChan channel is used to transmit signal notifications.
-		signChan := make(chan os.Signal, 1)
-		// Catch and relay certain signal(s) to signChan channel.
-		signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-	loop:
-		for {
-			endTime = time.After(timeDelay)
-			select {
-			case <-signChan:
-				log.Info("[Chaos]: Revert Started")
-				if err := killChaos(experimentsDetails, pod.Name, clients); err != nil {
-					log.Error("unable to kill the chaos process after receiving the abort signal")
-				}
-				log.Info("[Chaos]: Revert Completed")
-				os.Exit(1)
-			case <-endTime:
-				log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-				endTime = nil
-				break loop
-			}
-		}
-		if err := killChaos(experimentsDetails, pod.Name, clients); err != nil {
-			return stacktrace.Propagate(err, "could not revert chaos")
-		}
-	}
-	return nil
-}
-
-//PrepareChaos contains the preparation and chaos injection steps
-func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	//Starting the chaos experiment
-	if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not execute experiment")
-	}
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-func killChaos(experimentsDetails *experimentTypes.ExperimentDetails, podName string, clients clients.ClientSets) error {
-	// It contains all the pod & container details required for the exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
-	command := []string{"/bin/sh", "-c", experimentsDetails.ChaosKillCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, experimentsDetails.AppNS)
-	_, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to revert chaos: %s", err.Error())}
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/node-taint/lib/node-taint.go b/chaoslib/litmus/node-taint/lib/node-taint.go
deleted file mode 100644
index 1d48078..0000000
--- a/chaoslib/litmus/node-taint/lib/node-taint.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-//PrepareNodeTaint contains the preparation and chaos injection steps
-func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	if experimentsDetails.TargetNode == "" {
-		//Select the target node for the node-taint chaos
-		experimentsDetails.TargetNode, err = common.GetNodeName(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.NodeLabel, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get node name")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + experimentsDetails.TargetNode + " node"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// watching for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, clients, resultDetails, chaosDetails, eventsDetails)
-
-	// taint the application node
-	if err := taintNode(experimentsDetails, clients, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not taint node")
-	}
-
-	// Verify the status of AUT after reschedule
-	log.Info("[Status]: Verify the status of AUT after reschedule")
-	if err = status.AUTStatusCheck(clients, chaosDetails); err != nil {
-		log.Info("[Revert]: Reverting chaos because application status check failed")
-		if taintErr := removeTaintFromNode(experimentsDetails, clients, chaosDetails); taintErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(taintErr).Error())}
-		}
-		return err
-	}
-
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running")
-		if err = status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Info("[Revert]: Reverting chaos because auxiliary application status check failed")
-			if taintErr := removeTaintFromNode(experimentsDetails, clients, chaosDetails); taintErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(taintErr).Error())}
-			}
-			return err
-		}
-	}
-
-	log.Infof("[Chaos]: Waiting for %vs", experimentsDetails.ChaosDuration)
-
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	log.Info("[Chaos]: Stopping the experiment")
-
-	// remove taint from the application node
-	if err := removeTaintFromNode(experimentsDetails, clients, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not remove taint from node")
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// taintNode taints the application node
-func taintNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-
-	// get the taint labels & effect
-	taintKey, taintValue, taintEffect := getTaintDetails(experimentsDetails)
-
-	log.Infof("Add %v taints to the %v node", taintKey+"="+taintValue+":"+taintEffect, experimentsDetails.TargetNode)
-
-	// get the node details
-	node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{nodeName: %s}", experimentsDetails.TargetNode), Reason: err.Error()}
-	}
-
-	// check if the taint already exists
-	tainted := false
-	for _, taint := range node.Spec.Taints {
-		if taint.Key == taintKey {
-			tainted = true
-			break
-		}
-	}
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		if !tainted {
-			node.Spec.Taints = append(node.Spec.Taints, apiv1.Taint{
-				Key:    taintKey,
-				Value:  taintValue,
-				Effect: apiv1.TaintEffect(taintEffect),
-			})
-
-			_, err := clients.KubeClient.CoreV1().Nodes().Update(context.Background(), node, v1.UpdateOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{nodeName: %s}", node.Name), Reason: fmt.Sprintf("failed to add taints: %s", err.Error())}
-			}
-		}
-
-		common.SetTargets(node.Name, "injected", "node", chaosDetails)
-
-		log.Infof("Successfully added taint in %v node", experimentsDetails.TargetNode)
-	}
-	return nil
-}
-
-// removeTaintFromNode removes the taint from the application node
-func removeTaintFromNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-
-	// Get the taint key
-	taintLabel := strings.Split(experimentsDetails.Taints, ":")
-	taintKey := strings.Split(taintLabel[0], "=")[0]
-
-	// get the node details
-	node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), experimentsDetails.TargetNode, v1.GetOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{nodeName: %s}", experimentsDetails.TargetNode), Reason: err.Error()}
-	}
-
-	// check if the taint already exists
-	tainted := false
-	for _, taint := range node.Spec.Taints {
-		if taint.Key == taintKey {
-			tainted = true
-			break
-		}
-	}
-
-	if tainted {
-		var newTaints []apiv1.Taint
-		// remove all the taints with matching key
-		for _, taint := range node.Spec.Taints {
-			if taint.Key != taintKey {
-				newTaints = append(newTaints, taint)
-			}
-		}
-		node.Spec.Taints = newTaints
-		updatedNodeWithTaint, err := clients.KubeClient.CoreV1().Nodes().Update(context.Background(), node, v1.UpdateOptions{})
-		if err != nil || updatedNodeWithTaint == nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{nodeName: %s}", node.Name), Reason: fmt.Sprintf("failed to remove taints: %s", err.Error())}
-		}
-	}
-
-	common.SetTargets(node.Name, "reverted", "node", chaosDetails)
-
-	log.Infof("Successfully removed taint from the %v node", node.Name)
-	return nil
-}
-
-// getTaintDetails returns the key, value and effect for the taint
-func getTaintDetails(experimentsDetails *experimentTypes.ExperimentDetails) (string, string, string) {
-	taintValue := "node-taint"
-	taintEffect := string(apiv1.TaintEffectNoExecute)
-
-	taints := strings.Split(experimentsDetails.Taints, ":")
-	taintLabel := strings.Split(taints[0], "=")
-	taintKey := taintLabel[0]
-
-	// It will set the value for the taint label from the `TAINTS` env, if provided,
-	// otherwise it will use `node-taint` as the default value.
-	if len(taintLabel) >= 2 {
-		taintValue = taintLabel[1]
-	}
-	// It will set the value for the taint effect from the `TAINTS` env, if provided,
-	// otherwise it will use `NoExecute` as the default value.
-	if len(taints) >= 2 {
-		taintEffect = taints[1]
-	}
-
-	return taintKey, taintValue, taintEffect
-}
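-
-// A worked example of the parsing above, with a hypothetical TAINTS value: for
-// "node.kubernetes.io/chaos=litmus:NoSchedule" the key is "node.kubernetes.io/chaos",
-// the value is "litmus" and the effect is "NoSchedule"; for a bare
-// "node.kubernetes.io/chaos" the defaults "node-taint" and "NoExecute" apply.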
-
-// abortWatcher continuously watch for the abort signals
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails) {
-	// waiting till the abort signal received
-	<-abort
-
-	log.Info("[Chaos]: Killing process started because of terminated signal received")
-	log.Info("Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		if err := removeTaintFromNode(experimentsDetails, clients, chaosDetails); err != nil {
-			log.Errorf("Unable to untaint node, err: %v", err)
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("Chaos Revert Completed")
-	os.Exit(0)
-}
diff --git a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go b/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go
deleted file mode 100644
index 813d514..0000000
--- a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go
+++ /dev/null
@@ -1,439 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/math"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	"github.com/sirupsen/logrus"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
-	retries "k8s.io/client-go/util/retry"
-)
-
-var (
-	err                     error
-	appsv1DeploymentClient  appsv1.DeploymentInterface
-	appsv1StatefulsetClient appsv1.StatefulSetInterface
-)
-
-//PreparePodAutoscaler contains the preparation steps and chaos injection steps
-func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// initialise the resource clients
-	appsv1DeploymentClient = clients.KubeClient.AppsV1().Deployments(experimentsDetails.AppNS)
-	appsv1StatefulsetClient = clients.KubeClient.AppsV1().StatefulSets(experimentsDetails.AppNS)
-
-	switch strings.ToLower(experimentsDetails.AppKind) {
-	case "deployment", "deployments":
-
-		appsUnderTest, err := getDeploymentDetails(experimentsDetails)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get deployment details")
-		}
-
-		deploymentList := []string{}
-		for _, deployment := range appsUnderTest {
-			deploymentList = append(deploymentList, deployment.AppName)
-		}
-		log.InfoWithValues("[Info]: Details of Deployments under chaos injection", logrus.Fields{
-			"Number Of Deployment": len(deploymentList),
-			"Target Deployments":   deploymentList,
-		})
-
-		//calling go routine which will continuously watch for the abort signal
-		go abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)
-
-		if err = podAutoscalerChaosInDeployment(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not scale deployment")
-		}
-
-		if err = autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not revert scaling in deployment")
-		}
-
-	case "statefulset", "statefulsets":
-
-		appsUnderTest, err := getStatefulsetDetails(experimentsDetails)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get statefulset details")
-		}
-
-		var stsList []string
-		for _, sts := range appsUnderTest {
-			stsList = append(stsList, sts.AppName)
-		}
-		log.InfoWithValues("[Info]: Details of Statefulsets under chaos injection", logrus.Fields{
-			"Number Of Statefulsets": len(stsList),
-			"Target Statefulsets":    stsList,
-		})
-
-		//calling go routine which will continuously watch for the abort signal
-		go abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails)
-
-		if err = podAutoscalerChaosInStatefulset(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not scale statefulset")
-		}
-
-		if err = autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not revert scaling in statefulset")
-		}
-
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{kind: %s}", experimentsDetails.AppKind), Reason: "application type is not supported"}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-func getSliceOfTotalApplicationsTargeted(appList []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails) []experimentTypes.ApplicationUnderTest {
-
-	newAppListLength := math.Maximum(1, math.Adjustment(math.Minimum(experimentsDetails.AppAffectPercentage, 100), len(appList)))
-	return appList[:newAppListLength]
-}
-
-//getDeploymentDetails is used to get the name and total number of replicas of the target deployments
-func getDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) {
-
-	deploymentList, err := appsv1DeploymentClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: deployment, labels: %s}", experimentsDetails.AppLabel), Reason: err.Error()}
-	} else if len(deploymentList.Items) == 0 {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: deployment, labels: %s}", experimentsDetails.AppLabel), Reason: "no deployment found with matching labels"}
-	}
-	var appsUnderTest []experimentTypes.ApplicationUnderTest
-	for _, app := range deploymentList.Items {
-		log.Infof("[Info]: Found deployment name '%s' with replica count '%d'", app.Name, int(*app.Spec.Replicas))
-		appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)})
-	}
-	// Applying the APP_AFFECTED_PERC variable to determine the total target deployments to scale
-	return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil
-}
-
-//getStatefulsetDetails is used to get the name and total number of replicas of the statefulsets
-func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) {
-
-	statefulsetList, err := appsv1StatefulsetClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel})
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: statefulset, labels: %s}", experimentsDetails.AppLabel), Reason: err.Error()}
-	} else if len(statefulsetList.Items) == 0 {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Target: fmt.Sprintf("{kind: statefulset, labels: %s}", experimentsDetails.AppLabel), Reason: "no statefulset found with matching labels"}
-	}
-
-	appsUnderTest := []experimentTypes.ApplicationUnderTest{}
-	for _, app := range statefulsetList.Items {
-		log.Infof("[Info]: Found statefulset name '%s' with replica count '%d'", app.Name, int(*app.Spec.Replicas))
-		appsUnderTest = append(appsUnderTest, experimentTypes.ApplicationUnderTest{AppName: app.Name, ReplicaCount: int(*app.Spec.Replicas)})
-	}
-	// Applying the APP_AFFECTED_PERC variable to determine the total target statefulsets to scale
-	return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil
-}
-
-//podAutoscalerChaosInDeployment scales the replicas of the deployments and verifies the status
-func podAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Scale Application
-	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		for _, app := range appsUnderTest {
-			// Retrieve the latest version of Deployment before attempting update
-			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-			appUnderTest, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: err.Error()}
-			}
-			// modifying the replica count
-			appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
-			log.Infof("Updating deployment '%s' to number of replicas '%d'", appUnderTest.ObjectMeta.Name, experimentsDetails.Replicas)
-			_, err = appsv1DeploymentClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to scale deployment :%s", err.Error())}
-			}
-			common.SetTargets(app.AppName, "injected", "deployment", chaosDetails)
-		}
-		return nil
-	})
-	if retryErr != nil {
-		return retryErr
-	}
-	log.Info("[Info]: The application started scaling")
-
-	return deploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
-}
-
-//podAutoscalerChaosInStatefulset scales the replicas of the statefulsets and verifies the status
-func podAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Scale Application
-	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		for _, app := range appsUnderTest {
-			// Retrieve the latest version of Statefulset before attempting update
-			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-			appUnderTest, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: err.Error()}
-			}
-			// modifying the replica count
-			appUnderTest.Spec.Replicas = int32Ptr(int32(experimentsDetails.Replicas))
-			_, err = appsv1StatefulsetClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to scale statefulset :%s", err.Error())}
-			}
-			common.SetTargets(app.AppName, "injected", "statefulset", chaosDetails)
-		}
-		return nil
-	})
-	if retryErr != nil {
-		return retryErr
-	}
-	log.Info("[Info]: The application started scaling")
-
-	return statefulsetStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails)
-}
-
-// deploymentStatusCheck checks the status of the deployments and verifies the ready replicas
-func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection began
-	ChaosStartTimeStamp := time.Now()
-
-	err = retry.
-		Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).
-		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			for _, app := range appsUnderTest {
-				deployment, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-				if err != nil {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-				}
-				if int(deployment.Status.ReadyReplicas) != experimentsDetails.Replicas {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to scale deployment, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, deployment.Status.ReadyReplicas)}
-				}
-			}
-			return nil
-		})
-
-	if err != nil {
-		if scaleErr := autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); scaleErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(scaleErr).Error())}
-		}
-		return stacktrace.Propagate(err, "failed to scale replicas")
-	}
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-	if duration < experimentsDetails.ChaosDuration {
-		log.Info("[Wait]: Waiting for completion of chaos duration")
-		time.Sleep(time.Duration(experimentsDetails.ChaosDuration-duration) * time.Second)
-	}
-
-	return nil
-}
-
-// statefulsetStatusCheck checks the status of the statefulsets and verifies the ready replicas
-func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//ChaosStartTimeStamp contains the start timestamp, when the chaos injection began
-	ChaosStartTimeStamp := time.Now()
-
-	err = retry.
-		Times(uint(experimentsDetails.ChaosDuration / experimentsDetails.Delay)).
-		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			for _, app := range appsUnderTest {
-				statefulset, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-				if err != nil {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-				}
-				if int(statefulset.Status.ReadyReplicas) != experimentsDetails.Replicas {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to scale statefulset, the desired replica count is: %v and ready replica count is: %v", experimentsDetails.Replicas, statefulset.Status.ReadyReplicas)}
-				}
-			}
-			return nil
-		})
-
-	if err != nil {
-		if scaleErr := autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); scaleErr != nil {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(scaleErr).Error())}
-		}
-		return stacktrace.Propagate(err, "failed to scale replicas")
-	}
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-	if duration < experimentsDetails.ChaosDuration {
-		log.Info("[Wait]: Waiting for completion of chaos duration")
-		time.Sleep(time.Duration(experimentsDetails.ChaosDuration-duration) * time.Second)
-	}
-
-	return nil
-}
-
-//autoscalerRecoveryInDeployment rolls back the replicas to their initial values in the deployments
-func autoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, chaosDetails *types.ChaosDetails) error {
-
-	// Scale back to initial number of replicas
-	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		// Retrieve the latest version of Deployment before attempting update
-		// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-		for _, app := range appsUnderTest {
-			appUnderTest, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-			}
-			appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count
-			_, err = appsv1DeploymentClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to revert scaling in deployment :%s", err.Error())}
-			}
-			common.SetTargets(app.AppName, "reverted", "deployment", chaosDetails)
-		}
-		return nil
-	})
-
-	if retryErr != nil {
-		return retryErr
-	}
-	log.Info("[Info]: Application started rolling back to original replica count")
-
-	return retry.
-		Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
-		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			for _, app := range appsUnderTest {
-				applicationDeploy, err := appsv1DeploymentClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-				if err != nil {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-				}
-				if int(applicationDeploy.Status.ReadyReplicas) != app.ReplicaCount {
-					log.Infof("[Info]: Application ready replica count is: %v", applicationDeploy.Status.ReadyReplicas)
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: deployment, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to rollback deployment scaling, the desired replica count is: %v and ready replica count is: %v", app.ReplicaCount, applicationDeploy.Status.ReadyReplicas)}
-				}
-			}
-			log.Info("[RollBack]: Application rollback to the initial number of replicas")
-			return nil
-		})
-}
-
-//autoscalerRecoveryInStatefulset rolls back the replicas to their initial values in the statefulsets
-func autoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, chaosDetails *types.ChaosDetails) error {
-
-	// Scale back to initial number of replicas
-	retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error {
-		for _, app := range appsUnderTest {
-			// Retrieve the latest version of Statefulset before attempting update
-			// RetryOnConflict uses exponential backoff to avoid exhausting the apiserver
-			appUnderTest, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-			}
-
-			appUnderTest.Spec.Replicas = int32Ptr(int32(app.ReplicaCount)) // modify replica count
-			_, err = appsv1StatefulsetClient.Update(context.Background(), appUnderTest, metav1.UpdateOptions{})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, name: %s, namespace: %s}", app.AppName, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to revert scaling in statefulset :%s", err.Error())}
-			}
-			common.SetTargets(app.AppName, "reverted", "statefulset", chaosDetails)
-		}
-		return nil
-	})
-	if retryErr != nil {
-		return retryErr
-	}
-	log.Info("[Info]: Application pod started rolling back")
-
-	return retry.
-		Times(uint(experimentsDetails.Timeout / experimentsDetails.Delay)).
-		Wait(time.Duration(experimentsDetails.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			for _, app := range appsUnderTest {
-				applicationDeploy, err := appsv1StatefulsetClient.Get(context.Background(), app.AppName, metav1.GetOptions{})
-				if err != nil {
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: err.Error()}
-				}
-				if int(applicationDeploy.Status.ReadyReplicas) != app.ReplicaCount {
-					log.Infof("Application ready replica count is: %v", applicationDeploy.Status.ReadyReplicas)
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{kind: statefulset, namespace: %s, name: %s}", experimentsDetails.AppNS, app.AppName), Reason: fmt.Sprintf("failed to rollback statefulset scaling, the desired replica count is: %v and ready replica count is: %v", app.ReplicaCount, applicationDeploy.Status.ReadyReplicas)}
-				}
-			}
-			log.Info("[RollBack]: Application roll back to initial number of replicas")
-			return nil
-		})
-}
-
-func int32Ptr(i int32) *int32 { return &i }
-
-//abortPodAutoScalerChaos goroutine continuously watches for the abort signal for the entire chaos duration and generates the required events and result
-func abortPodAutoScalerChaos(appsUnderTest []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) {
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	// waiting till the abort signal received
-	<-signChan
-
-	log.Info("[Chaos]: Revert Started")
-	// Note that we attempt the recovery (in this case, scaling down to the original replica count) after
-	// the tasks that patch results & generate events. This is because autoscalerRecovery takes more time
-	// to complete - it involves a status check after the downscale - and we only have a window of a few
-	// seconds between the TERM being caught and the pod deletion/removal, so we run the risk of not
-	// updating the status of the objects or creating events. With the current approach, tests indicate we
-	// succeed with the downscale/patch call even if the status checks take longer.
-	// As such, this is a workaround, and other solutions, such as the usage of pre-stop hooks, need to be explored.
-	// Other experiments have simpler "recoveries" that are more or less guaranteed to work.
-	switch strings.ToLower(experimentsDetails.AppKind) {
-	case "deployment", "deployments":
-		if err := autoscalerRecoveryInDeployment(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil {
-			log.Errorf("the recovery after abortion failed err: %v", err)
-		}
-
-	case "statefulset", "statefulsets":
-		if err := autoscalerRecoveryInStatefulset(experimentsDetails, clients, appsUnderTest, chaosDetails); err != nil {
-			log.Errorf("the recovery after abortion failed err: %v", err)
-		}
-
-	default:
-		log.Errorf("application type '%s' is not supported for the chaos", experimentsDetails.AppKind)
-	}
-	log.Info("[Chaos]: Revert Completed")
-
-	os.Exit(1)
-}
diff --git a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go b/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go
deleted file mode 100644
index 0e4c86c..0000000
--- a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
-)
-
-var inject chan os.Signal
-
-//PrepareCPUExecStress contains the chaos preparation and injection steps
-func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	//Starting the CPU stress experiment
-	if err := experimentCPU(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not stress cpu")
-	}
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// stressCPU uses the REST API to exec into the target container of the target pod.
-// It constantly increases the CPU utilisation until it reaches the maximum available or allowed limit.
-// The TOTAL_CHAOS_DURATION env specifies how long the experiment will last.
-func stressCPU(experimentsDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, stressErr chan error) {
-	// It contains all the pod & container details required for the exec command
-	execCommandDetails := litmusexec.PodDetails{}
-	command := []string{"/bin/sh", "-c", experimentsDetails.ChaosInjectCmd}
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, ns)
-	_, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	stressErr <- err
-}
-
-//experimentCPU orchestrates the experiment by calling stressCPU for every core of the target container of every targeted pod
-func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-
-	targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stresses the CPU of all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range targetPodList.Items {
-
-			// creating err channel to receive the error from the go routine
-			stressErr := make(chan error)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Get the target container name of the application pod
-			if !experimentsDetails.IsTargetContainerProvided {
-				experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-			}
-
-			log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-				"Target Container": experimentsDetails.TargetContainer,
-				"Target Pod":       pod.Name,
-				"CPU CORE":         experimentsDetails.CPUcores,
-			})
-
-			for i := 0; i < experimentsDetails.CPUcores; i++ {
-				go stressCPU(experimentsDetails, pod.Name, pod.Namespace, clients, stressErr)
-			}
-
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-
-			log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-		loop:
-			for {
-				endTime = time.After(timeDelay)
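-				// the timeout channel is re-created on every iteration of this loop,
-				// so it measures from the latest iteration rather than from chaos start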
-				select {
-				case err := <-stressErr:
-					// if the stress command fails with any error other than exit code 137, stop further execution and mark the result as fail
-					// exit code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-					// an OOM kill occurs when the stressed memory exceeds the resource limit of the target container
-					if err != nil {
-						if strings.Contains(err.Error(), "137") {
-							log.Warn("Chaos process OOM killed")
-							return nil
-						}
-						return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress cpu of target pod: %s", err.Error())}
-					}
-				case <-signChan:
-					log.Info("[Chaos]: Revert Started")
-					if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil {
-						log.Errorf("Error in Kill stress after abortion, err: %v", err)
-					}
-					// updating the chaosresult after stopped
-					err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"}
-					failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-					types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-					if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-						log.Errorf("failed to update chaos result %s", err.Error())
-					}
-					log.Info("[Chaos]: Revert Completed")
-					os.Exit(1)
-				case <-endTime:
-					log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-					endTime = nil
-					break loop
-				}
-			}
-			if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not revert cpu stress")
-			}
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the CPU of all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// creating err channel to receive the error from the go routine
-	stressErr := make(chan error)
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range targetPodList.Items {
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-			//Get the target container name of the application pod
-			if !experimentsDetails.IsTargetContainerProvided {
-				experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-			}
-
-			log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-				"Target Container": experimentsDetails.TargetContainer,
-				"Target Pod":       pod.Name,
-				"CPU CORE":         experimentsDetails.CPUcores,
-			})
-			for i := 0; i < experimentsDetails.CPUcores; i++ {
-				go stressCPU(experimentsDetails, pod.Name, pod.Namespace, clients, stressErr)
-			}
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-		}
-	}
-
-	log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-loop:
-	for {
-		endTime = time.After(timeDelay)
-		select {
-		case err := <-stressErr:
-			// if the stress command fails with any error other than exit code 137, stop further execution and mark the result as fail
-			// exit code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-			// an OOM kill occurs when the stressed memory exceeds the resource limit of the target container
-			if err != nil {
-				if strings.Contains(err.Error(), "137") {
-					log.Warn("Chaos process OOM killed")
-					return nil
-				}
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to stress cpu of target pod: %s", err.Error())}
-			}
-		case <-signChan:
-			log.Info("[Chaos]: Revert Started")
-			if err := killStressCPUParallel(experimentsDetails, targetPodList, clients, chaosDetails); err != nil {
-				log.Errorf("Error in Kill stress after abortion, err: %v", err)
-			}
-			// updating the chaosresult after stopped
-			err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"}
-			failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-			types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-			if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-				log.Errorf("failed to update chaos result %s", err.Error())
-			}
-			log.Info("[Chaos]: Revert Completed")
-			os.Exit(1)
-		case <-endTime:
-			log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-			endTime = nil
-			break loop
-		}
-	}
-	return killStressCPUParallel(experimentsDetails, targetPodList, clients, chaosDetails)
-}
-
-// killStressCPUSerial kills the stress process running inside the target container
-// Triggered by either the timeout of the chaos duration or termination of the experiment
-func killStressCPUSerial(experimentsDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-	// It will contain all the pod & container details required for exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
-	command := []string{"/bin/sh", "-c", experimentsDetails.ChaosKillCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, ns)
-	out, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, ns), Reason: fmt.Sprintf("failed to revert chaos: %s", out)}
-	}
-	common.SetTargets(podName, "reverted", "pod", chaosDetails)
-	return nil
-}
-
-// killStressCPUParallel kills all the stress processes running inside the target containers
-// Triggered by either the timeout of the chaos duration or termination of the experiment
-func killStressCPUParallel(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-	var errList []string
-	for _, pod := range targetPodList.Items {
-		if err := killStressCPUSerial(experimentsDetails, pod.Name, pod.Namespace, clients, chaosDetails); err != nil {
-			errList = append(errList, err.Error())
-		}
-	}
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/pod-delete/lib/pod-delete.go b/chaoslib/litmus/pod-delete/lib/pod-delete.go
deleted file mode 100644
index a513beb..0000000
--- a/chaoslib/litmus/pod-delete/lib/pod-delete.go
+++ /dev/null
@@ -1,260 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/workloads"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// PreparePodDelete contains the preparation steps before chaos injection
-func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//set up the tunables if provided in range
-	SetChaosTunables(experimentsDetails)
-
-	log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-		"PodsAffectedPerc": experimentsDetails.PodsAffectedPerc,
-		"Sequence":         experimentsDetails.Sequence,
-	})
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// injectChaosInSerialMode deletes the target application pods in serial mode (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
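-	// a zero grace period makes the pod deletion immediate when Force is enabled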
-	GracePeriod := int64(0)
-	//ChaosStartTimeStamp contains the start timestamp of the chaos injection
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-		// Get the target pod details for the chaos execution
-		// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-		if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-		}
-
-		targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get target pods")
-		}
-
-		// deriving the parent name of the target resources
-		for _, pod := range targetPodList.Items {
-			kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient)
-			if err != nil {
-				return stacktrace.Propagate(err, "could not get pod owner name and kind")
-			}
-			common.SetParentName(parentName, kind, pod.Namespace, chaosDetails)
-		}
-		for _, target := range chaosDetails.ParentsResources {
-			common.SetTargets(target.Name, "targeted", target.Kind, chaosDetails)
-		}
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		//Deleting the application pod
-		for _, pod := range targetPodList.Items {
-
-			log.InfoWithValues("[Info]: Killing the following pods", logrus.Fields{
-				"PodName": pod.Name})
-
-			if experimentsDetails.Force {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{GracePeriodSeconds: &GracePeriod})
-			} else {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
-			}
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())}
-			}
-
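-			// when randomness is enabled, CHAOS_INTERVAL is resolved by common.RandomInterval
-			// and may be given as a range (e.g. "5-10", assuming its range syntax);
-			// otherwise it is a fixed number of seconds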
-			switch chaosDetails.Randomness {
-			case true:
-				if err := common.RandomInterval(experimentsDetails.ChaosInterval); err != nil {
-					return stacktrace.Propagate(err, "could not get random chaos interval")
-				}
-			default:
-				//Waiting for the chaos interval after chaos injection
-				if experimentsDetails.ChaosInterval != "" {
-					log.Infof("[Wait]: Wait for the chaos interval %vs", experimentsDetails.ChaosInterval)
-					waitTime, _ := strconv.Atoi(experimentsDetails.ChaosInterval)
-					common.WaitForDuration(waitTime)
-				}
-			}
-
-			//Verify the status of pod after the chaos injection
-			log.Info("[Status]: Verification for the recreation of application pod")
-			for _, parent := range chaosDetails.ParentsResources {
-				target := types.AppDetails{
-					Names:     []string{parent.Name},
-					Kind:      parent.Kind,
-					Namespace: parent.Namespace,
-				}
-				if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-					return stacktrace.Propagate(err, "could not check pod statuses by workload names")
-				}
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-
-	}
-	log.Infof("[Completion]: %v chaos is done", experimentsDetails.ExperimentName)
-
-	return nil
-
-}
-
-// injectChaosInParallelMode deletes the target application pods in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	GracePeriod := int64(0)
-	//ChaosStartTimeStamp contains the start timestamp of the chaos injection
-	ChaosStartTimeStamp := time.Now()
-	duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-	for duration < experimentsDetails.ChaosDuration {
-		// Get the target pod details for the chaos execution
-		// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-		if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or TARGET_PODS"}
-		}
-		targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get target pods")
-		}
-
-		// deriving the parent name of the target resources
-		for _, pod := range targetPodList.Items {
-			kind, parentName, err := workloads.GetPodOwnerTypeAndName(&pod, clients.DynamicClient)
-			if err != nil {
-				return stacktrace.Propagate(err, "could not get pod owner name and kind")
-			}
-			common.SetParentName(parentName, kind, pod.Namespace, chaosDetails)
-		}
-		for _, target := range chaosDetails.ParentsResources {
-			common.SetTargets(target.Name, "targeted", target.Kind, chaosDetails)
-		}
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-
-		//Deleting the application pods
-		for _, pod := range targetPodList.Items {
-
-			log.InfoWithValues("[Info]: Killing the following pods", logrus.Fields{
-				"PodName": pod.Name})
-
-			if experimentsDetails.Force {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{GracePeriodSeconds: &GracePeriod})
-			} else {
-				err = clients.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, v1.DeleteOptions{})
-			}
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to delete the target pod: %s", err.Error())}
-			}
-		}
-
-		switch chaosDetails.Randomness {
-		case true:
-			if err := common.RandomInterval(experimentsDetails.ChaosInterval); err != nil {
-				return stacktrace.Propagate(err, "could not get random chaos interval")
-			}
-		default:
-			//Waiting for the chaos interval after chaos injection
-			if experimentsDetails.ChaosInterval != "" {
-				log.Infof("[Wait]: Wait for the chaos interval %vs", experimentsDetails.ChaosInterval)
-				waitTime, _ := strconv.Atoi(experimentsDetails.ChaosInterval)
-				common.WaitForDuration(waitTime)
-			}
-		}
-
-		//Verify the status of pod after the chaos injection
-		log.Info("[Status]: Verification for the recreation of application pod")
-		for _, parent := range chaosDetails.ParentsResources {
-			target := types.AppDetails{
-				Names:     []string{parent.Name},
-				Kind:      parent.Kind,
-				Namespace: parent.Namespace,
-			}
-			if err = status.CheckUnTerminatedPodStatusesByWorkloadName(target, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				return stacktrace.Propagate(err, "could not check pod statuses by workload names")
-			}
-		}
-		duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-	}
-
-	log.Infof("[Completion]: %v chaos is done", experimentsDetails.ExperimentName)
-
-	return nil
-}
-
-// SetChaosTunables picks a random value within the given range of values
-// If the value is not provided as a range, the initially provided value is used.
-func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
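-
-// For example, assuming common.ValidateRange's range syntax, PODS_AFFECTED_PERC
-// set to "30-60" resolves to a random value between 30 and 60, while a plain
-// "50" is used as-is.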
diff --git a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go b/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go
deleted file mode 100644
index c80a5a1..0000000
--- a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package helper
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"os"
-	"os/exec"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-var (
-	abort, injectAbort chan os.Signal
-	err                error
-)
-
-const (
-	// ProcessAlreadyKilled contains the error message returned when the process has already been killed
-	ProcessAlreadyKilled = "no such process"
-)
-
-// Helper injects the dns chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// injectAbort channel is used to transmit signal notifications.
-	injectAbort = make(chan os.Signal, 1)
-
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-	// Catch and relay certain signal(s) to injectAbort channel.
-	signal.Notify(injectAbort, os.Interrupt, syscall.SIGTERM)
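-	// abort is consumed by the abortWatcher goroutine to revert chaos on
-	// termination; injectAbort is polled around the injection points to bail out early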
-
-	//Fetching all the ENV passed for the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	if err := preparePodDNSChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-
-}
-
-//preparePodDNSChaos contains the preparation steps before chaos injection
-func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-
-	targetList, err := common.ParseTargets(chaosDetails.ChaosPodName)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not parse targets")
-	}
-
-	var targets []targetDetails
-
-	for _, t := range targetList.Target {
-		td := targetDetails{
-			Name:            t.Name,
-			Namespace:       t.Namespace,
-			TargetContainer: t.TargetContainer,
-			Source:          chaosDetails.ChaosPodName,
-		}
-
-		td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container id")
-		}
-
-		// extract out the pid of the target container
-		td.Pid, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container pid")
-		}
-		targets = append(targets, td)
-	}
-
-	// watching for the abort signal and revert the chaos if an abort signal is received
-	go abortWatcher(targets, resultDetails.Name, chaosDetails.ChaosNamespace)
-
-	select {
-	case <-injectAbort:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(1)
-	default:
-	}
-
-	done := make(chan error, 1)
-
-	for index, t := range targets {
-		targets[index].Cmd, err = injectChaos(experimentsDetails, t)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not inject chaos")
-		}
-		log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil {
-			if revertErr := terminateProcess(t); revertErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())}
-			}
-			return stacktrace.Propagate(err, "could not annotate chaosresult")
-		}
-	}
-
-	// record the event inside chaosengine
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	log.Info("[Wait]: Waiting for chaos completion")
-	// wait for the chaos processes to complete in a goroutine and report the result on the done channel
-	go func() {
-		var errList []string
-		for _, t := range targets {
-			if err := t.Cmd.Wait(); err != nil {
-				errList = append(errList, err.Error())
-			}
-		}
-		if len(errList) != 0 {
-			log.Errorf("err: %v", strings.Join(errList, ", "))
-			done <- fmt.Errorf("err: %v", strings.Join(errList, ", "))
-			return
-		}
-		done <- nil
-	}()
-
-	// check the timeout for the command
-	// Note: the timeout fires when the process has not completed even 30s after the chaos duration
-	timeout := time.After((time.Duration(experimentsDetails.ChaosDuration) + 30) * time.Second)
-
-	select {
-	case <-timeout:
-		// the chaos process timed out before completing
-		log.Infof("[Chaos] The chaos process is not yet completed after %vs", experimentsDetails.ChaosDuration+30)
-		log.Info("[Timeout]: Killing the chaos process")
-		var errList []string
-		for _, t := range targets {
-			if err = terminateProcess(t); err != nil {
-				errList = append(errList, err.Error())
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-				errList = append(errList, err.Error())
-			}
-		}
-		if len(errList) != 0 {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-		}
-	case doneErr := <-done:
-		select {
-		case <-injectAbort:
-			// wait for the completion of abort handler
-			time.Sleep(10 * time.Second)
-		default:
-			log.Info("[Info]: Reverting Chaos")
-			var errList []string
-			for _, t := range targets {
-				if err := terminateProcess(t); err != nil {
-					errList = append(errList, err.Error())
-					continue
-				}
-				if err := result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-					errList = append(errList, err.Error())
-				}
-			}
-			if len(errList) != 0 {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-			}
-			return doneErr
-		}
-	}
-
-	return nil
-}
-
-func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, t targetDetails) (*exec.Cmd, error) {
-
-	// prepare dns interceptor
-	var out bytes.Buffer
-	commandTemplate := fmt.Sprintf("sudo TARGET_PID=%d CHAOS_TYPE=%s SPOOF_MAP='%s' TARGET_HOSTNAMES='%s' CHAOS_DURATION=%d MATCH_SCHEME=%s nsutil -p -n -t %d -- dns_interceptor", t.Pid, experimentsDetails.ChaosType, experimentsDetails.SpoofMap, experimentsDetails.TargetHostNames, experimentsDetails.ChaosDuration, experimentsDetails.MatchScheme, t.Pid)
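-	// nsutil runs dns_interceptor inside the target's namespaces; from the usage
-	// here, -p and -n select the pid and net namespaces and -t the target pid
-	// (flag semantics inferred from this call site, not from nsutil docs)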
-	cmd := exec.Command("/bin/bash", "-c", commandTemplate)
-	log.Info(cmd.String())
-	cmd.Stdout = &out
-	cmd.Stderr = &out
-
-	if err = cmd.Start(); err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: experimentsDetails.ChaosPodName, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to inject chaos: %s", out.String())}
-	}
-	return cmd, nil
-}
-
-func terminateProcess(t targetDetails) error {
-	// kill command
-	killTemplate := fmt.Sprintf("sudo kill %d", t.Cmd.Process.Pid)
-	kill := exec.Command("/bin/bash", "-c", killTemplate)
-	var out bytes.Buffer
-	kill.Stderr = &out
-	kill.Stdout = &out
-	if err = kill.Run(); err != nil {
-		if strings.Contains(strings.ToLower(out.String()), ProcessAlreadyKilled) {
-			return nil
-		}
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s}", t.Name, t.Namespace), Reason: fmt.Sprintf("failed to revert chaos %s", out.String())}
-	} else {
-		log.Info("dns interceptor process stopped")
-		log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-	}
-	return nil
-}
-
-// abortWatcher continuously watch for the abort signals
-func abortWatcher(targets []targetDetails, resultName, chaosNS string) {
-
-	<-abort
-
-	log.Info("[Chaos]: Killing process started because of terminated signal received")
-	log.Info("[Abort]: Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		for _, t := range targets {
-			if err = terminateProcess(t); err != nil {
-				log.Errorf("unable to revert for %v pod, err :%v", t.Name, err)
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultName, chaosNS, "reverted", "pod", t.Name); err != nil {
-				log.Errorf("unable to annotate the chaosresult for %v pod, err :%v", t.Name, err)
-			}
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
-
-//getENV fetches all the env variables passed to the helper pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60"))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.TargetHostNames = types.Getenv("TARGET_HOSTNAMES", "")
-	experimentDetails.SpoofMap = types.Getenv("SPOOF_MAP", "")
-	experimentDetails.MatchScheme = types.Getenv("MATCH_SCHEME", "exact")
-	experimentDetails.ChaosType = types.Getenv("CHAOS_TYPE", "error")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-}
-
-type targetDetails struct {
-	Name            string
-	Namespace       string
-	TargetContainer string
-	ContainerId     string
-	Pid             int
-	CommandPid      int
-	Cmd             *exec.Cmd
-	Source          string
-}
diff --git a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go b/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go
deleted file mode 100644
index 3910a48..0000000
--- a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go
+++ /dev/null
@@ -1,285 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-//PrepareAndInjectChaos contains the preparation & injection steps
-func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-	targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName; the helper pod needs its permissions to create events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not  experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode injects the DNS chaos in all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform DNS Chaos
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{
-			"PodName":       pod.Name,
-			"NodeName":      pod.Spec.NodeName,
-			"ContainerName": experimentsDetails.TargetContainer,
-		})
-		runID := stringutils.GetRunID()
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		// check the status of the helper pod and wait till it reaches the running state, else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return errors.Errorf("helper pods are not in running state, err: %v", err)
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-		}
-
-		//Deleting all the helper pods for pod-dns chaos
-		log.Info("[Cleanup]: Deleting the helper pod")
-		if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode injects the DNS chaos in all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	var err error
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	runID := stringutils.GetRunID()
-	targets := common.FilterPodsForNodes(targetPodList, experimentsDetails.TargetContainer)
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	// check the status of the helper pods and wait till they reach the running state, else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	containerNames := []string{experimentsDetails.ExperimentName}
-	if chaosDetails.SideCar != nil {
-		containerNames = append(containerNames, experimentsDetails.ExperimentName+"-sidecar")
-	}
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, containerNames...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-	}
-
-	//Deleting all the helper pod for pod-dns chaos
-	log.Info("[Cleanup]: Deleting all the helper pod")
-	if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error {
-
-	privilegedEnable := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
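-			// hostPID lets the helper see the target container's processes on the node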
-			HostPID:                       true,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			NodeName:                      nodeName,
-			Volumes: []apiv1.Volume{
-				{
-					Name: "cri-socket",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-			},
-
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name dns-chaos",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "cri-socket",
-							MountPath: experimentsDetails.SocketPath,
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the env variables required for the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("TARGET_HOSTNAMES", experimentsDetails.TargetHostNames).
-		SetEnv("SPOOF_MAP", experimentsDetails.SpoofMap).
-		SetEnv("MATCH_SCHEME", experimentsDetails.MatchScheme).
-		SetEnv("CHAOS_TYPE", experimentsDetails.ChaosType).
-		SetEnv("INSTANCE_ID", experimentsDetails.InstanceID).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
diff --git a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go b/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go
deleted file mode 100644
index a289bd5..0000000
--- a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
-)
-
-//PrepareChaos contains the chaos preparation and injection steps
-func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	//Starting the Fio stress experiment
-	if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not inject chaos")
-	}
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// stressStorage uses the Kubernetes exec REST API to run the fio command inside the target container of the target pod
-// It keeps increasing storage utilisation until the maximum available (or allowed) size is reached.
-// TOTAL_CHAOS_DURATION specifies how long the experiment lasts.
-func stressStorage(experimentDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, stressErr chan error) {
-
-	log.Infof("The storage consumption is: %vM", experimentDetails.Size)
-
-	// It will contain all the pod & container details required for exec command
-	execCommandDetails := litmusexec.PodDetails{}
-	fioCmd := fmt.Sprintf("fio --name=testchaos --ioengine=%v --iodepth=%v --rw=%v --bs=%v --size=%vM --numjobs=%v", experimentDetails.IOEngine, experimentDetails.IODepth, experimentDetails.ReadWrite, experimentDetails.BlockSize, experimentDetails.Size, experimentDetails.NumJobs)
-	if experimentDetails.GroupReporting {
-		fioCmd += " --group_reporting"
-	}
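-	// for illustration (hypothetical values; the real ones come from the experiment
-	// ENV), IOEngine=libaio, IODepth=4, ReadWrite=randrw, BlockSize=4k, Size=256,
-	// NumJobs=1 would build:
-	//   fio --name=testchaos --ioengine=libaio --iodepth=4 --rw=randrw --bs=4k --size=256M --numjobs=1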
-	log.Infof("Running the command:\n%v", fioCmd)
-	command := []string{"/bin/sh", "-c", fioCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentDetails.TargetContainer, ns)
-	_, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-
-	stressErr <- err
-}
-
-//experimentExecution orchestrates the experiment by calling stressStorage for every container of every targeted pod
-func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-
-	targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stresses the storage of all target applications in serial mode (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// creating err channel to receive the error from the go routine
-	stressErr := make(chan error)
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	for _, pod := range targetPodList.Items {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-			"Target Container":      experimentsDetails.TargetContainer,
-			"Target Pod":            pod.Name,
-			"Space Consumption(MB)": experimentsDetails.Size,
-		})
-		go stressStorage(experimentsDetails, pod.Name, pod.Namespace, clients, stressErr)
-
-		log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-		// signChan channel is used to transmit signal notifications.
-		signChan := make(chan os.Signal, 1)
-		// Catch and relay certain signal(s) to signChan channel.
-		signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	loop:
-		for {
-			endTime = time.After(timeDelay)
-			select {
-			case err := <-stressErr:
-				// if the stress command fails with any error other than exit code 137, stop further execution and mark the result as fail
-				// exit code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-				// an OOM kill occurs when the stressed resource exceeds the resource limit of the target container
-				if err != nil {
-					if strings.Contains(err.Error(), "137") {
-						log.Warn("Chaos process OOM killed")
-						return nil
-					}
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress storage of target pod: %s", err.Error())}
-				}
-			case <-signChan:
-				log.Info("[Chaos]: Revert Started")
-				if err := killStressSerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients); err != nil {
-					log.Errorf("Error in Kill stress after abortion, err: %v", err)
-				}
-				err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"}
-				failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-				types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-				if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-					log.Errorf("failed to update chaos result %s", err.Error())
-				}
-				log.Info("[Chaos]: Revert Completed")
-				os.Exit(1)
-			case <-endTime:
-				log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-				endTime = nil
-				break loop
-			}
-		}
-		if err := killStressSerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients); err != nil {
-			return stacktrace.Propagate(err, "could not revert chaos")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the storage of all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// creating err channel to receive the error from the go routine
-	stressErr := make(chan error)
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	for _, pod := range targetPodList.Items {
-
-		if experimentsDetails.EngineName != "" {
-			msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-			types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-			events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-		}
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-			"Target Container":        experimentsDetails.TargetContainer,
-			"Target Pod":              pod.Name,
-			"Storage Consumption(MB)": experimentsDetails.Size,
-		})
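-		// every target pod shares the single stressErr channel; the first error received in the wait loop below stops the experiment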
-		go stressStorage(experimentsDetails, pod.Name, pod.Namespace, clients, stressErr)
-	}
-
-	log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-loop:
-	for {
-		endTime = time.After(timeDelay)
-		select {
-		case err := <-stressErr:
-			// any error other than 137 received while executing the stress command skips further execution and marks the result as fail
-			// error code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-			// an OOM kill occurs if the resource to be stressed exceeds the resource limit of the target container
-			if err != nil {
-				if strings.Contains(err.Error(), "137") {
-					log.Warn("Chaos process OOM killed")
-					return nil
-				}
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to inject chaos: %s", err.Error())}
-			}
-		case <-signChan:
-			log.Info("[Chaos]: Revert Started")
-			if err := killStressParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients); err != nil {
-				log.Errorf("Error in killing the stress process after abort, err: %v", err)
-			}
-			err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"}
-			failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-			types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-			if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-				log.Errorf("failed to update chaos result %s", err.Error())
-			}
-			log.Info("[Chaos]: Revert Completed")
-			os.Exit(1)
-		case <-endTime:
-			log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-			break loop
-		}
-	}
-	if err := killStressParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients); err != nil {
-		return stacktrace.Propagate(err, "could not revert chaos")
-	}
-
-	return nil
-}
-
-// killStressSerial kills the stress process running inside the target container
-// It is triggered by either the timeout of the chaos duration or the termination of the experiment
-func killStressSerial(containerName, podName, namespace, KillCmd string, clients clients.ClientSets) error {
-	// It will contain all the pod & container details required for exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
-	command := []string{"/bin/sh", "-c", KillCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, containerName, namespace)
-	out, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, namespace), Reason: fmt.Sprintf("failed to revert chaos: %s", out)}
-	}
-	return nil
-}
-
-// killStressParallel kills all the stress processes running inside the target containers
-// It is triggered by either the timeout of the chaos duration or the termination of the experiment
-func killStressParallel(containerName string, targetPodList corev1.PodList, KillCmd string, clients clients.ClientSets) error {
-	var errList []string
-	for _, pod := range targetPodList.Items {
-		if err := killStressSerial(containerName, pod.Name, pod.Namespace, KillCmd, clients); err != nil {
-			errList = append(errList, err.Error())
-		}
-	}
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go b/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go
deleted file mode 100644
index 18aec9a..0000000
--- a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go
+++ /dev/null
@@ -1,323 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
-)
-
-var inject chan os.Signal
-
-//PrepareMemoryExecStress contains the chaos preparation and injection steps
-func PrepareMemoryExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	//Starting the Memory stress experiment
-	if err := experimentMemory(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-		return stacktrace.Propagate(err, "could not stress memory")
-	}
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// stressMemory uses the REST API to exec into the target container of the target pod
-// It keeps increasing the memory utilisation until it reaches the maximum available or allowed amount
-// TOTAL_CHAOS_DURATION specifies for how long this experiment will last
-func stressMemory(MemoryConsumption, containerName, podName, namespace string, clients clients.ClientSets, stressErr chan error) {
-
-	log.Infof("The memory consumption is: %v", MemoryConsumption)
-
-	// It will contain all the pod & container details required for exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
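-	// dd reads from /dev/zero with a block size of <MemoryConsumption>M, so the target container has to hold a buffer of that many megabytes in memory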
-	ddCmd := fmt.Sprintf("dd if=/dev/zero of=/dev/null bs=%vM", MemoryConsumption)
-	command := []string{"/bin/sh", "-c", ddCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, containerName, namespace)
-	_, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-
-	stressErr <- err
-}
-
-// experimentMemory orchestrates the experiment by calling stressMemory for the target container of every targeted pod
-func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"}
-	}
-
-	targetPodList, err := common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stresses the memory of all the target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range targetPodList.Items {
-
-			// creating err channel to receive the error from the go routine
-			stressErr := make(chan error)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Get the target container name of the application pod
-			if !experimentsDetails.IsTargetContainerProvided {
-				experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-			}
-
-			log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-				"Target Container":       experimentsDetails.TargetContainer,
-				"Target Pod":             pod.Name,
-				"Memory Consumption(MB)": experimentsDetails.MemoryConsumption,
-			})
-			go stressMemory(strconv.Itoa(experimentsDetails.MemoryConsumption), experimentsDetails.TargetContainer, pod.Name, pod.Namespace, clients, stressErr)
-
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-
-			log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-		loop:
-			for {
-				endTime = time.After(timeDelay)
-				select {
-				case err := <-stressErr:
-					// any error other than 137 received while executing the stress command skips further execution and marks the result as fail
-					// error code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-					// an OOM kill occurs if the memory to be stressed exceeds the resource limit of the target container
-					if err != nil {
-						if strings.Contains(err.Error(), "137") {
-							log.Warn("Chaos process OOM killed")
-							return nil
-						}
-						return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("podName: %s, namespace: %s, container: %s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: fmt.Sprintf("failed to stress memory of target pod: %s", err.Error())}
-					}
-				case <-signChan:
-					log.Info("[Chaos]: Revert Started")
-					if err := killStressMemorySerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients, chaosDetails); err != nil {
-						log.Errorf("Error in killing the stress process after abort, err: %v", err)
-					}
-					// updating the chaosresult after stopped
-					err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), Reason: "experiment is aborted"}
-					failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-					types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-					if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-						log.Errorf("failed to update chaos result %s", err.Error())
-					}
-					log.Info("[Chaos]: Revert Completed")
-					os.Exit(1)
-				case <-endTime:
-					log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-					endTime = nil
-					break loop
-				}
-			}
-			if err := killStressMemorySerial(experimentsDetails.TargetContainer, pod.Name, pod.Namespace, experimentsDetails.ChaosKillCmd, clients, chaosDetails); err != nil {
-				return stacktrace.Propagate(err, "could not revert memory stress")
-			}
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode stresses the memory of all the target applications in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// creating err channel to receive the error from the go routine
-	stressErr := make(chan error)
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range targetPodList.Items {
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			//Get the target container name of the application pod
-			//It checks the empty target container for the first iteration only
-			if !experimentsDetails.IsTargetContainerProvided {
-				experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-			}
-
-			log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-				"Target Container":       experimentsDetails.TargetContainer,
-				"Target Pod":             pod.Name,
-				"Memory Consumption(MB)": experimentsDetails.MemoryConsumption,
-			})
-
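-			// all stress goroutines report on the shared stressErr channel, which is drained in the wait loop below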
-			go stressMemory(strconv.Itoa(experimentsDetails.MemoryConsumption), experimentsDetails.TargetContainer, pod.Name, pod.Namespace, clients, stressErr)
-		}
-	}
-
-	log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-loop:
-	for {
-		endTime = time.After(timeDelay)
-		select {
-		case err := <-stressErr:
-			// any error other than 137 received while executing the stress command skips further execution and marks the result as fail
-			// error code 137 (OOM kill) is ignored: further execution is skipped and the result is marked as pass
-			// an OOM kill occurs if the memory to be stressed exceeds the resource limit of the target container
-			if err != nil {
-				if strings.Contains(err.Error(), "137") {
-					log.Warn("Chaos process OOM killed")
-					return nil
-				}
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to stress memory of target pod: %s", err.Error())}
-			}
-		case <-signChan:
-			log.Info("[Chaos]: Revert Started")
-			if err := killStressMemoryParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients, chaosDetails); err != nil {
-				log.Errorf("Error in killing the stress process after abort, err: %v", err)
-			}
-			// updating the chaosresult after stopped
-			err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"}
-			failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-			types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-			if err := result.ChaosResult(chaosDetails, clients, resultDetails, "EOT"); err != nil {
-				log.Errorf("failed to update chaos result %s", err.Error())
-			}
-			log.Info("[Chaos]: Revert Completed")
-			os.Exit(1)
-		case <-endTime:
-			log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-			break loop
-		}
-	}
-	return killStressMemoryParallel(experimentsDetails.TargetContainer, targetPodList, experimentsDetails.ChaosKillCmd, clients, chaosDetails)
-}
-
-// killStressMemorySerial kills the stress process running inside the target container
-// It is triggered by either the timeout of the chaos duration or the termination of the experiment
-func killStressMemorySerial(containerName, podName, namespace, memFreeCmd string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-	// It contains all the pod & container details required for the exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
-	command := []string{"/bin/sh", "-c", memFreeCmd}
-
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, containerName, namespace)
-	out, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", podName, namespace), Reason: fmt.Sprintf("failed to revert chaos: %s", out)}
-	}
-	common.SetTargets(podName, "reverted", "pod", chaosDetails)
-	return nil
-}
-
-// killStressMemoryParallel kills all the stress processes running inside the target containers
-// It is triggered by either the timeout of the chaos duration or the termination of the experiment
-func killStressMemoryParallel(containerName string, targetPodList corev1.PodList, memFreeCmd string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-	var errList []string
-	for _, pod := range targetPodList.Items {
-		if err := killStressMemorySerial(containerName, pod.Name, pod.Namespace, memFreeCmd, clients, chaosDetails); err != nil {
-			errList = append(errList, err.Error())
-		}
-	}
-	if len(errList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/pod-network-partition/lib/network-policy.go b/chaoslib/litmus/pod-network-partition/lib/network-policy.go
deleted file mode 100644
index 786e321..0000000
--- a/chaoslib/litmus/pod-network-partition/lib/network-policy.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/palantir/stacktrace"
-	"strings"
-
-	network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types"
-	"gopkg.in/yaml.v2"
-	corev1 "k8s.io/api/core/v1"
-	networkv1 "k8s.io/api/networking/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
-)
-
-const (
-	// AllIPs cidr contains all ips
-	AllIPs string = "0.0.0.0/0"
-)
-
-// NetworkPolicy contains details about the network-policy
-type NetworkPolicy struct {
-	TargetPodLabels   map[string]string
-	PolicyType        []networkv1.PolicyType
-	Egress            []networkv1.NetworkPolicyEgressRule
-	Ingress           []networkv1.NetworkPolicyIngressRule
-	ExceptIPs         []string
-	NamespaceSelector map[string]string
-	PodSelector       map[string]string
-	Ports             []networkv1.NetworkPolicyPort
-}
-
-// Port contains the port details
-type Port struct {
-	TCP  []int32 `yaml:"tcp"`
-	UDP  []int32 `yaml:"udp"`
-	SCTP []int32 `yaml:"sctp"`
-}
-
-// initialize creates an empty instance of the NetworkPolicy struct
-func initialize() *NetworkPolicy {
-	return &NetworkPolicy{}
-}
-
-// getNetworkPolicyDetails collects all the data required for the network policy
-func (np *NetworkPolicy) getNetworkPolicyDetails(experimentsDetails *experimentTypes.ExperimentDetails) error {
-	np.setLabels(experimentsDetails.AppLabel).
-		setPolicy(experimentsDetails.PolicyTypes).
-		setPodSelector(experimentsDetails.PodSelector).
-		setNamespaceSelector(experimentsDetails.NamespaceSelector)
-
-	// sets the ports for the traffic control
-	if err := np.setPort(experimentsDetails.PORTS); err != nil {
-		return stacktrace.Propagate(err, "could not set port")
-	}
-
-	// sets the destination ips for which the traffic should be blocked
-	if err := np.setExceptIPs(experimentsDetails); err != nil {
-		return stacktrace.Propagate(err, "could not set ips")
-	}
-
-	// sets the egress traffic rules
-	if strings.ToLower(experimentsDetails.PolicyTypes) == "egress" || strings.ToLower(experimentsDetails.PolicyTypes) == "all" {
-		np.setEgressRules()
-	}
-
-	// sets the ingress traffic rules
-	if strings.ToLower(experimentsDetails.PolicyTypes) == "ingress" || strings.ToLower(experimentsDetails.PolicyTypes) == "all" {
-		np.setIngressRules()
-	}
-
-	return nil
-}
-
-// setLabels sets the target application label
-func (np *NetworkPolicy) setLabels(appLabel string) *NetworkPolicy {
-	key, value := getKeyValue(appLabel)
-	if key != "" || value != "" {
-		np.TargetPodLabels = map[string]string{
-			key: value,
-		}
-	}
-	return np
-}
-
-// getKeyValue returns the key & value from the label
-func getKeyValue(label string) (string, string) {
-	labels := strings.Split(label, "=")
-	switch {
-	case len(labels) == 2:
-		return labels[0], labels[1]
-	default:
-		return labels[0], ""
-	}
-}
-
-// setPolicy sets the network policy types
-func (np *NetworkPolicy) setPolicy(policy string) *NetworkPolicy {
-	switch strings.ToLower(policy) {
-	case "ingress":
-		np.PolicyType = []networkv1.PolicyType{networkv1.PolicyTypeIngress}
-	case "egress":
-		np.PolicyType = []networkv1.PolicyType{networkv1.PolicyTypeEgress}
-	default:
-		np.PolicyType = []networkv1.PolicyType{networkv1.PolicyTypeEgress, networkv1.PolicyTypeIngress}
-	}
-	return np
-}
-
-// setPodSelector sets the pod labels selector
-func (np *NetworkPolicy) setPodSelector(podLabel string) *NetworkPolicy {
-	podSelector := map[string]string{}
-	labels := strings.Split(podLabel, ",")
-	for i := range labels {
-		key, value := getKeyValue(labels[i])
-		if key != "" || value != "" {
-			podSelector[key] = value
-		}
-	}
-	np.PodSelector = podSelector
-	return np
-}
-
-// setNamespaceSelector sets the namespace labels selector
-func (np *NetworkPolicy) setNamespaceSelector(nsLabel string) *NetworkPolicy {
-	nsSelector := map[string]string{}
-	labels := strings.Split(nsLabel, ",")
-	for i := range labels {
-		key, value := getKeyValue(labels[i])
-		if key != "" || value != "" {
-			nsSelector[key] = value
-		}
-	}
-	np.NamespaceSelector = nsSelector
-	return np
-}
-
-// setPort sets all the protocols and ports
-func (np *NetworkPolicy) setPort(p string) error {
-	var ports []networkv1.NetworkPolicyPort
-	var port Port
-	// unmarshal the protocols and ports from the env
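-	// e.g. PORTS="tcp: [8080,80], udp: [53]" is normalised by parseCommand into newline-separated YAML before unmarshalling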
-	if err := yaml.Unmarshal([]byte(strings.TrimSpace(parseCommand(p))), &port); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to unmarshal ports: %s", err.Error())}
-	}
-
-	// sets all the tcp ports
-	for _, p := range port.TCP {
-		ports = append(ports, getPort(p, corev1.ProtocolTCP))
-	}
-
-	// sets all the udp ports
-	for _, p := range port.UDP {
-		ports = append(ports, getPort(p, corev1.ProtocolUDP))
-	}
-
-	// sets all the sctp ports
-	for _, p := range port.SCTP {
-		ports = append(ports, getPort(p, corev1.ProtocolSCTP))
-	}
-
-	np.Ports = ports
-	return nil
-}
-
-// getPort returns the port details
-func getPort(port int32, protocol corev1.Protocol) networkv1.NetworkPolicyPort {
-	networkPorts := networkv1.NetworkPolicyPort{
-		Protocol: &protocol,
-		Port: &intstr.IntOrString{
-			Type:   intstr.Int,
-			IntVal: port,
-		},
-	}
-	return networkPorts
-}
-
-// setExceptIPs sets all the destination ips
-// for which traffic should be blocked
-func (np *NetworkPolicy) setExceptIPs(experimentsDetails *experimentTypes.ExperimentDetails) error {
-	// get all the target ips
-	destinationIPs, err := network_chaos.GetTargetIps(experimentsDetails.DestinationIPs, experimentsDetails.DestinationHosts, clients.ClientSets{}, false)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get destination ips")
-	}
-
-	ips := strings.Split(destinationIPs, ",")
-	var uniqueIps []string
-	// removing all the duplicates and ipv6 ips from the list, if any
-	for i := range ips {
-		isPresent := false
-		for j := range uniqueIps {
-			if ips[i] == uniqueIps[j] {
-				isPresent = true
-			}
-		}
-		if ips[i] != "" && !isPresent && !strings.Contains(ips[i], ":") {
-			uniqueIps = append(uniqueIps, ips[i]+"/32")
-		}
-	}
-	np.ExceptIPs = uniqueIps
-	return nil
-}
-
-// setIngressRules sets the ingress traffic rules
-func (np *NetworkPolicy) setIngressRules() *NetworkPolicy {
-
-	if len(np.getPeers()) != 0 || len(np.Ports) != 0 {
-		np.Ingress = []networkv1.NetworkPolicyIngressRule{
-			{
-				From:  np.getPeers(),
-				Ports: np.Ports,
-			},
-		}
-	}
-	return np
-}
-
-// setEgressRules sets the egress traffic rules
-func (np *NetworkPolicy) setEgressRules() *NetworkPolicy {
-
-	if len(np.getPeers()) != 0 || len(np.Ports) != 0 {
-		np.Egress = []networkv1.NetworkPolicyEgressRule{
-			{
-				To:    np.getPeers(),
-				Ports: np.Ports,
-			},
-		}
-	}
-	return np
-}
-
-// getPeers returns the peers' IPs, namespace selectors, and pod selectors
-func (np *NetworkPolicy) getPeers() []networkv1.NetworkPolicyPeer {
-	var peers []networkv1.NetworkPolicyPeer
-
-	// sets the namespace selectors
-	if np.NamespaceSelector != nil && len(np.NamespaceSelector) != 0 {
-		peers = append(peers, np.getNamespaceSelector())
-	}
-
-	// sets the pod selectors
-	if np.PodSelector != nil && len(np.PodSelector) != 0 {
-		peers = append(peers, np.getPodSelector())
-	}
-
-	// sets the ipblocks
-	if np.ExceptIPs != nil && len(np.ExceptIPs) != 0 {
-		peers = append(peers, np.getIPBlocks())
-	}
-
-	return peers
-}
-
-// getNamespaceSelector builds the namespace selector
-func (np *NetworkPolicy) getNamespaceSelector() networkv1.NetworkPolicyPeer {
-	nsSelector := networkv1.NetworkPolicyPeer{
-		NamespaceSelector: &v1.LabelSelector{
-			MatchLabels: np.NamespaceSelector,
-		},
-	}
-	return nsSelector
-}
-
-// getPodSelector builds the pod selectors
-func (np *NetworkPolicy) getPodSelector() networkv1.NetworkPolicyPeer {
-	podSelector := networkv1.NetworkPolicyPeer{
-		PodSelector: &v1.LabelSelector{
-			MatchLabels: np.PodSelector,
-		},
-	}
-	return podSelector
-}
-
-// getIPBlocks builds the ipblocks
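-// it allows the whole 0.0.0.0/0 range except the destination IPs, so traffic to those IPs is effectively blocked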
-func (np *NetworkPolicy) getIPBlocks() networkv1.NetworkPolicyPeer {
-	ipBlocks := networkv1.NetworkPolicyPeer{
-		IPBlock: &networkv1.IPBlock{
-			CIDR:   AllIPs,
-			Except: np.ExceptIPs,
-		},
-	}
-	return ipBlocks
-}
-
-// parseCommand parses the protocols and ports
-func parseCommand(command string) string {
-	final := ""
-	c := strings.Split(command, ", ")
-	for i := range c {
-		final = final + strings.TrimSpace(c[i]) + "\n"
-	}
-	return final
-}
diff --git a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go b/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go
deleted file mode 100644
index 7fb31b0..0000000
--- a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go
+++ /dev/null
@@ -1,254 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	corev1 "k8s.io/api/core/v1"
-	networkv1 "k8s.io/api/networking/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-var (
-	inject, abort chan os.Signal
-)
-
-// PrepareAndInjectChaos contains the preparation & injection steps
-func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
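-	// both channels receive the same signals: inject short-circuits before the policy is created, while abortWatcher reverts chaos that is already injected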
-
-	// validate the appLabels
-	if chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide the appLabel"}
-	}
-
-	// Get the target pod details for the chaos execution
-	targetPodList, err := common.GetPodList("", 100, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	podNames := []string{}
-	for _, pod := range targetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("Target pods list for chaos, %v", podNames)
-
-	// generate a unique string
-	runID := stringutils.GetRunID()
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// collect all the data for the network policy
-	np := initialize()
-	if err := np.getNetworkPolicyDetails(experimentsDetails); err != nil {
-		return stacktrace.Propagate(err, "could not get network policy details")
-	}
-
-	//DISPLAY THE NETWORK POLICY DETAILS
-	log.InfoWithValues("The Network policy details are as follows", logrus.Fields{
-		"Target Label":      np.TargetPodLabels,
-		"Policy Type":       np.PolicyType,
-		"PodSelector":       np.PodSelector,
-		"NamespaceSelector": np.NamespaceSelector,
-		"Destination IPs":   np.ExceptIPs,
-		"Ports":             np.Ports,
-	})
-
-	// watch for the abort signal and revert the chaos
-	go abortWatcher(experimentsDetails, clients, chaosDetails, resultDetails, &targetPodList, runID)
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		// creating the network policy to block the traffic
-		if err := createNetworkPolicy(experimentsDetails, clients, np, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create network policy")
-		}
-		// updating chaos status to injected for the target pods
-		for _, pod := range targetPodList.Items {
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-		}
-	}
-
-	// verify the presence of network policy inside cluster
-	if err := checkExistenceOfPolicy(experimentsDetails, clients, experimentsDetails.Timeout, experimentsDetails.Delay, runID); err != nil {
-		return stacktrace.Propagate(err, "could not check existence of network policy")
-	}
-
-	log.Infof("[Wait]: Wait for %v chaos duration", experimentsDetails.ChaosDuration)
-	common.WaitForDuration(experimentsDetails.ChaosDuration)
-
-	// deleting the network policy after chaos duration over
-	if err := deleteNetworkPolicy(experimentsDetails, clients, &targetPodList, chaosDetails, experimentsDetails.Timeout, experimentsDetails.Delay, runID); err != nil {
-		return stacktrace.Propagate(err, "could not delete network policy")
-	}
-
-	// updating chaos status to reverted for the target pods
-	for _, pod := range targetPodList.Items {
-		common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// createNetworkPolicy creates the network policy in the application namespace
-// it blocks ingress/egress traffic for the targeted application for specific/all IPs
-func createNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, networkPolicy *NetworkPolicy, runID string) error {
-
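-	// the policy is named <experiment-name>-np-<runID>; the matching "name" label lets checkExistenceOfPolicy and deleteNetworkPolicy find it later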
-	np := &networkv1.NetworkPolicy{
-		ObjectMeta: v1.ObjectMeta{
-			Name:      experimentsDetails.ExperimentName + "-np-" + runID,
-			Namespace: experimentsDetails.AppNS,
-			Labels: map[string]string{
-				"name":                      experimentsDetails.ExperimentName + "-np-" + runID,
-				"chaosUID":                  string(experimentsDetails.ChaosUID),
-				"app.kubernetes.io/part-of": "litmus",
-			},
-		},
-		Spec: networkv1.NetworkPolicySpec{
-			PodSelector: v1.LabelSelector{
-				MatchLabels: networkPolicy.TargetPodLabels,
-			},
-			PolicyTypes: networkPolicy.PolicyType,
-			Egress:      networkPolicy.Egress,
-			Ingress:     networkPolicy.Ingress,
-		},
-	}
-
-	_, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).Create(context.Background(), np, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to create network policy: %s", err.Error())}
-	}
-	return nil
-}
-
-// deleteNetworkPolicy deletes the network policy and waits until it is deleted completely
-func deleteNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, targetPodList *corev1.PodList, chaosDetails *types.ChaosDetails, timeout, delay int, runID string) error {
-	name := experimentsDetails.ExperimentName + "-np-" + runID
-	labels := "name=" + experimentsDetails.ExperimentName + "-np-" + runID
-	if err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).Delete(context.Background(), name, v1.DeleteOptions{}); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{name: %s, namespace: %s}", name, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to delete network policy: %s", err.Error())}
-	}
-
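-	// poll until no network policy with the run-specific label remains in the namespace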
-	err := retry.
-		Times(uint(timeout / delay)).
-		Wait(time.Duration(delay) * time.Second).
-		Try(func(attempt uint) error {
-			npList, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).List(context.Background(), v1.ListOptions{LabelSelector: labels})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to list network policies: %s", err.Error())}
-			} else if len(npList.Items) != 0 {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: "network policies are not deleted within timeout"}
-			}
-			return nil
-		})
-
-	if err != nil {
-		return err
-	}
-
-	for _, pod := range targetPodList.Items {
-		common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-	}
-	return nil
-}
-
-// checkExistenceOfPolicy validates the presence of the network policy inside the application namespace
-func checkExistenceOfPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, timeout, delay int, runID string) error {
-	labels := "name=" + experimentsDetails.ExperimentName + "-np-" + runID
-
-	return retry.
-		Times(uint(timeout / delay)).
-		Wait(time.Duration(delay) * time.Second).
-		Try(func(attempt uint) error {
-			npList, err := clients.KubeClient.NetworkingV1().NetworkPolicies(experimentsDetails.AppNS).List(context.Background(), v1.ListOptions{LabelSelector: labels})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: fmt.Sprintf("failed to list network policies: %s", err.Error())}
-			} else if len(npList.Items) == 0 {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{labels: %s, namespace: %s}", labels, experimentsDetails.AppNS), Reason: "no network policy found with matching labels"}
-			}
-			return nil
-		})
-}
-
-// abortWatcher continuously watches for the abort signals
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, targetPodList *corev1.PodList, runID string) {
-	// waiting till the abort signal is received
-	<-abort
-
-	log.Info("[Chaos]: Killing process started because a termination signal was received")
-	log.Info("Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		if err := checkExistenceOfPolicy(experimentsDetails, clients, 2, 1, runID); err != nil {
-			if cerr, ok := err.(cerrors.Error); ok {
-				if strings.Contains(cerr.Reason, "no network policy found with matching labels") {
-					break
-				}
-			}
-			log.Infof("unable to verify the presence of the network policy, err: %v", err.Error())
-			retry--
-			continue
-		}
-
-		if err := deleteNetworkPolicy(experimentsDetails, clients, targetPodList, chaosDetails, 2, 1, runID); err != nil {
-			log.Errorf("unable to delete network policy, err: %v", err)
-		}
-		retry--
-	}
-	// updating the chaosresult after stopped
-	err := cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Reason: "experiment is aborted"}
-	failStep, errCode := cerrors.GetRootCauseAndErrorCode(err, string(chaosDetails.Phase))
-	types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, errCode)
-	result.ChaosResult(chaosDetails, clients, resultDetails, "EOT")
-	log.Info("Chaos Revert Completed")
-	os.Exit(0)
-}
diff --git a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go b/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go
deleted file mode 100644
index 659179b..0000000
--- a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"time"
-
-	redfishLib "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/palantir/stacktrace"
-)
-
-// injectChaos initiates node restart chaos on the target node
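-// it issues a ComputerSystem.Reset action against the node's Redfish endpoint using the configured IPMI IP and credentials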
-func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-	URL := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset", experimentsDetails.IPMIIP)
-	return redfishLib.RebootNode(URL, experimentsDetails.User, experimentsDetails.Password)
-}
-
-// experimentExecution orchestrates the experiment by calling injectChaos
-func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + experimentsDetails.IPMIIP + " node"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	if err := injectChaos(experimentsDetails, clients); err != nil {
-		return stacktrace.Propagate(err, "chaos injection failed")
-	}
-
-	log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-	time.Sleep(time.Duration(experimentsDetails.ChaosDuration) * time.Second)
-	return nil
-}
-
-// PrepareChaos contains the chaos preparation and injection steps
-func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	//Starting the Redfish node restart experiment
-	if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-		return err
-	}
-	common.SetTargets(experimentsDetails.IPMIIP, "targeted", "node", chaosDetails)
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
diff --git a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go b/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go
deleted file mode 100644
index cb5bbb4..0000000
--- a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go
+++ /dev/null
@@ -1,393 +0,0 @@
-package lib
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	corev1 "k8s.io/api/core/v1"
-
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/spring-boot/spring-boot-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
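-// revertAssault deactivates every assault type; disableChaosMonkey posts it to the assaults endpoint to stop all running assaults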
-var revertAssault = experimentTypes.ChaosMonkeyAssaultRevert{
-	LatencyActive:         false,
-	KillApplicationActive: false,
-	CPUActive:             false,
-	MemoryActive:          false,
-	ExceptionsActive:      false,
-}
-
-// SetTargetPodList selects the target pods and adds them to the experiment details
-func SetTargetPodList(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error {
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	var err error
-
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "please provide one of the appLabel or TARGET_PODS"}
-	}
-	if experimentsDetails.TargetPodList, err = common.GetPodList(experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails); err != nil {
-		return err
-	}
-	return nil
-
-}
-
-// PrepareChaos contains the preparation steps before chaos injection
-func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	// Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	log.InfoWithValues("[Info]: Chaos monkeys watchers will be injected to the target pods as follows", logrus.Fields{
-		"WebClient":      experimentsDetails.ChaosMonkeyWatchers.WebClient,
-		"Service":        experimentsDetails.ChaosMonkeyWatchers.Service,
-		"Component":      experimentsDetails.ChaosMonkeyWatchers.Component,
-		"Repository":     experimentsDetails.ChaosMonkeyWatchers.Repository,
-		"Controller":     experimentsDetails.ChaosMonkeyWatchers.Controller,
-		"RestController": experimentsDetails.ChaosMonkeyWatchers.RestController,
-	})
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	// Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-	return nil
-}
-
-// CheckChaosMonkey verifies that chaos monkey for spring boot is available in the selected pods
-// All pods are checked even if some checks fail; if any pod is in error, the check returns an error
-func CheckChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, targetPods corev1.PodList) (bool, error) {
-	hasErrors := false
-
-	targetPodNames := []string{}
-
-	for _, pod := range targetPods.Items {
-
-		targetPodNames = append(targetPodNames, pod.Name)
-
-		endpoint := "http://" + pod.Status.PodIP + ":" + chaosMonkeyPort + chaosMonkeyPath
-		log.Infof("[Check]: Checking pod: %v (endpoint: %v)", pod.Name, endpoint)
-
-		resp, err := http.Get(endpoint)
-		if err != nil {
-			log.Errorf("failed to request chaos monkey endpoint on pod %s, %s", pod.Name, err.Error())
-			hasErrors = true
-			continue
-		}
-
-		if resp.StatusCode != 200 {
-			log.Errorf("failed to get chaos monkey endpoint on pod %s (status: %d)", pod.Name, resp.StatusCode)
-			hasErrors = true
-		}
-	}
-
-	if hasErrors {
-		return false, cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{podNames: %s}", targetPodNames), Reason: "failed to check chaos monkey on at least one pod, check logs for details"}
-	}
-	return true, nil
-}
-
-// enableChaosMonkey enables chaos monkey on selected pods
-func enableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod corev1.Pod) error {
-	log.Infof("[Chaos]: Enabling Chaos Monkey on pod: %v", pod.Name)
-	resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/enable", "", nil) //nolint:bodyclose
-	if err != nil {
-		return err
-	}
-
-	if resp.StatusCode != 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to enable chaos monkey endpoint (status: %d)", resp.StatusCode)}
-	}
-
-	return nil
-}
-
-func setChaosMonkeyWatchers(chaosMonkeyPort string, chaosMonkeyPath string, watchers experimentTypes.ChaosMonkeyWatchers, pod corev1.Pod) error {
-	log.Infof("[Chaos]: Setting Chaos Monkey watchers on pod: %v", pod.Name)
-
-	jsonValue, err := json.Marshal(watchers)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to marshal chaos monkey watchers, %s", err.Error())}
-	}
-
-	resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/watchers", "application/json", bytes.NewBuffer(jsonValue))
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to set watchers, %s", err.Error())}
-	}
-
-	if resp.StatusCode != 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to set watchers (status: %d)", resp.StatusCode)}
-	}
-
-	return nil
-}
-
-func startAssault(chaosMonkeyPort string, chaosMonkeyPath string, assault []byte, pod corev1.Pod) error {
-	if err := setChaosMonkeyAssault(chaosMonkeyPort, chaosMonkeyPath, assault, pod); err != nil {
-		return err
-	}
-	log.Infof("[Chaos]: Activating Chaos Monkey assault on pod: %v", pod.Name)
-	resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/assaults/runtime/attack", "", nil)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to start assault %s", err.Error())}
-	}
-
-	if resp.StatusCode != 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to activate runtime attack (status: %d)", resp.StatusCode)}
-	}
-	return nil
-}
-
-func setChaosMonkeyAssault(chaosMonkeyPort string, chaosMonkeyPath string, assault []byte, pod corev1.Pod) error {
-	log.Infof("[Chaos]: Setting Chaos Monkey assault on pod: %v", pod.Name)
-
-	resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/assaults", "application/json", bytes.NewBuffer(assault))
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to set assault, %s", err.Error())}
-	}
-
-	if resp.StatusCode != 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to set assault (status: %d)", resp.StatusCode)}
-	}
-	return nil
-}
-
-// disableChaosMonkey disables chaos monkey on selected pods
-func disableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod corev1.Pod) error {
-	log.Infof("[Chaos]: disabling assaults on pod %s", pod.Name)
-	jsonValue, err := json.Marshal(revertAssault)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to marshal the chaos monkey revert assault, %s", err.Error())}
-	}
-	if err := setChaosMonkeyAssault(chaosMonkeyPort, chaosMonkeyPath, jsonValue, pod); err != nil {
-		return err
-	}
-
-	log.Infof("[Chaos]: disabling chaos monkey on pod %s", pod.Name)
-	resp, err := http.Post("http://"+pod.Status.PodIP+":"+chaosMonkeyPort+chaosMonkeyPath+"/disable", "", nil)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to call the chaos monkey api to disable assault, %s", err.Error())}
-	}
-
-	if resp.StatusCode != 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Target: fmt.Sprintf("{podName: %s, namespace: %s}", pod.Name, pod.Namespace), Reason: fmt.Sprintf("failed to disable chaos monkey endpoint (status: %d)", resp.StatusCode)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode injects chaos monkey assault on pods in serial mode (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-signChan:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range experimentsDetails.TargetPodList.Items {
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				_ = events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			log.InfoWithValues("[Chaos]: Injecting on target pod", logrus.Fields{
-				"Target Pod": pod.Name,
-			})
-
-			if err := setChaosMonkeyWatchers(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyWatchers, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to set watchers, err: %v ", err)
-				return err
-			}
-
-			if err := startAssault(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyAssault, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to set assault, err: %v ", err)
-				return err
-			}
-
-			if err := enableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to enable chaos, err: %v ", err)
-				return err
-			}
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-
-			log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-
-			endTime = time.After(timeDelay)
-		loop:
-			for {
-				select {
-				case <-signChan:
-					log.Info("[Chaos]: Revert Started")
-					if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-						log.Errorf("Error in disabling chaos monkey, err: %v", err)
-					} else {
-						common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-					}
-					// updating the chaosresult after stopped
-					failStep := "Chaos injection stopped!"
-					types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, cerrors.ErrorTypeExperimentAborted)
-					result.ChaosResult(chaosDetails, clients, resultDetails, "EOT")
-					log.Info("[Chaos]: Revert Completed")
-					os.Exit(1)
-				case <-endTime:
-					log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-					endTime = nil
-					break loop
-				}
-			}
-
-			if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-				return err
-			}
-
-			common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-		}
-	}
-	return nil
-
-}
-
-// injectChaosInParallelMode injects chaos monkey assault on pods in parallel mode (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// signChan channel is used to transmit signal notifications.
-	signChan := make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to signChan channel.
-	signal.Notify(signChan, os.Interrupt, syscall.SIGTERM)
-
-	var endTime <-chan time.Time
-	timeDelay := time.Duration(experimentsDetails.ChaosDuration) * time.Second
-
-	select {
-	case <-signChan:
-		// stopping the chaos execution, if abort signal received
-		time.Sleep(10 * time.Second)
-		os.Exit(0)
-	default:
-		for _, pod := range experimentsDetails.TargetPodList.Items {
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on " + pod.Name + " pod"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				_ = events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			log.InfoWithValues("[Chaos]: The Target application details", logrus.Fields{
-				"Target Pod": pod.Name,
-			})
-
-			if err := setChaosMonkeyWatchers(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyWatchers, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to set watchers, err: %v", err)
-				return err
-			}
-
-			if err := startAssault(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.ChaosMonkeyAssault, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to set assault, err: %v", err)
-				return err
-			}
-
-			if err := enableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-				log.Errorf("[Chaos]: Failed to enable chaos, err: %v", err)
-				return err
-			}
-			common.SetTargets(pod.Name, "injected", "pod", chaosDetails)
-		}
-		log.Infof("[Chaos]: Waiting for: %vs", experimentsDetails.ChaosDuration)
-	}
-loop:
-	for {
-		endTime = time.After(timeDelay)
-		select {
-		case <-signChan:
-			log.Info("[Chaos]: Revert Started")
-			for _, pod := range experimentsDetails.TargetPodList.Items {
-				if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-					log.Errorf("Error in disabling chaos monkey, err: %v", err)
-				} else {
-					common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-				}
-			}
-			// updating the chaosresult after stopped
-			failStep := "Chaos injection stopped!"
-			types.SetResultAfterCompletion(resultDetails, "Stopped", "Stopped", failStep, cerrors.ErrorTypeExperimentAborted)
-			result.ChaosResult(chaosDetails, clients, resultDetails, "EOT")
-			log.Info("[Chaos]: Revert Completed")
-			os.Exit(1)
-		case <-endTime:
-			log.Infof("[Chaos]: Time is up for experiment: %v", experimentsDetails.ExperimentName)
-			endTime = nil
-			break loop
-		}
-	}
-
-	var errorList []string
-	for _, pod := range experimentsDetails.TargetPodList.Items {
-		if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil {
-			errorList = append(errorList, err.Error())
-			continue
-		}
-		common.SetTargets(pod.Name, "reverted", "pod", chaosDetails)
-	}
-
-	if len(errorList) != 0 {
-		return cerrors.PreserveError{ErrString: fmt.Sprintf("error in disabling chaos monkey, [%s]", strings.Join(errorList, ","))}
-	}
-	return nil
-}
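The deleted spring-boot-chaos code above toggles assaults through Chaos Monkey for Spring Boot's HTTP endpoints. A minimal, self-contained sketch of that contract follows; the pod IP, port, and base path are illustrative assumptions (the default actuator base path is typically /actuator/chaosmonkey), not values recovered from the deleted code.

package main

import (
	"fmt"
	"net/http"
)

// toggleChaosMonkey posts to the enable/disable endpoint exposed by Chaos
// Monkey for Spring Boot and fails on any non-200 response, mirroring the
// status check in the deleted disableChaosMonkey above.
func toggleChaosMonkey(podIP, port, basePath, action string) error {
	url := fmt.Sprintf("http://%s:%s%s/%s", podIP, port, basePath, action)
	resp, err := http.Post(url, "", nil)
	if err != nil {
		return fmt.Errorf("failed to call chaos monkey endpoint %s: %w", url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("chaos monkey endpoint %s returned status %d", url, resp.StatusCode)
	}
	return nil
}

func main() {
	// hypothetical target pod; the real values come from the pod status and ENVs
	if err := toggleChaosMonkey("10.0.0.12", "8080", "/actuator/chaosmonkey", "disable"); err != nil {
		fmt.Println(err)
	}
}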
diff --git a/chaoslib/litmus/stress-chaos/helper/stress-helper.go b/chaoslib/litmus/stress-chaos/helper/stress-helper.go
deleted file mode 100644
index 1846d01..0000000
--- a/chaoslib/litmus/stress-chaos/helper/stress-helper.go
+++ /dev/null
@@ -1,577 +0,0 @@
-package helper
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-	"io"
-	"os"
-	"os/exec"
-	"os/signal"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/containerd/cgroups"
-	cgroupsv2 "github.com/containerd/cgroups/v2"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// list of cgroup subsystems in a container
-var (
-	cgroupSubsystemList = []string{"cpu", "memory", "systemd", "net_cls",
-		"net_prio", "freezer", "blkio", "perf_event", "devices", "cpuset",
-		"cpuacct", "pids", "hugetlb",
-	}
-)
-
-var (
-	err           error
-	inject, abort chan os.Signal
-)
-
-const (
-	// ProcessAlreadyFinished contains the error message returned when the process has already finished
-	ProcessAlreadyFinished = "os: process already finished"
-	// ProcessAlreadyKilled contains the error message returned when the process has already been killed
-	ProcessAlreadyKilled = "no such process"
-)
-
-// Helper injects the stress chaos
-func Helper(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	resultDetails := types.ResultDetails{}
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Fetching all the ENV passed for the helper pod
-	log.Info("[PreReq]: Getting the ENV variables")
-	getENV(&experimentsDetails)
-
-	// Initialise the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	// Initialise Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	if err := prepareStressChaos(&experimentsDetails, clients, &eventsDetails, &chaosDetails, &resultDetails); err != nil {
-		// update failstep inside chaosresult
-		if resultErr := result.UpdateFailedStepFromHelper(&resultDetails, &chaosDetails, clients, err); resultErr != nil {
-			log.Fatalf("helper pod failed, err: %v, resultErr: %v", err, resultErr)
-		}
-		log.Fatalf("helper pod failed, err: %v", err)
-	}
-}
-
-//prepareStressChaos contains the chaos preparation and injection steps
-func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error {
-	// get stressors in list format
-	stressorList := prepareStressor(experimentsDetails)
-	if len(stressorList) == 0 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: chaosDetails.ChaosPodName, Reason: "fail to prepare stressors"}
-	}
-	stressors := strings.Join(stressorList, " ")
-
-	targetList, err := common.ParseTargets(chaosDetails.ChaosPodName)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not parse targets")
-	}
-
-	var targets []targetDetails
-
-	for _, t := range targetList.Target {
-		td := targetDetails{
-			Name:            t.Name,
-			Namespace:       t.Namespace,
-			TargetContainer: t.TargetContainer,
-			Source:          chaosDetails.ChaosPodName,
-		}
-
-		td.ContainerId, err = common.GetContainerID(td.Namespace, td.Name, td.TargetContainer, clients, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container id")
-		}
-
-		// extract out the pid of the target container
-		td.Pid, err = common.GetPID(experimentsDetails.ContainerRuntime, td.ContainerId, experimentsDetails.SocketPath, td.Source)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get container pid")
-		}
-
-		td.CGroupManager, err = getCGroupManager(td)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not get cgroup manager")
-		}
-		targets = append(targets, td)
-	}
-
-	// watch for the abort signal and revert the chaos if one is received
-	go abortWatcher(targets, resultDetails.Name, chaosDetails.ChaosNamespace)
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(1)
-	default:
-	}
-
-	done := make(chan error, 1)
-
-	for index, t := range targets {
-		targets[index].Cmd, err = injectChaos(t, stressors)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not inject chaos")
-		}
-		log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-		if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil {
-			if revertErr := terminateProcess(t); revertErr != nil {
-				return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())}
-			}
-			return stacktrace.Propagate(err, "could not annotate chaosresult")
-		}
-	}
-
-	// record the event inside chaosengine
-	if experimentsDetails.EngineName != "" {
-		msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on application pod"
-		types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-		events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-	}
-
-	log.Info("[Wait]: Waiting for chaos completion")
-	// wait on the stress processes and report their completion over the done channel
-	go func() {
-		var errList []string
-		var exitErr error
-		for _, t := range targets {
-			if err := t.Cmd.Wait(); err != nil {
-				if _, ok := err.(*exec.ExitError); ok {
-					exitErr = err
-					continue
-				}
-				errList = append(errList, err.Error())
-			}
-		}
-		if exitErr != nil {
-			done <- exitErr
-		} else if len(errList) != 0 {
-			done <- fmt.Errorf("err: %v", strings.Join(errList, ", "))
-		} else {
-			done <- nil
-		}
-	}()
-
-	// check the timeout for the command
-	// Note: the timeout fires when the process has not completed even 30s after the chaos duration
-	timeout := time.After((time.Duration(experimentsDetails.ChaosDuration) + 30) * time.Second)
-
-	select {
-	case <-timeout:
-		// the stress process timed out before completing
-		log.Infof("[Chaos]: The stress process has not completed within %vs (chaos duration plus a 30s grace period)", experimentsDetails.ChaosDuration+30)
-		log.Info("[Timeout]: Killing the stress process")
-		var errList []string
-		for _, t := range targets {
-			if err = terminateProcess(t); err != nil {
-				errList = append(errList, err.Error())
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-				errList = append(errList, err.Error())
-			}
-		}
-		if len(errList) != 0 {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-		}
-	case err := <-done:
-		if err != nil {
-			exitErr, ok := err.(*exec.ExitError)
-			if ok {
-				status := exitErr.Sys().(syscall.WaitStatus)
-				if status.Signaled() && status.Signal() == syscall.SIGKILL {
-					// wait for the completion of abort handler
-					time.Sleep(10 * time.Second)
-					return cerrors.Error{ErrorCode: cerrors.ErrorTypeExperimentAborted, Source: chaosDetails.ChaosPodName, Reason: "process stopped with SIGKILL signal"}
-				}
-			}
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: chaosDetails.ChaosPodName, Reason: err.Error()}
-		}
-		log.Info("[Info]: Reverting Chaos")
-		var errList []string
-		for _, t := range targets {
-			if err := terminateProcess(t); err != nil {
-				errList = append(errList, err.Error())
-				continue
-			}
-			log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-			if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "reverted", "pod", t.Name); err != nil {
-				errList = append(errList, err.Error())
-			}
-		}
-		if len(errList) != 0 {
-			return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s]", strings.Join(errList, ","))}
-		}
-	}
-
-	return nil
-}
-
-//terminateProcess will remove the stress process from the target container after chaos completion
-func terminateProcess(t targetDetails) error {
-	if err := syscall.Kill(-t.Cmd.Process.Pid, syscall.SIGKILL); err != nil {
-		if strings.Contains(err.Error(), ProcessAlreadyKilled) || strings.Contains(err.Error(), ProcessAlreadyFinished) {
-			return nil
-		}
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to revert chaos: %s", err.Error())}
-	}
-	log.Infof("successfully reverted chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer)
-	return nil
-}
-
-//prepareStressor will set the required stressors for the given experiment
-func prepareStressor(experimentDetails *experimentTypes.ExperimentDetails) []string {
-
-	stressArgs := []string{
-		"stress-ng",
-		"--timeout",
-		strconv.Itoa(experimentDetails.ChaosDuration) + "s",
-	}
-
-	switch experimentDetails.StressType {
-	case "pod-cpu-stress":
-
-		log.InfoWithValues("[Info]: Details of Stressor:", logrus.Fields{
-			"CPU Core": experimentDetails.CPUcores,
-			"CPU Load": experimentDetails.CPULoad,
-			"Timeout":  experimentDetails.ChaosDuration,
-		})
-		stressArgs = append(stressArgs, "--cpu "+experimentDetails.CPUcores)
-		stressArgs = append(stressArgs, " --cpu-load "+experimentDetails.CPULoad)
-
-	case "pod-memory-stress":
-
-		log.InfoWithValues("[Info]: Details of Stressor:", logrus.Fields{
-			"Number of Workers":  experimentDetails.NumberOfWorkers,
-			"Memory Consumption": experimentDetails.MemoryConsumption,
-			"Timeout":            experimentDetails.ChaosDuration,
-		})
-		stressArgs = append(stressArgs, "--vm "+experimentDetails.NumberOfWorkers+" --vm-bytes "+experimentDetails.MemoryConsumption+"M")
-
-	case "pod-io-stress":
-		var hddbytes string
-		if experimentDetails.FilesystemUtilizationBytes == "0" {
-			if experimentDetails.FilesystemUtilizationPercentage == "0" {
-				hddbytes = "10%"
-				log.Info("Neither of FilesystemUtilizationPercentage or FilesystemUtilizationBytes provided, proceeding with a default FilesystemUtilizationPercentage value of 10%")
-			} else {
-				hddbytes = experimentDetails.FilesystemUtilizationPercentage + "%"
-			}
-		} else {
-			if experimentDetails.FilesystemUtilizationPercentage == "0" {
-				hddbytes = experimentDetails.FilesystemUtilizationBytes + "G"
-			} else {
-				hddbytes = experimentDetails.FilesystemUtilizationPercentage + "%"
-				log.Warn("Both FsUtilPercentage & FsUtilBytes provided as inputs, using the FsUtilPercentage value to proceed with stress exp")
-			}
-		}
-		log.InfoWithValues("[Info]: Details of Stressor:", logrus.Fields{
-			"io":                experimentDetails.NumberOfWorkers,
-			"hdd":               experimentDetails.NumberOfWorkers,
-			"hdd-bytes":         hddbytes,
-			"Timeout":           experimentDetails.ChaosDuration,
-			"Volume Mount Path": experimentDetails.VolumeMountPath,
-		})
-		if experimentDetails.VolumeMountPath == "" {
-			stressArgs = append(stressArgs, "--io "+experimentDetails.NumberOfWorkers+" --hdd "+experimentDetails.NumberOfWorkers+" --hdd-bytes "+hddbytes)
-		} else {
-			stressArgs = append(stressArgs, "--io "+experimentDetails.NumberOfWorkers+" --hdd "+experimentDetails.NumberOfWorkers+" --hdd-bytes "+hddbytes+" --temp-path "+experimentDetails.VolumeMountPath)
-		}
-		if experimentDetails.CPUcores != "0" {
-			stressArgs = append(stressArgs, "--cpu %v", experimentDetails.CPUcores)
-		}
-
-	default:
-		log.Fatalf("stressor for %v experiment is not suported", experimentDetails.ExperimentName)
-	}
-	return stressArgs
-}
-
-//pidPath will get the pid path of the container
-func pidPath(t targetDetails) cgroups.Path {
-	processPath := "/proc/" + strconv.Itoa(t.Pid) + "/cgroup"
-	paths, err := parseCgroupFile(processPath, t)
-	if err != nil {
-		return getErrorPath(errors.Wrapf(err, "parse cgroup file %s", processPath))
-	}
-	return getExistingPath(paths, t.Pid, "")
-}
-
-//parseCgroupFile will read and verify the cgroup file entry of a container
-func parseCgroupFile(path string, t targetDetails) (map[string]string, error) {
-	file, err := os.Open(path)
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to parse cgroup: %s", err.Error())}
-	}
-	defer file.Close()
-	return parseCgroupFromReader(file, t)
-}
-
-//parseCgroupFromReader will parse the cgroup file from the reader
-func parseCgroupFromReader(r io.Reader, t targetDetails) (map[string]string, error) {
-	var (
-		cgroups = make(map[string]string)
-		s       = bufio.NewScanner(r)
-	)
-	for s.Scan() {
-		var (
-			text  = s.Text()
-			parts = strings.SplitN(text, ":", 3)
-		)
-		if len(parts) < 3 {
-			return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("invalid cgroup entry: %q", text)}
-		}
-		for _, subs := range strings.Split(parts[1], ",") {
-			if subs != "" {
-				cgroups[subs] = parts[2]
-			}
-		}
-	}
-	if err := s.Err(); err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("buffer scanner failed: %s", err.Error())}
-	}
-
-	return cgroups, nil
-}
-
-//getExistingPath will be used to get the existing valid cgroup path
-func getExistingPath(paths map[string]string, pid int, suffix string) cgroups.Path {
-	for n, p := range paths {
-		dest, err := getCgroupDestination(pid, n)
-		if err != nil {
-			return getErrorPath(err)
-		}
-		rel, err := filepath.Rel(dest, p)
-		if err != nil {
-			return getErrorPath(err)
-		}
-		if rel == "." {
-			rel = dest
-		}
-		paths[n] = filepath.Join("/", rel)
-	}
-	return func(name cgroups.Name) (string, error) {
-		root, ok := paths[string(name)]
-		if !ok {
-			if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
-				return "", cgroups.ErrControllerNotActive
-			}
-		}
-		if suffix != "" {
-			return filepath.Join(root, suffix), nil
-		}
-		return root, nil
-	}
-}
-
-//getErrorPath will give the invalid cgroup path
-func getErrorPath(err error) cgroups.Path {
-	return func(_ cgroups.Name) (string, error) {
-		return "", err
-	}
-}
-
-//getCgroupDestination will validate the subsystem with the mountpath in container mountinfo file.
-func getCgroupDestination(pid int, subsystem string) (string, error) {
-	mountinfoPath := fmt.Sprintf("/proc/%d/mountinfo", pid)
-	file, err := os.Open(mountinfoPath)
-	if err != nil {
-		return "", err
-	}
-	defer file.Close()
-	s := bufio.NewScanner(file)
-	for s.Scan() {
-		fields := strings.Fields(s.Text())
-		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
-			if opt == subsystem {
-				return fields[3], nil
-			}
-		}
-	}
-	if err := s.Err(); err != nil {
-		return "", err
-	}
-	return "", errors.Errorf("no destination found for %v ", subsystem)
-}
-
-//findValidCgroup will be used to get a valid cgroup path
-func findValidCgroup(path cgroups.Path, t targetDetails) (string, error) {
-	for _, subsystem := range cgroupSubsystemList {
-		path, err := path(cgroups.Name(subsystem))
-		if err != nil {
-			log.Errorf("fail to retrieve the cgroup path, subsystem: %v, target: %v, err: %v", subsystem, t.ContainerId, err)
-			continue
-		}
-		if strings.Contains(path, t.ContainerId) {
-			return path, nil
-		}
-	}
-	return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: "could not find valid cgroup"}
-}
-
-//getENV fetches all the env variables from the runner pod
-func getENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.ContainerRuntime = types.Getenv("CONTAINER_RUNTIME", "")
-	experimentDetails.SocketPath = types.Getenv("SOCKET_PATH", "")
-	experimentDetails.CPUcores = types.Getenv("CPU_CORES", "")
-	experimentDetails.CPULoad = types.Getenv("CPU_LOAD", "")
-	experimentDetails.FilesystemUtilizationPercentage = types.Getenv("FILESYSTEM_UTILIZATION_PERCENTAGE", "")
-	experimentDetails.FilesystemUtilizationBytes = types.Getenv("FILESYSTEM_UTILIZATION_BYTES", "")
-	experimentDetails.NumberOfWorkers = types.Getenv("NUMBER_OF_WORKERS", "")
-	experimentDetails.MemoryConsumption = types.Getenv("MEMORY_CONSUMPTION", "")
-	experimentDetails.VolumeMountPath = types.Getenv("VOLUME_MOUNT_PATH", "")
-	experimentDetails.StressType = types.Getenv("STRESS_TYPE", "")
-}
-
-// abortWatcher continuously watch for the abort signals
-func abortWatcher(targets []targetDetails, resultName, chaosNS string) {
-
-	<-abort
-
-	log.Info("[Chaos]: Killing process started because of terminated signal received")
-	log.Info("[Abort]: Chaos Revert Started")
-	// retry thrice for the chaos revert
-	retry := 3
-	for retry > 0 {
-		for _, t := range targets {
-			if err = terminateProcess(t); err != nil {
-				log.Errorf("[Abort]: unable to revert for %v pod, err :%v", t.Name, err)
-				continue
-			}
-			if err = result.AnnotateChaosResult(resultName, chaosNS, "reverted", "pod", t.Name); err != nil {
-				log.Errorf("[Abort]: Unable to annotate the chaosresult for %v pod, err :%v", t.Name, err)
-			}
-		}
-		retry--
-		time.Sleep(1 * time.Second)
-	}
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
-
-// getCGroupManager will return the cgroup manager for the given pid of the process
-func getCGroupManager(t targetDetails) (interface{}, error) {
-	if cgroups.Mode() == cgroups.Unified {
-		groupPath, err := cgroupsv2.PidGroupPath(t.Pid)
-		if err != nil {
-			return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to get pid group path: %s", err.Error())}
-		}
-
-		cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath)
-		if err != nil {
-			return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to load the cgroup: %s", err.Error())}
-		}
-		return cgroup2, nil
-	}
-	path := pidPath(t)
-	cgroup, err := findValidCgroup(path, t)
-	if err != nil {
-		return nil, stacktrace.Propagate(err, "could not find valid cgroup")
-	}
-	cgroup1, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroup))
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to load the cgroup: %s", err.Error())}
-	}
-
-	return cgroup1, nil
-}
-
-// addProcessToCgroup will add the process to the target cgroup
-// It uses cgroup v2 in unified mode and falls back to cgroup v1 otherwise
-func addProcessToCgroup(pid int, control interface{}) error {
-	if cgroups.Mode() == cgroups.Unified {
-		var cgroup1 = control.(*cgroupsv2.Manager)
-		return cgroup1.AddProc(uint64(pid))
-	}
-	var cgroup1 = control.(cgroups.Cgroup)
-	return cgroup1.Add(cgroups.Process{Pid: pid})
-}
-
-func injectChaos(t targetDetails, stressors string) (*exec.Cmd, error) {
-	stressCommand := "pause nsutil -t " + strconv.Itoa(t.Pid) + " -p -- " + stressors
-	log.Infof("[Info]: starting process: %v", stressCommand)
-
-	// launch the stress-ng process on the target container in paused mode
-	cmd := exec.Command("/bin/bash", "-c", stressCommand)
-	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
-	var buf bytes.Buffer
-	cmd.Stdout = &buf
-	err = cmd.Start()
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("failed to start stress process: %s", err.Error())}
-	}
-
-	// add the stress process to the cgroup of target container
-	if err = addProcessToCgroup(cmd.Process.Pid, t.CGroupManager); err != nil {
-		if killErr := cmd.Process.Kill(); killErr != nil {
-			return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to add the stress process to cgroup %s and kill stress process: %s", err.Error(), killErr.Error())}
-		}
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to add the stress process to cgroup: %s", err.Error())}
-	}
-
-	log.Info("[Info]: Sending signal to resume the stress process")
-	// wait for the process to start before sending the resume signal
-	// TODO: need a dynamic way to check the start of the process
-	time.Sleep(700 * time.Millisecond)
-
-	// remove pause and resume or start the stress process
-	if err := cmd.Process.Signal(syscall.SIGCONT); err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to remove pause and start the stress process: %s", err.Error())}
-	}
-	return cmd, nil
-}
-
-type targetDetails struct {
-	Name            string
-	Namespace       string
-	TargetContainer string
-	ContainerId     string
-	Pid             int
-	CGroupManager   interface{}
-	Cmd             *exec.Cmd
-	Source          string
-}
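The core injection trick in the deleted helper above is starting the stress process suspended in its own process group, re-parenting it into the target container's cgroup, and only then resuming it. Below is a minimal sketch of just the process-group mechanics, using only the standard library. The real helper launches "pause nsutil ... stress-ng" so the process starts suspended; here "sleep 30" starts running immediately, so the SIGCONT is a harmless no-op kept only to mirror the original flow.

package main

import (
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("/bin/sh", "-c", "sleep 30")
	// place the child in its own process group so the whole group can be
	// signalled at once, exactly as the helper does with Setpgid
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := cmd.Start(); err != nil {
		log.Fatalf("failed to start process: %v", err)
	}

	// crude wait for the process to come up, mirroring the 700ms sleep above
	time.Sleep(700 * time.Millisecond)
	if err := cmd.Process.Signal(syscall.SIGCONT); err != nil {
		log.Fatalf("failed to resume process: %v", err)
	}

	// kill the entire group with a negative pid, as terminateProcess does
	if err := syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL); err != nil {
		log.Fatalf("failed to kill process group: %v", err)
	}
	_ = cmd.Wait() // reap the child
}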
diff --git a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go b/chaoslib/litmus/stress-chaos/lib/stress-chaos.go
deleted file mode 100644
index 88df254..0000000
--- a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package lib
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/palantir/stacktrace"
-
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	"github.com/sirupsen/logrus"
-	apiv1 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-//PrepareAndInjectStressChaos contains the preparation and injection steps for the stress experiments.
-func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	var err error
-	//Set up the tunables if provided in range
-	SetChaosTunables(experimentsDetails)
-
-	switch experimentsDetails.StressType {
-	case "pod-cpu-stress":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"CPU Core":            experimentsDetails.CPUcores,
-			"CPU Load Percentage": experimentsDetails.CPULoad,
-			"Sequence":            experimentsDetails.Sequence,
-			"PodsAffectedPerc":    experimentsDetails.PodsAffectedPerc,
-		})
-
-	case "pod-memory-stress":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"Number of Workers":  experimentsDetails.NumberOfWorkers,
-			"Memory Consumption": experimentsDetails.MemoryConsumption,
-			"Sequence":           experimentsDetails.Sequence,
-			"PodsAffectedPerc":   experimentsDetails.PodsAffectedPerc,
-		})
-
-	case "pod-io-stress":
-		log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{
-			"FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage,
-			"FilesystemUtilizationBytes":      experimentsDetails.FilesystemUtilizationBytes,
-			"NumberOfWorkers":                 experimentsDetails.NumberOfWorkers,
-			"Sequence":                        experimentsDetails.Sequence,
-			"PodsAffectedPerc":                experimentsDetails.PodsAffectedPerc,
-		})
-	}
-
-	// Get the target pod details for the chaos execution
-	// if the target pod is not defined it will derive the random target pod list using pod affected percentage
-	if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide either appLabel or TARGET_PODS"}
-	}
-	targetPodList, err := common.GetTargetPods(experimentsDetails.NodeLabel, experimentsDetails.TargetPods, experimentsDetails.PodsAffectedPerc, clients, chaosDetails)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not get target pods")
-	}
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	// Getting the serviceAccountName, need permission inside helper pod to create the events
-	if experimentsDetails.ChaosServiceAccount == "" {
-		experimentsDetails.ChaosServiceAccount, err = common.GetServiceAccount(experimentsDetails.ChaosNamespace, experimentsDetails.ChaosPodName, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not  experiment service account")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		if err := common.SetHelperData(chaosDetails, experimentsDetails.SetHelperData, clients); err != nil {
-			return stacktrace.Propagate(err, "could not set helper data")
-		}
-	}
-
-	experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != ""
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode injects the stress chaos into all target applications serially (one by one)
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	// creating the helper pod to perform the stress chaos
-	for _, pod := range targetPodList.Items {
-
-		//Get the target container name of the application pod
-		if !experimentsDetails.IsTargetContainerProvided {
-			experimentsDetails.TargetContainer = pod.Spec.Containers[0].Name
-		}
-
-		log.InfoWithValues("[Info]: Details of application under chaos injection", logrus.Fields{
-			"PodName":       pod.Name,
-			"NodeName":      pod.Spec.NodeName,
-			"ContainerName": experimentsDetails.TargetContainer,
-		})
-		runID := stringutils.GetRunID()
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-
-		appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-		//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
-		log.Info("[Status]: Checking the status of the helper pods")
-		if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return stacktrace.Propagate(err, "could not check helper status")
-		}
-
-		// Wait till the completion of the helper pod
-		// set an upper limit for the waiting time
-		log.Info("[Wait]: waiting till the completion of the helper pod")
-		podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-		if err != nil || podStatus == "Failed" {
-			common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-			return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-		}
-
-		//Deleting all the helper pod for stress chaos
-		log.Info("[Cleanup]: Deleting the helper pod")
-		err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients)
-		if err != nil {
-			return stacktrace.Propagate(err, "could not delete helper pod(s)")
-		}
-	}
-	return nil
-}
-
-// injectChaosInParallelMode injects the stress chaos into all target applications in parallel (all at once)
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error {
-
-	var err error
-	// run the probes during chaos
-	if len(resultDetails.ProbeDetails) != 0 {
-		if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-			return err
-		}
-	}
-
-	runID := stringutils.GetRunID()
-	targets := common.FilterPodsForNodes(targetPodList, experimentsDetails.TargetContainer)
-
-	for node, tar := range targets {
-		var targetsPerNode []string
-		for _, k := range tar.Target {
-			targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer))
-		}
-
-		if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil {
-			return stacktrace.Propagate(err, "could not create helper pod")
-		}
-	}
-
-	appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID)
-
-	//checking the status of the helper pods, wait till the pod comes to running state else fail the experiment
-	log.Info("[Status]: Checking the status of the helper pods")
-	if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return stacktrace.Propagate(err, "could not check helper status")
-	}
-
-	// Wait till the completion of the helper pod
-	// set an upper limit for the waiting time
-	log.Info("[Wait]: waiting till the completion of the helper pod")
-	podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...)
-	if err != nil || podStatus == "Failed" {
-		common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients)
-		return common.HelperFailedError(err, appLabel, chaosDetails.ChaosNamespace, true)
-	}
-
-	//Deleting all the helper pod for stress chaos
-	log.Info("[Cleanup]: Deleting all the helper pod")
-	err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients)
-	if err != nil {
-		return stacktrace.Propagate(err, "could not delete helper pod(s)")
-	}
-
-	return nil
-}
-
-// createHelperPod derives the attributes for the helper pod and creates it
-func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error {
-
-	privilegedEnable := true
-	terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds)
-
-	helperPod := &apiv1.Pod{
-		ObjectMeta: v1.ObjectMeta{
-			GenerateName: experimentsDetails.ExperimentName + "-helper-",
-			Namespace:    experimentsDetails.ChaosNamespace,
-			Labels:       common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName),
-			Annotations:  chaosDetails.Annotations,
-		},
-		Spec: apiv1.PodSpec{
-			HostPID:                       true,
-			TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
-			ImagePullSecrets:              chaosDetails.ImagePullSecrets,
-			ServiceAccountName:            experimentsDetails.ChaosServiceAccount,
-			RestartPolicy:                 apiv1.RestartPolicyNever,
-			NodeName:                      nodeName,
-
-			Volumes: []apiv1.Volume{
-				{
-					Name: "socket-path",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: experimentsDetails.SocketPath,
-						},
-					},
-				},
-				{
-					Name: "sys-path",
-					VolumeSource: apiv1.VolumeSource{
-						HostPath: &apiv1.HostPathVolumeSource{
-							Path: "/sys",
-						},
-					},
-				},
-			},
-
-			Containers: []apiv1.Container{
-				{
-					Name:            experimentsDetails.ExperimentName,
-					Image:           experimentsDetails.LIBImage,
-					ImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),
-					Command: []string{
-						"/bin/bash",
-					},
-					Args: []string{
-						"-c",
-						"./helpers -name stress-chaos",
-					},
-					Resources: chaosDetails.Resources,
-					Env:       getPodEnv(experimentsDetails, targets),
-					VolumeMounts: []apiv1.VolumeMount{
-						{
-							Name:      "socket-path",
-							MountPath: experimentsDetails.SocketPath,
-						},
-						{
-							Name:      "sys-path",
-							MountPath: "/sys",
-						},
-					},
-					SecurityContext: &apiv1.SecurityContext{
-						Privileged: &privilegedEnable,
-						RunAsUser:  ptrint64(0),
-						Capabilities: &apiv1.Capabilities{
-							Add: []apiv1.Capability{
-								"SYS_ADMIN",
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if len(chaosDetails.SideCar) != 0 {
-		helperPod.Spec.Containers = append(helperPod.Spec.Containers, common.BuildSidecar(chaosDetails)...)
-		helperPod.Spec.Volumes = append(helperPod.Spec.Volumes, common.GetSidecarVolumes(chaosDetails)...)
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())}
-	}
-	return nil
-}
-
-// getPodEnv derives all the ENVs required for the helper pod
-func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar {
-
-	var envDetails common.ENVDetails
-	envDetails.SetEnv("TARGETS", targets).
-		SetEnv("TOTAL_CHAOS_DURATION", strconv.Itoa(experimentsDetails.ChaosDuration)).
-		SetEnv("CHAOS_NAMESPACE", experimentsDetails.ChaosNamespace).
-		SetEnv("CHAOSENGINE", experimentsDetails.EngineName).
-		SetEnv("CHAOS_UID", string(experimentsDetails.ChaosUID)).
-		SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime).
-		SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName).
-		SetEnv("SOCKET_PATH", experimentsDetails.SocketPath).
-		SetEnv("CPU_CORES", experimentsDetails.CPUcores).
-		SetEnv("CPU_LOAD", experimentsDetails.CPULoad).
-		SetEnv("FILESYSTEM_UTILIZATION_PERCENTAGE", experimentsDetails.FilesystemUtilizationPercentage).
-		SetEnv("FILESYSTEM_UTILIZATION_BYTES", experimentsDetails.FilesystemUtilizationBytes).
-		SetEnv("NUMBER_OF_WORKERS", experimentsDetails.NumberOfWorkers).
-		SetEnv("MEMORY_CONSUMPTION", experimentsDetails.MemoryConsumption).
-		SetEnv("VOLUME_MOUNT_PATH", experimentsDetails.VolumeMountPath).
-		SetEnv("STRESS_TYPE", experimentsDetails.StressType).
-		SetEnv("INSTANCE_ID", experimentsDetails.InstanceID).
-		SetEnvFromDownwardAPI("v1", "metadata.name")
-
-	return envDetails.ENV
-}
-
-func ptrint64(p int64) *int64 {
-	return &p
-}
-
-//SetChaosTunables will pick a random value from within a given range of values
-//If the value is not provided as a range, the initially provided value is used as-is.
-func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) {
-	experimentsDetails.CPUcores = common.ValidateRange(experimentsDetails.CPUcores)
-	experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad)
-	experimentsDetails.MemoryConsumption = common.ValidateRange(experimentsDetails.MemoryConsumption)
-	experimentsDetails.NumberOfWorkers = common.ValidateRange(experimentsDetails.NumberOfWorkers)
-	experimentsDetails.FilesystemUtilizationPercentage = common.ValidateRange(experimentsDetails.FilesystemUtilizationPercentage)
-	experimentsDetails.FilesystemUtilizationBytes = common.ValidateRange(experimentsDetails.FilesystemUtilizationBytes)
-	experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc)
-	experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence)
-}
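SetChaosTunables relies on common.ValidateRange to resolve tunables that may be given either as a plain value ("2") or as a range ("1-4"). That implementation is not part of this patch, so the following is only a hedged sketch of the likely behavior, not the actual library function.

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

// validateRange is a hypothetical stand-in: for "lo-hi" it returns a random
// value in [lo, hi]; anything else is returned unchanged.
func validateRange(val string) string {
	parts := strings.Split(val, "-")
	if len(parts) != 2 {
		return val
	}
	lo, errLo := strconv.Atoi(parts[0])
	hi, errHi := strconv.Atoi(parts[1])
	if errLo != nil || errHi != nil || hi < lo {
		return val
	}
	return strconv.Itoa(lo + rand.Intn(hi-lo+1))
}

func main() {
	fmt.Println(validateRange("1-4")) // e.g. "3"
	fmt.Println(validateRange("2"))   // "2"
}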
diff --git a/chaoslib/litmus/vira/node-restart/lib/node-restart.go b/chaoslib/litmus/vira/node-restart/lib/node-restart.go
index fc53a6a..4fb029b 100644
--- a/chaoslib/litmus/vira/node-restart/lib/node-restart.go
+++ b/chaoslib/litmus/vira/node-restart/lib/node-restart.go
@@ -147,17 +147,14 @@ func restartNode(targetNodeList []string, experimentsDetails *experimentTypes.Ex
 		}
 		for _, appNode := range targetNodeList {
 			log.Infof("[Inject]: Restarting the %v node", appNode)
-			command := exec.Command("kubectl", "node_shell", appNode, "--", "shutdown", "-r", "+3")
+			command := exec.Command("kubectl", "node_shell", appNode, "--", "shutdown", "-r", "+1")
 			if err := common.RunCLICommands(command, "", fmt.Sprintf("{node: %s}", appNode), "failed to restart the target node", cerrors.ErrorTypeChaosInject); err != nil {
 				return err
 			}
 	
-		}
-
-		for _, appNode := range targetNodeList {
 			common.SetTargets(appNode, "injected", "node", chaosDetails)
 		}
 
 	}
 	return nil
 }
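The retained vira/node-restart change now schedules the reboot one minute out (shutdown -r +1) instead of three, and marks each node as injected within the same loop. A self-contained sketch of what that injection step amounts to, assuming the kubectl node_shell plugin is installed; the real code delegates execution and error wrapping to common.RunCLICommands.

package main

import (
	"fmt"
	"os/exec"
)

// restartNode schedules a reboot of the given node one minute out, matching
// the "+1" argument in the patched code above.
func restartNode(node string) error {
	cmd := exec.Command("kubectl", "node_shell", node, "--", "shutdown", "-r", "+1")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to restart node %s: %v, output: %s", node, err, string(out))
	}
	return nil
}

func main() {
	// hypothetical node name, for illustration only
	if err := restartNode("worker-1"); err != nil {
		fmt.Println(err)
	}
}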
diff --git a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go b/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go
deleted file mode 100644
index e305578..0000000
--- a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package lib
-
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/vmware"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types"
-	"github.com/palantir/stacktrace"
-)
-
-var inject, abort chan os.Signal
-
-// InjectVMPowerOffChaos injects the chaos in serial or parallel mode
-func InjectVMPowerOffChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, cookie string) error {
-
-	// inject channel is used to transmit signal notifications.
-	inject = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to inject channel.
-	signal.Notify(inject, os.Interrupt, syscall.SIGTERM)
-
-	// abort channel is used to transmit signal notifications.
-	abort = make(chan os.Signal, 1)
-	// Catch and relay certain signal(s) to abort channel.
-	signal.Notify(abort, os.Interrupt, syscall.SIGTERM)
-
-	//Waiting for the ramp time before chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	//Fetching the target VM Ids
-	vmIdList := strings.Split(experimentsDetails.VMIds, ",")
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go abortWatcher(experimentsDetails, vmIdList, clients, resultDetails, chaosDetails, eventsDetails, cookie)
-
-	switch strings.ToLower(experimentsDetails.Sequence) {
-	case "serial":
-		if err := injectChaosInSerialMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in serial mode")
-		}
-	case "parallel":
-		if err := injectChaosInParallelMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil {
-			return stacktrace.Propagate(err, "could not run chaos in parallel mode")
-		}
-	default:
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("'%s' sequence is not supported", experimentsDetails.Sequence)}
-	}
-
-	//Waiting for the ramp time after chaos injection
-	if experimentsDetails.RampTime != 0 {
-		log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime)
-		common.WaitForDuration(experimentsDetails.RampTime)
-	}
-
-	return nil
-}
-
-// injectChaosInSerialMode stops VMs in serial mode i.e. one after the other
-func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp at which the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target VM Id list, %v", vmIdList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			for i, vmId := range vmIdList {
-
-				//Stopping the VM
-				log.Infof("[Chaos]: Stopping %s VM", vmId)
-				if err := vmware.StopVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, fmt.Sprintf("failed to stop %s vm", vmId))
-				}
-
-				common.SetTargets(vmId, "injected", "VM", chaosDetails)
-
-				//Wait for the VM to completely stop
-				log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_OFF state", vmId)
-				if err := vmware.WaitForVMStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, "VM shutdown failed")
-				}
-
-				//Run the probes during the chaos
-				//The OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration
-				if len(resultDetails.ProbeDetails) != 0 && i == 0 {
-					if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-						return stacktrace.Propagate(err, "failed to run probes")
-					}
-				}
-
-				//Wait for chaos interval
-				log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-				time.Sleep(time.Duration(experimentsDetails.ChaosInterval) * time.Second)
-
-				//Starting the VM
-				log.Infof("[Chaos]: Starting back %s VM", vmId)
-				if err := vmware.StartVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, "failed to start back vm")
-				}
-
-				//Wait for the VM to completely start
-				log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_ON state", vmId)
-				if err := vmware.WaitForVMStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, "vm failed to start")
-				}
-
-				common.SetTargets(vmId, "reverted", "VM", chaosDetails)
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// injectChaosInParallelMode stops VMs in parallel mode i.e. all at once
-func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error {
-
-	select {
-	case <-inject:
-		// stopping the chaos execution, if abort signal received
-		os.Exit(0)
-	default:
-		//ChaosStartTimeStamp contains the start timestamp at which the chaos injection begins
-		ChaosStartTimeStamp := time.Now()
-		duration := int(time.Since(ChaosStartTimeStamp).Seconds())
-
-		for duration < experimentsDetails.ChaosDuration {
-
-			log.Infof("[Info]: Target VM Id list, %v", vmIdList)
-
-			if experimentsDetails.EngineName != "" {
-				msg := "Injecting " + experimentsDetails.ExperimentName + " chaos in VM"
-				types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails)
-				events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine")
-			}
-
-			for _, vmId := range vmIdList {
-
-				//Stopping the VM
-				log.Infof("[Chaos]: Stopping %s VM", vmId)
-				if err := vmware.StopVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, fmt.Sprintf("failed to stop %s vm", vmId))
-				}
-
-				common.SetTargets(vmId, "injected", "VM", chaosDetails)
-			}
-
-			for _, vmId := range vmIdList {
-
-				//Wait for the VM to completely stop
-				log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_OFF state", vmId)
-				if err := vmware.WaitForVMStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, "vm failed to shutdown")
-				}
-			}
-
-			//Running the probes during chaos
-			if len(resultDetails.ProbeDetails) != 0 {
-				if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
-					return stacktrace.Propagate(err, "failed to run probes")
-				}
-			}
-
-			//Waiting for chaos interval
-			log.Infof("[Wait]: Waiting for chaos interval of %vs", experimentsDetails.ChaosInterval)
-			common.WaitForDuration(experimentsDetails.ChaosInterval)
-
-			for _, vmId := range vmIdList {
-
-				//Starting the VM
-				log.Infof("[Chaos]: Starting back %s VM", vmId)
-				if err := vmware.StartVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, fmt.Sprintf("failed to start back %s vm", vmId))
-				}
-			}
-
-			for _, vmId := range vmIdList {
-
-				//Wait for the VM to completely start
-				log.Infof("[Wait]: Wait for VM '%s' to get in POWERED_ON state", vmId)
-				if err := vmware.WaitForVMStart(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-					return stacktrace.Propagate(err, "vm failed to successfully start")
-				}
-			}
-
-			for _, vmId := range vmIdList {
-				common.SetTargets(vmId, "reverted", "VM", chaosDetails)
-			}
-
-			duration = int(time.Since(ChaosStartTimeStamp).Seconds())
-		}
-	}
-
-	return nil
-}
-
-// abortWatcher watches for the abort signal and reverts the chaos
-func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, cookie string) {
-	<-abort
-
-	log.Info("[Abort]: Chaos Revert Started")
-	for _, vmId := range vmIdList {
-
-		vmStatus, err := vmware.GetVMStatus(experimentsDetails.VcenterServer, vmId, cookie)
-		if err != nil {
-			log.Errorf("failed to get vm status of %s when an abort signal is received: %s", vmId, err.Error())
-		}
-
-		if vmStatus != "POWERED_ON" {
-
-			log.Infof("[Abort]: Waiting for the VM %s to shutdown", vmId)
-			if err := vmware.WaitForVMStop(experimentsDetails.Timeout, experimentsDetails.Delay, experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-				log.Errorf("vm %s failed to successfully shutdown when an abort signal was received: %s", vmId, err.Error())
-			}
-
-			log.Infof("[Abort]: Starting %s VM as abort signal has been received", vmId)
-			if err := vmware.StartVM(experimentsDetails.VcenterServer, vmId, cookie); err != nil {
-				log.Errorf("vm %s failed to start when an abort signal was received: %s", vmId, err.Error())
-			}
-		}
-
-		common.SetTargets(vmId, "reverted", "VM", chaosDetails)
-	}
-
-	log.Info("[Abort]: Chaos Revert Completed")
-	os.Exit(1)
-}
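Both deleted vm-poweroff modes share one duration-driven pattern: repeat stop/wait/probe/start rounds until the chaos duration elapses. Reduced to a skeleton, with injectOnce as a hypothetical stand-in for a single round over the target VMs.

package main

import (
	"log"
	"time"
)

// runChaosLoop keeps invoking one injection round until chaosDuration
// (in seconds) has elapsed, mirroring the loop structure above.
func runChaosLoop(chaosDuration int, injectOnce func() error) error {
	start := time.Now()
	for int(time.Since(start).Seconds()) < chaosDuration {
		if err := injectOnce(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runChaosLoop(30, func() error {
		log.Println("one stop/wait/probe/start round")
		time.Sleep(10 * time.Second) // stands in for the chaos interval and waits
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}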
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/README.md b/experiments/aws-ssm/aws-ssm-chaos-by-id/README.md
deleted file mode 100644
index 56080d4..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-id/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> AWS SSM Chaos By ID </td>
- <td> This experiment causes chaos injection on AWS resources using the Amazon SSM Run Command. It is carried out using SSM docs, which define the actions performed by Systems Manager on your managed instances (those with the SSM agent installed) and let us perform chaos experiments on those resources. In this experiment a default SSM doc is used to perform resource-stress chaos on the EC2 instances defined by the target instance ID(s). One can also provide a custom SSM doc, mounted as a ConfigMap, with its path set in the `DOCUMENT_PATH` ENV. One or more target instances can be provided as a comma-separated list in the `EC2_INSTANCE_ID` ENV (e.g. instance1,instance2)</td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws-ssm/aws-ssm-chaos-by-id/"> Here </a> </td>
-</tr>
-</table>
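
Since `EC2_INSTANCE_ID` takes a comma-separated list, any consumer has to split and trim it before use. A minimal sketch of that parsing, using only the standard library (the helper name is hypothetical, not the litmus-go API):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// instanceIDsFromEnv splits the comma-separated EC2_INSTANCE_ID value
// into a clean slice, dropping empty entries and surrounding spaces.
func instanceIDsFromEnv() []string {
	raw := os.Getenv("EC2_INSTANCE_ID") // e.g. "instance1, instance2"
	var ids []string
	for _, part := range strings.Split(raw, ",") {
		if id := strings.TrimSpace(part); id != "" {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	os.Setenv("EC2_INSTANCE_ID", "i-0abc123, i-0def456")
	fmt.Println(instanceIDsFromEnv()) // [i-0abc123 i-0def456]
}
```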
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
deleted file mode 100644
index cf3445c..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib/ssm"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// AWSSSMChaosByID injects the SSM chaos on EC2 instances
-func AWSSSMChaosByID(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "aws-ssm-chaos-by-id")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The instance information is as follows", logrus.Fields{
-		"Total Chaos Duration": experimentsDetails.ChaosDuration,
-		"Chaos Namespace":      experimentsDetails.ChaosNamespace,
-		"Instance ID":          experimentsDetails.EC2InstanceID,
-		"Sequence":             experimentsDetails.Sequence,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	//Verify that the instance has permission to perform SSM API calls
-	if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil {
-		log.Errorf("Failed to perform SSM API calls: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	if chaosDetails.DefaultHealthCheck {
-		//Verify the aws ec2 instance is running (pre chaos)
-		if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
-			log.Errorf("Failed to get the ec2 instance status: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareAWSSSMChaosByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		//Delete the SSM document uploaded to AWS
-		if experimentsDetails.IsDocsUploaded {
-			log.Info("[Recovery]: Delete the uploaded aws ssm docs")
-			if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil {
-				log.Errorf("Failed to delete ssm doc: %v", err)
-			}
-		}
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	if chaosDetails.DefaultHealthCheck {
-		//Verify the aws ec2 instance is running (post chaos)
-		if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
-			log.Errorf("Failed to get the ec2 instance status: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state (post chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult:  %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-}
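
The deleted experiment above follows the same lifecycle as every other experiment file in this patch: SOT result update, pre-chaos checks and probes, injection, post-chaos checks and probes, EOT result update, with a failure record and early return at each step. A condensed, dependency-free sketch of that control flow (all names are illustrative):

```go
package main

import "fmt"

// runPhase executes one lifecycle step; on failure the experiment
// records the failure and returns early, as every file in this patch does.
func runPhase(name string, fn func() error) bool {
	fmt.Println("[Phase]:", name)
	if err := fn(); err != nil {
		fmt.Printf("%s failed: %v\n", name, err)
		return false
	}
	return true
}

func main() {
	ok := func() error { return nil }
	steps := []struct {
		name string
		fn   func() error
	}{
		{"update chaos result (SOT)", ok},
		{"pre-chaos checks and probes", ok},
		{"chaos injection", ok},
		{"post-chaos checks and probes", ok},
		{"update chaos result (EOT)", ok},
	}
	for _, s := range steps {
		if !runPhase(s.name, s.fn) {
			return // mirror the early returns in the experiment above
		}
	}
	fmt.Println("verdict: Pass")
}
```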
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/rbac.yaml b/experiments/aws-ssm/aws-ssm-chaos-by-id/rbac.yaml
deleted file mode 100644
index 9e297dc..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-id/rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: aws-ssm-chaos-by-id-sa
-  namespace: default
-  labels:
-    name: aws-ssm-chaos-by-id-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: aws-ssm-chaos-by-id-sa
-  labels:
-    name: aws-ssm-chaos-by-id-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: aws-ssm-chaos-by-id-sa
-  labels:
-    name: aws-ssm-chaos-by-id-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: aws-ssm-chaos-by-id-sa
-subjects:
-- kind: ServiceAccount
-  name: aws-ssm-chaos-by-id-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/test/test.yml b/experiments/aws-ssm/aws-ssm-chaos-by-id/test/test.yml
deleted file mode 100644
index b4389ea..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-id/test/test.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: aws-ssm-chaos-by-id-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-
-          - name: EC2_INSTANCE_ID
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-        secrets:
-          - name: cloud-secret
-            mountPath: /tmp/
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/README.md b/experiments/aws-ssm/aws-ssm-chaos-by-tag/README.md
deleted file mode 100644
index 137503b..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> AWS SSM Chaos By Tag </td>
- <td> This experiment causes chaos injection on AWS resources using the Amazon SSM Run Command. It is carried out using SSM docs, which define the actions performed by Systems Manager on your managed instances (those with the SSM agent installed) and let us perform chaos experiments on those resources. In this experiment a default SSM doc is used to perform resource-stress chaos on the EC2 instances defined by a common instance tag. One can also provide a custom SSM doc, mounted as a ConfigMap, with its path set in the `DOCUMENT_PATH` ENV. The number of target instances can be controlled with the `INSTANCE_AFFECTED_PERC` ENV.</td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws-ssm/aws-ssm-chaos-by-tag/"> Here </a> </td>
-</tr>
-</table>
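
`INSTANCE_AFFECTED_PERC` turns a tag match into a target count. A plausible sketch of that selection, assuming percentage-of-matches semantics with a minimum of one target (the helper and its rounding are assumptions, not the library's exact behaviour):

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickByPercentage returns roughly perc% of the tagged instances,
// always at least one when any instance matched the tag.
func pickByPercentage(tagged []string, perc int) []string {
	if len(tagged) == 0 {
		return nil
	}
	n := len(tagged) * perc / 100
	if n < 1 {
		n = 1
	}
	// Shuffle so repeated runs don't always hit the same instances.
	rand.Shuffle(len(tagged), func(i, j int) { tagged[i], tagged[j] = tagged[j], tagged[i] })
	return tagged[:n]
}

func main() {
	tagged := []string{"i-a", "i-b", "i-c", "i-d"}
	fmt.Println(pickByPercentage(tagged, 50)) // two random instances
}
```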
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
deleted file mode 100644
index d287e4b..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib/ssm"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// AWSSSMChaosByTag injects the SSM chaos on EC2 instances
-func AWSSSMChaosByTag(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "aws-ssm-chaos-by-tag")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The instance information is as follows", logrus.Fields{
-		"Total Chaos Duration": experimentsDetails.ChaosDuration,
-		"Chaos Namespace":      experimentsDetails.ChaosNamespace,
-		"EC2 Instance Tag":     experimentsDetails.EC2InstanceTag,
-		"Sequence":             experimentsDetails.Sequence,
-	})
-
-	//Verify that the instance has permission to perform SSM API calls
-	if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil {
-		log.Errorf("Failed to perform SSM API calls: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareAWSSSMChaosByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		//Delete the SSM document uploaded to AWS
-		if experimentsDetails.IsDocsUploaded {
-			log.Info("[Recovery]: Delete the uploaded aws ssm docs")
-			if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil {
-				log.Errorf("Failed to delete ssm document: %v", err)
-			}
-		}
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	if chaosDetails.DefaultHealthCheck {
-		//Verify the aws ec2 instance is running (post chaos)
-		if err := ec2.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil {
-			log.Errorf("Failed to get the ec2 instance status: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state (post chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult:  %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-}
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/rbac.yaml b/experiments/aws-ssm/aws-ssm-chaos-by-tag/rbac.yaml
deleted file mode 100644
index c9b3c6a..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: aws-ssm-chaos-by-tag-sa
-  namespace: default
-  labels:
-    name: aws-ssm-chaos-by-tag-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: aws-ssm-chaos-by-tag-sa
-  labels:
-    name: aws-ssm-chaos-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: aws-ssm-chaos-by-tag-sa
-  labels:
-    name: aws-ssm-chaos-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: aws-ssm-chaos-by-tag-sa
-subjects:
-- kind: ServiceAccount
-  name: aws-ssm-chaos-by-tag-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/test/test.yml b/experiments/aws-ssm/aws-ssm-chaos-by-tag/test/test.yml
deleted file mode 100644
index 87dbeb1..0000000
--- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/test/test.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: aws-ssm-chaos-by-tag-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-
-          - name: EC2_INSTANCE_TAG
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-        secrets:
-          - name: cloud-secret
-            mountPath: /tmp/
diff --git a/experiments/azure/azure-disk-loss/README.md b/experiments/azure/azure-disk-loss/README.md
deleted file mode 100644
index fd0edfa..0000000
--- a/experiments/azure/azure-disk-loss/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Azure Disk Loss </td>
- <td> This experiment detaches a virtual disk from an instance for the chaos duration and reattaches it after the chaos interval. The experiment is specific to the disk and the instance to which it is attached.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/azure/azure-disk-loss/">Azure Disk Loss </a> </td>
- </tr>
- </table>
\ No newline at end of file
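
The chaos library this README describes (deleted earlier in this patch) drives a detach / hold / reattach cycle against the named disks. A minimal sketch of that cycle behind a small interface (the `diskAPI` abstraction is hypothetical; the real code wraps the Azure compute SDK):

```go
package main

import (
	"fmt"
	"time"
)

// diskAPI abstracts the two calls the experiment needs.
type diskAPI interface {
	Detach(disk string) error
	Attach(disk string) error
}

// detachCycle detaches each disk, holds the fault for the chaos
// interval, then reattaches, mirroring the flow described above.
func detachCycle(api diskAPI, disks []string, interval time.Duration) error {
	for _, d := range disks {
		if err := api.Detach(d); err != nil {
			return fmt.Errorf("failed to detach %s: %w", d, err)
		}
	}
	time.Sleep(interval) // chaos window
	for _, d := range disks {
		if err := api.Attach(d); err != nil {
			return fmt.Errorf("failed to reattach %s: %w", d, err)
		}
	}
	return nil
}

type fakeAPI struct{}

func (fakeAPI) Detach(d string) error { fmt.Println("detach", d); return nil }
func (fakeAPI) Attach(d string) error { fmt.Println("attach", d); return nil }

func main() {
	_ = detachCycle(fakeAPI{}, []string{"disk-1"}, time.Second)
}
```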
diff --git a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go b/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go
deleted file mode 100644
index 9887f81..0000000
--- a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/azure-disk-loss/lib"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common"
-	azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/disk"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// AzureDiskLoss contains steps to inject chaos
-func AzureDiskLoss(clients clients.ClientSets) {
-
-	var err error
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresults", types.AwaitedVerdict)
-	}
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The volume information is as follows", logrus.Fields{
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Disk Names":     experimentsDetails.VirtualDiskNames,
-		"Resource Group": experimentsDetails.ResourceGroup,
-		"Sequence":       experimentsDetails.Sequence,
-	})
-
-	// Setting up Azure Subscription ID
-	if experimentsDetails.SubscriptionID, err = azureCommon.GetSubscriptionID(); err != nil {
-		log.Errorf("fail to get the subscription id: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// PRE-CHAOS VIRTUAL DISK STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the virtual disk are attached to VM instance(pre-chaos)")
-		if err = azureStatus.CheckVirtualDiskWithInstance(experimentsDetails.SubscriptionID, experimentsDetails.VirtualDiskNames, experimentsDetails.ResourceGroup); err != nil {
-			log.Errorf("Virtual disk status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Chaos injection failed: %v", err)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	// POST-CHAOS VIRTUAL DISK STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the virtual disk are attached to VM instance(post-chaos)")
-		if err = azureStatus.CheckVirtualDiskWithInstance(experimentsDetails.SubscriptionID, experimentsDetails.VirtualDiskNames, experimentsDetails.ResourceGroup); err != nil {
-			log.Errorf("Virtual disk status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresults", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-}
diff --git a/experiments/azure/azure-disk-loss/test/test.yml b/experiments/azure/azure-disk-loss/test/test.yml
deleted file mode 100644
index 3573cf7..0000000
--- a/experiments/azure/azure-disk-loss/test/test.yml
+++ /dev/null
@@ -1,81 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: azure-disk-loss-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-          
-          - name: CHAOS_INTERVAL
-            value: '30'
-
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: 'litmus'
-          
-          # provide the resource group of the instance
-          - name: RESOURCE_GROUP
-            value: ''
-          
-          # provide the virtual disk names (comma-separated if multiple)
-          - name: VIRTUAL_DISK_NAMES
-            value: ''
-          
-          # whether the disk is attached to a scale set instance or not, accepted values are disable, enable
-          - name: SCALE_SET
-            value: 'disable'
-          
-          # provide the sequence type for the run. Options: serial/parallel
-          - name: SEQUENCE
-            value: 'parallel'
-          
-          # provide the path to aks credentials mounted from secret
-          - name: AZURE_AUTH_LOCATION
-            value: '/tmp/azure.auth'
-
-            
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-          
-        secrets:
-          - name: cloud-secret
-            mountPath: /tmp/
-
diff --git a/experiments/azure/instance-stop/README.md b/experiments/azure/instance-stop/README.md
deleted file mode 100644
index ef47a58..0000000
--- a/experiments/azure/instance-stop/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Azure Instance Stop </td>
- <td> This experiment stops an Azure instance and brings it back to the running state after the specified chaos duration.</td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/azure/azure-instance-stop/"> Here </a> </td>
- </tr>
- </table>
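
Both the VMware flow earlier in this patch and the Azure flow below poll the provider with a TIMEOUT/DELAY pair until the instance reaches the wanted power state. A generic sketch of that retry loop (all names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForState polls getState every delay seconds until it returns the
// wanted state or timeout seconds have elapsed, matching the
// TIMEOUT/DELAY convention used by these experiments.
func waitForState(wanted string, timeout, delay int, getState func() (string, error)) error {
	deadline := time.Now().Add(time.Duration(timeout) * time.Second)
	for time.Now().Before(deadline) {
		state, err := getState()
		if err != nil {
			return err
		}
		if state == wanted {
			return nil
		}
		time.Sleep(time.Duration(delay) * time.Second)
	}
	return errors.New("timed out waiting for state " + wanted)
}

func main() {
	calls := 0
	err := waitForState("running", 10, 1, func() (string, error) {
		calls++
		if calls < 3 {
			return "stopping", nil // simulate a VM that takes a while
		}
		return "running", nil
	})
	fmt.Println(err) // <nil>
}
```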
diff --git a/experiments/azure/instance-stop/experiment/azure-instance-stop.go b/experiments/azure/instance-stop/experiment/azure-instance-stop.go
deleted file mode 100644
index a934c53..0000000
--- a/experiments/azure/instance-stop/experiment/azure-instance-stop.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/azure-instance-stop/lib"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common"
-	azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance"
-
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// AzureInstanceStop injects the Azure instance stop chaos
-func AzureInstanceStop(clients clients.ClientSets) {
-
-	var err error
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT")
-	if err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The instance information is as follows", logrus.Fields{
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Resource Group": experimentsDetails.ResourceGroup,
-		"Instance Name":  experimentsDetails.AzureInstanceNames,
-		"Sequence":       experimentsDetails.Sequence,
-	})
-
-	// Setting up Azure Subscription ID
-	if experimentsDetails.SubscriptionID, err = azureCommon.GetSubscriptionID(); err != nil {
-		log.Errorf("Failed to get the subscription id: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResults"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresults", types.AwaitedVerdict)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails)
-			if err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	//Verify the azure target instance is running (pre-chaos)
-	if chaosDetails.DefaultHealthCheck {
-		if err = azureStatus.InstanceStatusCheckByName(experimentsDetails.AzureInstanceNames, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup); err != nil {
-			log.Errorf("Azure instance status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: Azure instance(s) is in running state (pre-chaos)")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareAzureStop(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Info("[Confirmation]: Azure instance stop chaos has been injected successfully")
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//Verify the azure instance is running (post chaos)
-	if chaosDetails.DefaultHealthCheck {
-		if err = azureStatus.InstanceStatusCheckByName(experimentsDetails.AzureInstanceNames, experimentsDetails.ScaleSet, experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup); err != nil {
-			log.Errorf("Azure instance status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: Azure instance is in running state (post chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails)
-			if err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT")
-	if err != nil {
-		log.Errorf("Unable to update the chaosresult:  %v", err)
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResults"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresults", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-
-}
diff --git a/experiments/azure/instance-stop/rbac.yaml b/experiments/azure/instance-stop/rbac.yaml
deleted file mode 100644
index 3830f15..0000000
--- a/experiments/azure/instance-stop/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: azure-instance-stop-sa
-  namespace: default
-  labels:
-    name: azure-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: azure-instance-stop-sa
-  labels:
-    name: azure-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","secrets","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: azure-instance-stop-sa
-  labels:
-    name: azure-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: azure-instance-stop-sa
-subjects:
-- kind: ServiceAccount
-  name: azure-instance-stop-sa
-  namespace: default
diff --git a/experiments/azure/instance-stop/test/test.yml b/experiments/azure/instance-stop/test/test.yml
deleted file mode 100644
index 5cd264f..0000000
--- a/experiments/azure/instance-stop/test/test.yml
+++ /dev/null
@@ -1,78 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: azure-instance-stop-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-          
-          - name: RAMP_TIME
-            value: ''
-          
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-          
-          - name: CHAOS_INTERVAL
-            value: '30'
-          
-          - name: CHAOS_NAMESPACE
-            value: 'litmus'
-
-          # provide the instance names (comma-separated if multiple)
-          - name: AZURE_INSTANCE_NAME
-            value: ''
-          
-          # provide the resource group of the instance
-          - name: RESOURCE_GROUP
-            value: ''
-          
-          # whether the instance is part of a scale set or not, accepted values are disable, enable
-          - name: SCALE_SET
-            value: 'disable'
-          
-          # provide the sequence type for the run. Options: serial/parallel
-          - name: SEQUENCE
-            value: 'parallel'
-          
-          # provide the path to aks credentials mounted from secret
-          - name: AZURE_AUTH_LOCATION
-            value: '/tmp/azure.auth'
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-          
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-          
-        secrets:
-          - name: cloud-secret
-            mountPath: /tmp/
\ No newline at end of file
diff --git a/experiments/baremetal/redfish-node-restart/README.md b/experiments/baremetal/redfish-node-restart/README.md
deleted file mode 100644
index ff7caeb..0000000
--- a/experiments/baremetal/redfish-node-restart/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Watch Progress </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Redfish node restart </td>
- <td> This experiment restarts a baremetal node using the provided IPMI IP and credentials via the Redfish
- API. It can help study the behaviour of a Kubernetes cluster during node failure.</td>
- <td> Monitor the Node: <br> ping <node ip> </td>
- <td> NA</td>
- </tr>
- </table>
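
Redfish exposes restart as a `ComputerSystem.Reset` action posted to the system resource. A rough sketch of issuing that action with the standard library (the system path `System.Embedded.1` is vendor-specific and only an example; `InsecureSkipVerify` is for lab BMCs with self-signed certificates):

```go
package main

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"net/http"
)

// forceRestart issues a Redfish ComputerSystem.Reset action against the
// BMC at ip, authenticated with HTTP basic auth.
func forceRestart(ip, user, password string) error {
	url := "https://" + ip + "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
	body := bytes.NewBufferString(`{"ResetType":"ForceRestart"}`)
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return err
	}
	req.SetBasicAuth(user, password)
	req.Header.Set("Content-Type", "application/json")

	// BMCs commonly present self-signed certificates.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("reset rejected: %s", resp.Status)
	}
	return nil
}

func main() {
	if err := forceRestart("192.0.2.10", "admin", "secret"); err != nil {
		fmt.Println(err)
	}
}
```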
diff --git a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go b/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go
deleted file mode 100644
index e0d9a1b..0000000
--- a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go
+++ /dev/null
@@ -1,218 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/redfish-node-restart/lib"
-	redfishLib "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeRestart contains steps to inject chaos
-func NodeRestart(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE NODE INFORMATION
-	log.InfoWithValues("[Info]: The Node information is as follows", logrus.Fields{
-		"Node_IPMI_IP": experimentsDetails.IPMIIP,
-		"User":         experimentsDetails.User,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	// PRE-CHAOS NODE STATUS CHECK
-	log.Info("[Status]: Verify that the NUT (Node Under Test) is running (pre-chaos)")
-	nodeStatus, err := redfishLib.GetNodeStatus(experimentsDetails.IPMIIP, experimentsDetails.User, experimentsDetails.Password)
-	if err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("[Verification]: Unable to get node power status(pre-chaos). Error: %v", err)
-		return
-	}
-	if nodeStatus != "On" {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("[Verification]: Node is not in running state(pre-chaos)")
-		return
-	}
-	log.Info("[Verification]: Node is in running state(pre-chaos)")
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as running, as we already checked the status of node under test
-		msg := "NUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Chaos injection failed, err: %v", err)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-	if experimentsDetails.AuxiliaryAppInfo != "" {
-		log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-		if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Auxiliary Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	//POST-CHAOS NODE STATUS CHECK
-	log.Info("[Status]: Verify that the NUT (Node Under Test) is running (post-chaos)")
-	nodeStatus, err = redfishLib.GetNodeStatus(experimentsDetails.IPMIIP, experimentsDetails.User, experimentsDetails.Password)
-	if err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("[Verification]: Unable to get node power status. Error: %v ", err)
-		return
-	}
-	if nodeStatus != "On" {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("[Verification]: Node is not in running state(post-chaos)")
-		return
-	}
-	log.Info("[Verification]: Node is in running state(post-chaos)")
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as running, as we already checked the status of the node under test
-		msg := "NUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
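
Every experiment file removed in this patch follows the same lifecycle: update the ChaosResult at SOT, run pre-chaos checks and probes, inject chaos via the chaoslib `Prepare*` entrypoint, run post-chaos checks and probes, then update the ChaosResult at EOT. A condensed sketch of the fail-fast convention used throughout, where every failed step is recorded on the ChaosResult before returning; names here are illustrative, not the litmus-go API.

```go
package main

import "log"

// step applies the fail-fast convention seen above: log the error,
// record it on the ChaosResult, and stop the experiment.
func step(name string, fn func() error, recordFailure func(error)) bool {
	if err := fn(); err != nil {
		log.Printf("%s failed, err: %v", name, err)
		recordFailure(err)
		return false
	}
	return true
}

func main() {
	recordFailure := func(err error) { /* result.RecordAfterFailure(...) in the real experiment */ }
	phases := []struct {
		name string
		fn   func() error
	}{
		{"pre-chaos node status check", func() error { return nil }},
		{"chaos injection", func() error { return nil }},
		{"post-chaos node status check", func() error { return nil }},
	}
	for _, p := range phases {
		if !step(p.name, p.fn, recordFailure) {
			return
		}
	}
}
```
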
diff --git a/experiments/baremetal/redfish-node-restart/rbac.yaml b/experiments/baremetal/redfish-node-restart/rbac.yaml
deleted file mode 100644
index 67a17a1..0000000
--- a/experiments/baremetal/redfish-node-restart/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: redfish-node-restart-sa
-  namespace: default
-  labels:
-    name: redfish-node-restart-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: redfish-node-restart-sa
-  labels:
-    name: redfish-node-restart-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","secrets","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: redfish-node-restart-sa
-  labels:
-    name: redfish-node-restart-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: redfish-node-restart-sa
-subjects:
-- kind: ServiceAccount
-  name: redfish-node-restart-sa
-  namespace: default
diff --git a/experiments/baremetal/redfish-node-restart/test/test.yml b/experiments/baremetal/redfish-node-restart/test/test.yml
deleted file mode 100644
index 58363fc..0000000
--- a/experiments/baremetal/redfish-node-restart/test/test.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: redfish-node-restart-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          ## IPMI IP of the target node;
-          ## the node must have Redfish enabled
-          - name: IPMI_IP
-            value: ''
-
-          ## Credentials used to login to the IPMI console via redfish
-          - name: USER
-            value: ''
-
-          - name: PASSWORD
-            value: ''
-
-
diff --git a/experiments/cassandra/pod-delete/README.md b/experiments/cassandra/pod-delete/README.md
deleted file mode 100644
index 046b50d..0000000
--- a/experiments/cassandra/pod-delete/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Cassandra Pod Delete </td>
- <td> It causes (forced/graceful) pod failure of specific/random replicas of a Cassandra statefulset. It tests the statefulset's sanity (replica availability & uninterrupted service) and its recovery workflow. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/cassandra/cassandra-pod-delete/"> Here </a> </td>
- </tr>
- </table>
diff --git a/experiments/cassandra/pod-delete/experiment/pod-delete.go b/experiments/cassandra/pod-delete/experiment/pod-delete.go
deleted file mode 100644
index c4af00d..0000000
--- a/experiments/cassandra/pod-delete/experiment/pod-delete.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-delete/lib"
-	"github.com/litmuschaos/litmus-go/pkg/cassandra"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// CasssandraPodDelete injects the cassandra-pod-delete chaos
-func CasssandraPodDelete(clients clients.ClientSets) {
-
-	var err error
-	var ResourceVersionBefore string
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ChaoslibDetail.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ChaoslibDetail.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application informations are as follows", logrus.Fields{
-		"Namespace":              experimentsDetails.ChaoslibDetail.AppNS,
-		"Label":                  experimentsDetails.ChaoslibDetail.AppLabel,
-		"CassandraLivenessImage": experimentsDetails.CassandraLivenessImage,
-		"CassandraLivenessCheck": experimentsDetails.CassandraLivenessCheck,
-		"CassandraPort":          experimentsDetails.CassandraPort,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ChaoslibDetail.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		// Checking the load distribution on the ring (pre-chaos)
-		log.Info("[Status]: Checking the load distribution on the ring (pre-chaos)")
-		if err = cassandra.NodeToolStatusCheck(&experimentsDetails, clients); err != nil {
-			log.Errorf("[Status]: Chaos node tool status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Cassandra liveness check
-	if experimentsDetails.CassandraLivenessCheck == "enable" {
-		ResourceVersionBefore, err = cassandra.LivenessCheck(&experimentsDetails, clients)
-		if err != nil {
-			log.Errorf("[Liveness]: Cassandra liveness check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Confirmation]: The cassandra application liveness pod created successfully")
-	} else {
-		log.Warn("[Liveness]: Cassandra Liveness check skipped as it was not enable")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PreparePodDelete(experimentsDetails.ChaoslibDetail, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ChaoslibDetail.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		// Checking the load distribution on the ring (post-chaos)
-		log.Info("[Status]: Checking the load distribution on the ring (post-chaos)")
-		if err = cassandra.NodeToolStatusCheck(&experimentsDetails, clients); err != nil {
-			log.Errorf("[Status]: Chaos node tool status check is failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Cassandra statefulset liveness check (post-chaos)
-	log.Info("[Status]: Confirm that the cassandra liveness pod is running(post-chaos)")
-	// Checking the running status of cassandra liveness
-	if experimentsDetails.CassandraLivenessCheck == "enable" {
-		if err = status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=cassandra-liveness-deploy-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-			log.Errorf("Liveness status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		if err = cassandra.LivenessCleanup(&experimentsDetails, clients, ResourceVersionBefore); err != nil {
-			log.Errorf("Liveness cleanup failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-	//Updating the chaosResult in the end of experiment
-	log.Info("[The End]: Updating the chaos result of cassandra pod delete experiment (EOT)")
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ChaoslibDetail.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		msg := experimentsDetails.ChaoslibDetail.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
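
The `NodeToolStatusCheck` calls above gate this experiment on Cassandra's ring health. A rough sketch of what such a check can look like, parsing `nodetool status` output and requiring every node to report UN (Up/Normal); this mirrors the intent of the deleted helper, not its implementation.

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// allNodesUpNormal fails unless every data row of `nodetool status`
// reports state UN (Up/Normal). Rows start with a two-letter state
// code: U/D for up/down, N/J/L/M for normal/joining/leaving/moving.
func allNodesUpNormal(nodetoolOutput string) error {
	sc := bufio.NewScanner(strings.NewReader(nodetoolOutput))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		switch {
		case strings.HasPrefix(line, "UN "):
			// node is Up/Normal
		case strings.HasPrefix(line, "DN "), strings.HasPrefix(line, "UJ "),
			strings.HasPrefix(line, "UL "), strings.HasPrefix(line, "UM "):
			return fmt.Errorf("cassandra node is not Up/Normal: %q", line)
		}
	}
	return sc.Err()
}

func main() {
	sample := "Datacenter: dc1\n==============\nUN  10.0.0.1  100 KiB  256  100.0%  host-id  rack1\n"
	fmt.Println(allNodesUpNormal(sample))
}
```
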
diff --git a/experiments/cassandra/pod-delete/rbac.yaml b/experiments/cassandra/pod-delete/rbac.yaml
deleted file mode 100644
index 2630161..0000000
--- a/experiments/cassandra/pod-delete/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: cassandra-pod-delete-sa
-  namespace: default
-  labels:
-    name: cassandra-pod-delete-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: cassandra-pod-delete-sa
-  namespace: default
-  labels:
-    name: cassandra-pod-delete-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","deployments","statefulsets","services","pods/log","pods/exec","events","jobs","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: cassandra-pod-delete-sa
-  namespace: default
-  labels:
-    name: cassandra-pod-delete-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: cassandra-pod-delete-sa
-subjects:
-- kind: ServiceAccount
-  name: cassandra-pod-delete-sa
-  namespace: default
-
\ No newline at end of file
diff --git a/experiments/cassandra/pod-delete/test/test.yml b/experiments/cassandra/pod-delete/test/test.yml
deleted file mode 100644
index d5e355c..0000000
--- a/experiments/cassandra/pod-delete/test/test.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: cassandra-pod-delete-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: ''
-
-          - name: APP_LABEL
-            value: ''
-
-          - name: APP_KIND
-            value: ''
-
-          - name: CASSANDRA_SVC_NAME
-            value: 'cassandra'
-
-          - name: KEYSPACE_REPLICATION_FACTOR
-            value: ''
-
-          - name: CASSANDRA_PORT
-            value: '9042'
-
-          - name: LIVENESS_SVC_PORT
-            value: '8088'
-
-          - name: CASSANDRA_LIVENESS_IMAGE
-            value: 'litmuschaos/cassandra-client:latest'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '15'
-
-          - name: CHAOS_INTERVAL
-            value: '15'
-
-          - name: KILL_COUNT
-            value: ''
-
-          - name: CASSANDRA_LIVENESS_CHECK
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: FORCE
-            value: 'true'
-
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
\ No newline at end of file
diff --git a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go b/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go
deleted file mode 100644
index 446d6aa..0000000
--- a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/api/compute/v1"
-)
-
-// GCPVMDiskLossByLabel contains steps to inject chaos
-func GCPVMDiskLossByLabel(clients clients.ClientSets) {
-
-	var (
-		computeService *compute.Service
-		err            error
-	)
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The disk information is as follows", logrus.Fields{
-		"Disk Volume Label": experimentsDetails.DiskVolumeLabel,
-		"Zones":             experimentsDetails.Zones,
-		"Sequence":          experimentsDetails.Sequence,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Create a compute service to access the compute engine resources
-	computeService, err = gcp.GetGCPComputeService()
-	if err != nil {
-		log.Errorf("Failed to obtain a gcp compute service, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	//selecting the target disk volumes (pre-chaos)
-	if err := gcp.SetTargetDiskVolumes(computeService, &experimentsDetails); err != nil {
-		log.Errorf("Failed to get the target gcp disk volumes, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Info("[Status]: Disk volumes are attached to the VM instances (pre-chaos)")
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareDiskVolumeLossByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	// Checking disk volume attachment post-chaos
-	for i := range experimentsDetails.TargetDiskVolumeNamesList {
-		instanceName, err := gcp.GetVolumeAttachmentDetails(computeService, experimentsDetails.GCPProjectID, experimentsDetails.Zones, experimentsDetails.TargetDiskVolumeNamesList[i])
-		if err == nil && instanceName == "" {
-			err = errors.New("disk volume is not attached to any vm instance")
-		}
-		if err != nil {
-			log.Errorf("Failed to verify disk volume attachment status, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	log.Info("[Status]: Disk volumes are attached to the VM instances (post-chaos)")
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/gcp/gcp-vm-disk-loss-by-label/test/test.yml b/experiments/gcp/gcp-vm-disk-loss-by-label/test/test.yml
deleted file mode 100644
index 0652590..0000000
--- a/experiments/gcp/gcp-vm-disk-loss-by-label/test/test.yml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: gcp-vm-disk-loss-by-label-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          - name: CHAOS_INTERVAL
-            value: ''
-          
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-          - name: GCP_PROJECT_ID
-            value: ''
-
-          - name: DISK_ZONES
-            value: ''
-
-          # set the label of the target disk volumes
-          - name: DISK_VOLUME_LABEL
-            value: ''
-          
-          # set the percentage value of the disks with the given label 
-          # which should be targeted as part of the chaos injection
-          - name: DISK_AFFECTED_PERC
-            value: ''
-
-          # parallel or serial; determines how chaos is injected
-          - name: SEQUENCE
-            value: ''
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
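
`DISK_AFFECTED_PERC` above is resolved to a concrete number of target disks before injection. A sketch of the usual litmus convention, where an empty or zero percentage still selects one target when any disks carry the label; the helper below is hypothetical, not the exact deleted code.

```go
package main

import "fmt"

// targetsFromPercentage converts DISK_AFFECTED_PERC into a target
// count, always selecting at least one disk when any match the label.
func targetsFromPercentage(matched, percentage int) int {
	if matched == 0 {
		return 0
	}
	n := matched * percentage / 100
	if n == 0 {
		n = 1
	}
	return n
}

func main() {
	fmt.Println(targetsFromPercentage(8, 50)) // 4
	fmt.Println(targetsFromPercentage(8, 0))  // 1 (at least one target)
}
```
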
diff --git a/experiments/gcp/gcp-vm-disk-loss/README.md b/experiments/gcp/gcp-vm-disk-loss/README.md
deleted file mode 100644
index 725d704..0000000
--- a/experiments/gcp/gcp-vm-disk-loss/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> GCP VM Disk Loss </td>
- <td> It causes chaos to disrupt the state of a GCP persistent disk volume by detaching it from its VM instance for a certain chaos duration, using the disk name. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/gcp/gcp-vm-disk-loss/"> Here </a> </td>
- </tr>
- </table>
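
The primitive behind this experiment is a detach of the persistent disk followed by a reattach after the chaos duration. A minimal sketch against the public `google.golang.org/api/compute/v1` API, with placeholder project, zone, instance, and disk names; the deleted chaoslib wraps the same calls with retries and status checks.

```go
package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	project, zone, instance, device := "my-project", "us-central1-a", "my-vm", "my-disk"

	// detach the disk from its VM instance
	if _, err := svc.Instances.DetachDisk(project, zone, instance, device).Context(ctx).Do(); err != nil {
		log.Fatalf("detach failed: %v", err)
	}
	time.Sleep(30 * time.Second) // stand-in for TOTAL_CHAOS_DURATION

	// reattach the disk after the chaos duration
	disk := &compute.AttachedDisk{
		DeviceName: device,
		Source:     "projects/" + project + "/zones/" + zone + "/disks/" + device,
	}
	if _, err := svc.Instances.AttachDisk(project, zone, instance, disk).Context(ctx).Do(); err != nil {
		log.Fatalf("reattach failed: %v", err)
	}
}
```
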
diff --git a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go b/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go
deleted file mode 100644
index abb182a..0000000
--- a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-disk-loss/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	gcp "github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/api/compute/v1"
-)
-
-// VMDiskLoss injects the disk volume loss chaos
-func VMDiskLoss(clients clients.ClientSets) {
-
-	var (
-		computeService *compute.Service
-		err            error
-	)
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	experimentEnv.GetENV(&experimentsDetails)
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE VOLUME INFORMATION
-	log.InfoWithValues("The volume information is as follows", logrus.Fields{
-		"Volume IDs": experimentsDetails.DiskVolumeNames,
-		"Zones":      experimentsDetails.Zones,
-		"Sequence":   experimentsDetails.Sequence,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Create a compute service to access the compute engine resources
-	computeService, err = gcp.GetGCPComputeService()
-	if err != nil {
-		log.Errorf("Failed to obtain a gcp compute service, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Verify the vm instance is attached to disk volume
-	if chaosDetails.DefaultHealthCheck {
-		if err := gcp.DiskVolumeStateCheck(computeService, &experimentsDetails); err != nil {
-			log.Errorf("Volume status check failed pre chaos, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: Disk volumes are attached to the VM instances (pre-chaos)")
-	}
-
-	// Fetch target disk instance names
-	if err := gcp.SetTargetDiskInstanceNames(computeService, &experimentsDetails); err != nil {
-		log.Errorf("Failed to fetch the disk instance names, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareDiskVolumeLoss(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//Verify the vm instance is attached to disk volume
-	if chaosDetails.DefaultHealthCheck {
-		if err := gcp.DiskVolumeStateCheck(computeService, &experimentsDetails); err != nil {
-			log.Errorf("Volume status check failed post chaos, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: Disk volumes are attached to the VM instances (post-chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/gcp/gcp-vm-disk-loss/rbac.yaml b/experiments/gcp/gcp-vm-disk-loss/rbac.yaml
deleted file mode 100644
index 286fe0e..0000000
--- a/experiments/gcp/gcp-vm-disk-loss/rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: gcp-vm-disk-loss-sa
-  namespace: default
-  labels:
-    name: gcp-vm-disk-loss-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: gcp-vm-disk-loss-sa
-  labels:
-    name: gcp-vm-disk-loss-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: gcp-vm-disk-loss-sa
-  labels:
-    name: gcp-vm-disk-loss-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: gcp-vm-disk-loss-sa
-subjects:
-- kind: ServiceAccount
-  name: gcp-vm-disk-loss-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/gcp/gcp-vm-disk-loss/test/test.yml b/experiments/gcp/gcp-vm-disk-loss/test/test.yml
deleted file mode 100644
index d9bf7ea..0000000
--- a/experiments/gcp/gcp-vm-disk-loss/test/test.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: gcp-vm-disk-loss-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          - name: CHAOS_INTERVAL
-            value: ''
-          
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          # set the GCP project id
-          - name: GCP_PROJECT_ID
-            value: ''
-
-          # set the disk volume name(s) as comma-separated values
-          # e.g. volume1,volume2,...
-          - name: DISK_VOLUME_NAMES
-            value: ''
-                    
-          # set the disk zone(s) as comma-separated values in the corresponding order
-          # of DISK_VOLUME_NAMES
-          # e.g. zone1,zone2,...
-          - name: DISK_ZONES
-            value: ''
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-        volumeMounts:
-          - name: cloud-secret
-            mountPath: /tmp/
-      volumes:
-        - name: cloud-secret
-          secret:
-            secretName: cloud-secret
-
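
The `cloud-secret` mount above is how the experiment obtains GCP credentials. A sketch of consuming it when building the compute service; the JSON key file name under `/tmp/` depends on the key used inside the secret and is assumed here.

```go
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// the file name inside the mounted secret is an assumption
	svc, err := compute.NewService(ctx, option.WithCredentialsFile("/tmp/cloud_config.json"))
	if err != nil {
		log.Fatalf("failed to build compute service: %v", err)
	}
	_ = svc // hand the service to the chaos routines
}
```
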
diff --git a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go b/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go
deleted file mode 100644
index a4075b0..0000000
--- a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/api/compute/v1"
-)
-
-// GCPVMInstanceStopByLabel contains steps to inject chaos
-func GCPVMInstanceStopByLabel(clients clients.ClientSets) {
-
-	var (
-		computeService *compute.Service
-		err            error
-	)
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The vm instance information is as follows", logrus.Fields{
-		"Instance Label":               experimentsDetails.InstanceLabel,
-		"Instance Affected Percentage": experimentsDetails.InstanceAffectedPerc,
-		"Zone":                         experimentsDetails.Zones,
-		"Sequence":                     experimentsDetails.Sequence,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Create a compute service to access the compute engine resources
-	computeService, err = gcp.GetGCPComputeService()
-	if err != nil {
-		log.Errorf("Failed to obtain a gcp compute service, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	//selecting the target instances (pre-chaos)
-	if err = gcp.SetTargetInstance(computeService, &experimentsDetails); err != nil {
-		log.Errorf("Failed to get the target VM instances, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Info("[Status]: VM instances are in a running state (pre-chaos)")
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareVMStopByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	// Verify that GCP VM instance is running (post-chaos)
-	if experimentsDetails.ManagedInstanceGroup != "enable" {
-		if err := gcp.InstanceStatusCheck(computeService, experimentsDetails.TargetVMInstanceNameList, experimentsDetails.GCPProjectID, []string{experimentsDetails.Zones}); err != nil {
-			log.Errorf("Failed to get VM instance status, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	log.Info("[Status]: VM instances are in a running state (post-chaos)")
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/gcp/gcp-vm-instance-stop-by-label/test/test.yml b/experiments/gcp/gcp-vm-instance-stop-by-label/test/test.yml
deleted file mode 100644
index 944da47..0000000
--- a/experiments/gcp/gcp-vm-instance-stop-by-label/test/test.yml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: gcp-vm-instance-stop-by-label-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-
-          # set chaos duration (in sec) as desired
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          # set chaos interval (in sec) as desired
-          - name: CHAOS_INTERVAL
-            value: ''
-          
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-          - name: GCP_PROJECT_ID
-            value: ''
-
-          # Label of the target vm instance(s)  
-          - name: INSTANCE_LABEL
-            value: ''
-
-          - name: INSTANCE_ZONES
-            value: ''
-
-          - name: SEQUENCE
-            value: ''
-
-          - name: AUTO_SCALING_GROUP
-            value: ''
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/gcp/gcp-vm-instance-stop/README.md b/experiments/gcp/gcp-vm-instance-stop/README.md
deleted file mode 100644
index a214b28..0000000
--- a/experiments/gcp/gcp-vm-instance-stop/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> GCP VM Instance Stop </td>
- <td> It causes a power-off of a GCP VM instance, specified by an instance name or a list of instance names, before bringing it back to the running state after the specified chaos duration. It helps to check the performance of the application/process running on the VM instance while the instance is down. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/gcp/gcp-vm-instance-stop/"> Here </a> </td>
- </tr>
- </table>
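
For orientation, the core injection behind this experiment reduces to the Compute Engine stop/start calls. Below is a minimal sketch (not the litmus lib itself) using google.golang.org/api/compute/v1; the project, zone, and instance names are placeholder assumptions:

```go
package main

import (
	"context"
	"log"
	"time"

	compute "google.golang.org/api/compute/v1"
)

// stopAndRestart powers an instance off and back on, roughly mirroring
// what the experiment does between its pre- and post-chaos checks.
// Note: Stop/Start return long-running Operations; this sketch does not
// poll them to completion.
func stopAndRestart(project, zone, instance string, chaosDuration time.Duration) error {
	ctx := context.Background()
	svc, err := compute.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		return err
	}
	if _, err := svc.Instances.Stop(project, zone, instance).Do(); err != nil {
		return err
	}
	time.Sleep(chaosDuration) // keep the instance down for the chaos window
	_, err = svc.Instances.Start(project, zone, instance).Do()
	return err
}

func main() {
	// placeholder identifiers, for illustration only
	if err := stopAndRestart("my-project", "us-central1-a", "my-instance", 30*time.Second); err != nil {
		log.Fatal(err)
	}
}
```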
diff --git a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go b/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go
deleted file mode 100644
index 7f3cbfb..0000000
--- a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-instance-stop/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/gcp"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/api/compute/v1"
-)
-
-// VMInstanceStop executes the experiment steps by injecting chaos into the specified VM instances
-func VMInstanceStop(clients clients.ClientSets) {
-
-	var (
-		computeService *compute.Service
-		err            error
-	)
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	experimentEnv.GetENV(&experimentsDetails)
-	log.Infof("[PreReq]: Procured the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The vm instance information is as follows", logrus.Fields{
-		"Instance Names": experimentsDetails.VMInstanceName,
-		"Zones":          experimentsDetails.Zones,
-		"Sequence":       experimentsDetails.Sequence,
-	})
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Create a compute service to access the compute engine resources
-	computeService, err = gcp.GetGCPComputeService()
-	if err != nil {
-		log.Errorf("Failed to obtain a gcp compute service, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Verify that the GCP VM instance(s) is in RUNNING state (pre-chaos)
-	if chaosDetails.DefaultHealthCheck {
-		if err := gcp.InstanceStatusCheckByName(computeService, experimentsDetails.ManagedInstanceGroup, experimentsDetails.Delay, experimentsDetails.Timeout, "pre-chaos", experimentsDetails.VMInstanceName, experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-			log.Errorf("Failed to get the vm instance status, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		log.Info("[Status]: VM instance is in running state (pre-chaos)")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareVMStop(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//Verify the GCP VM instance is in RUNNING status (post-chaos)
-	if chaosDetails.DefaultHealthCheck {
-		if err := gcp.InstanceStatusCheckByName(computeService, experimentsDetails.ManagedInstanceGroup, experimentsDetails.Delay, experimentsDetails.Timeout, "post-chaos", experimentsDetails.VMInstanceName, experimentsDetails.GCPProjectID, experimentsDetails.Zones); err != nil {
-			log.Errorf("Failed to get the vm instance status, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		log.Info("[Status]: VM instance is in running state (post-chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
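
The AbortWatcher goroutine referenced in the experiments above follows the usual signal-trap shape. A minimal sketch of that pattern only — the cleanup callback stands in for the result/event bookkeeping the litmus helpers actually perform:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// watchForAbort blocks until SIGINT or SIGTERM arrives, then runs the
// cleanup callback; in the experiment this is where the failed result
// is recorded and the abort events are generated.
func watchForAbort(cleanup func()) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	<-ch
	cleanup()
}

func main() {
	go watchForAbort(func() {
		fmt.Println("chaos aborted: recording result and generating events")
		os.Exit(1)
	})
	select {} // stand-in for the experiment's business logic
}
```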
diff --git a/experiments/gcp/gcp-vm-instance-stop/rbac.yaml b/experiments/gcp/gcp-vm-instance-stop/rbac.yaml
deleted file mode 100644
index 3ace6d0..0000000
--- a/experiments/gcp/gcp-vm-instance-stop/rbac.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: gcp-vm-instance-stop-sa
-  namespace: default
-  labels:
-    name: gcp-vm-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: gcp-vm-instance-stop-sa
-  labels:
-    name: gcp-vm-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["patch","get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: gcp-vm-instance-stop-sa
-  labels:
-    name: gcp-vm-instance-stop-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: gcp-vm-instance-stop-sa
-subjects:
-- kind: ServiceAccount
-  name: gcp-vm-instance-stop-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/gcp/gcp-vm-instance-stop/test/test.yml b/experiments/gcp/gcp-vm-instance-stop/test/test.yml
deleted file mode 100644
index 7c9cf21..0000000
--- a/experiments/gcp/gcp-vm-instance-stop/test/test.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: gcp-vm-instance-stop-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-
-          - name: GCP_PROJECT_ID
-            value: ''
-
-          - name: VM_INSTANCE_NAMES
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: INSTANCE_ZONES
-            value: ''
-
-          - name: RAMP_TIME
-            value: '0'
-
-          - name: SEQUENCE
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-        volumeMounts:
-          - name: cloud-secret    # GCP credentials used by the experiment
-            mountPath: /tmp/
-      volumes:
-      - name: cloud-secret
-        secret:
-          secretName: cloud-secret
-
diff --git a/experiments/generic/container-kill/README.md b/experiments/generic/container-kill/README.md
deleted file mode 100644
index 9784994..0000000
--- a/experiments/generic/container-kill/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Container Kill </td>
- <td> This experiment executes SIGKILL on the containers of random replicas of an application deployment. It tests the deployment sanity (replica availability & uninterrupted service) and the recovery workflows of an application. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/container-kill/"> Here </a> </td>
- </tr>
- </table>
-
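
As an illustration of the injection itself (the litmus lib resolves the runtime and container ID at run time), a SIGKILL delivered through the container runtime CLI might look like the sketch below; the runtime binaries and flags are the commonly used ones, not an excerpt from the deleted lib:

```go
package main

import (
	"log"
	"os/exec"
)

// killContainer sends SIGKILL to a container through the runtime CLI.
func killContainer(runtime, containerID string) error {
	var cmd *exec.Cmd
	switch runtime {
	case "docker":
		cmd = exec.Command("docker", "kill", containerID) // docker kill defaults to SIGKILL
	default:
		// crictl covers containerd and crio; --timeout=0 forces an immediate kill
		cmd = exec.Command("crictl", "stop", "--timeout=0", containerID)
	}
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Printf("runtime output: %s", out)
		return err
	}
	return nil
}

func main() {
	// placeholder container ID, for illustration only
	if err := killContainer("docker", "0123456789ab"); err != nil {
		log.Fatal(err)
	}
}
```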
diff --git a/experiments/generic/container-kill/experiment/container-kill.go b/experiments/generic/container-kill/experiment/container-kill.go
deleted file mode 100644
index 7c06c37..0000000
--- a/experiments/generic/container-kill/experiment/container-kill.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/container-kill/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// ContainerKill injects the container-kill chaos
-func ContainerKill(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":          common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container": experimentsDetails.TargetContainer,
-		"Chaos Duration":   experimentsDetails.ChaosDuration,
-		"Chaos Interval":   experimentsDetails.ChaosInterval,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareContainerKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/container-kill/rbac.yaml b/experiments/generic/container-kill/rbac.yaml
deleted file mode 100644
index d10f6bf..0000000
--- a/experiments/generic/container-kill/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: container-kill-sa
-  namespace: default
-  labels:
-    name: container-kill-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: container-kill-sa
-  namespace: default
-  labels:
-    name: container-kill-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","pods/exec","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: container-kill-sa
-  namespace: default
-  labels:
-    name: container-kill-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: container-kill-sa
-subjects:
-- kind: ServiceAccount
-  name: container-kill-sa
-  namespace: default
-
diff --git a/experiments/generic/container-kill/test/test.yml b/experiments/generic/container-kill/test/test.yml
deleted file mode 100644
index 309fabe..0000000
--- a/experiments/generic/container-kill/test/test.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: container-kill-sa 
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '20'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: LIB_IMAGE  
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the container runtime path
-          # applicable only for containerd and crio runtime
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-           ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-          
-          - name: TARGET_POD
-            value: ''
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/disk-fill/README.md b/experiments/generic/disk-fill/README.md
deleted file mode 100644
index 9bb168f..0000000
--- a/experiments/generic/disk-fill/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Disk Fill </td>
- <td> This experiment causes disk stress by filling up the ephemeral storage space of the pod and forces the pod to get evicted if the used space exceeds the set ephemeral storage limit. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/disk-fill/"> Here </a> </td>
- </tr>
- </table>
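
Conceptually the stress is just appending blocks to a file on the pod's writable layer until the target size is reached. A minimal sketch under that assumption — the path and sizes are placeholders, not the experiment's actual parameters:

```go
package main

import (
	"log"
	"os"
)

// fillDisk appends zeroed blocks to path until roughly sizeBytes have
// been written, emulating ephemeral-storage pressure inside a container.
func fillDisk(path string, sizeBytes, blockSize int64) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()

	block := make([]byte, blockSize)
	for written := int64(0); written < sizeBytes; written += blockSize {
		if _, err := f.Write(block); err != nil {
			return err // e.g. "no space left on device"
		}
	}
	return f.Sync()
}

func main() {
	// Fill ~512 MiB in 4 MiB blocks under the container's writable layer.
	if err := fillDisk("/var/tmp/litmus-fill.dat", 512<<20, 4<<20); err != nil {
		log.Fatal(err)
	}
}
```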
diff --git a/experiments/generic/disk-fill/experiment/disk-fill.go b/experiments/generic/disk-fill/experiment/disk-fill.go
deleted file mode 100644
index 4e8a737..0000000
--- a/experiments/generic/disk-fill/experiment/disk-fill.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/disk-fill/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// DiskFill injects the disk-fill chaos
-func DiskFill(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":         common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Fill Percentage": experimentsDetails.FillPercentage,
-		"Chaos Duration":  experimentsDetails.ChaosDuration,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareDiskFill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/disk-fill/rbac.yaml b/experiments/generic/disk-fill/rbac.yaml
deleted file mode 100644
index 1c835bd..0000000
--- a/experiments/generic/disk-fill/rbac.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: disk-fill-sa
-  namespace: default
-  labels:
-    name: disk-fill-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: disk-fill-sa
-  labels:
-    name: disk-fill-sa
-rules:
-- apiGroups: ["","apps","litmuschaos.io","batch"]
-  resources: ["pods","jobs","pods/exec","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: disk-fill-sa
-  labels:
-    name: disk-fill-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: disk-fill-sa
-subjects:
-- kind: ServiceAccount
-  name: disk-fill-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/disk-fill/test/test.yml b/experiments/generic/disk-fill/test/test.yml
deleted file mode 100644
index 056cca3..0000000
--- a/experiments/generic/disk-fill/test/test.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: disk-fill-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: DATA_BLOCK_SIZE
-            value: ''
-
-          - name: CONTAINER_PATH
-            value: '/var/lib/docker/containers'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/docker-service-kill/README.md b/experiments/generic/docker-service-kill/README.md
deleted file mode 100644
index 5d89960..0000000
--- a/experiments/generic/docker-service-kill/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Docker Service Kill </td>
- <td> This experiment kills the docker service gracefully for a certain chaos duration. It aims to verify the resiliency of applications whose replicas may be evicted or become unreachable on account of nodes turning unschedulable (Not Ready) due to the docker service kill. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/docker-service-kill/"> Here </a> </td>
- </tr>
- </table>
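
On a systemd node the injection reduces to stopping the unit for the chaos window and starting it again. A hedged sketch of that stop/revert shape, assuming the code runs on the target node with sufficient privileges:

```go
package main

import (
	"log"
	"os/exec"
	"time"
)

// killServiceWindow stops a systemd unit for the given duration and then
// restarts it, mirroring the stop/revert shape of the experiment.
func killServiceWindow(unit string, d time.Duration) error {
	if out, err := exec.Command("systemctl", "stop", unit).CombinedOutput(); err != nil {
		log.Printf("stop output: %s", out)
		return err
	}
	time.Sleep(d) // node stays NotReady for the chaos duration
	out, err := exec.Command("systemctl", "start", unit).CombinedOutput()
	if err != nil {
		log.Printf("start output: %s", out)
	}
	return err
}

func main() {
	if err := killServiceWindow("docker", 60*time.Second); err != nil {
		log.Fatal(err)
	}
}
```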
diff --git a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go
deleted file mode 100644
index f93156e..0000000
--- a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go
+++ /dev/null
@@ -1,205 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/docker-service-kill/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// DockerServiceKill injects the docker-service-kill chaos
-func DockerServiceKill(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Target Node":    experimentsDetails.TargetNode,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the nodes under test
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareDockerServiceKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Chaos injection failed, err: %v", err)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the nodes under test
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/docker-service-kill/rbac.yaml b/experiments/generic/docker-service-kill/rbac.yaml
deleted file mode 100644
index daa301b..0000000
--- a/experiments/generic/docker-service-kill/rbac.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: docker-service-kill-sa
-  namespace: default
-  labels:
-    name: docker-service-kill-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: docker-service-kill-sa
-  namespace: default
-  labels:
-    name: docker-service-kill-sa
-rules:
-  - apiGroups:
-      - ""
-      - "batch"
-      - "apps"
-      - "litmuschaos.io"
-    resources:
-      - "jobs"
-      - "pods"
-      - "pods/log"
-      - "events"
-      - "chaosengines"
-      - "chaosexperiments"
-      - "chaosresults"
-    verbs:
-      - "create"
-      - "list"
-      - "get"
-      - "patch"
-      - "update"
-      - "delete"
-  - apiGroups:
-      - ""
-    resources:
-      - "nodes"
-    verbs:
-      - "get"
-      - "list"
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: docker-service-kill-sa
-  namespace: default
-  labels:
-    name: docker-service-kill-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: docker-service-kill-sa
-subjects:
-  - kind: ServiceAccount
-    name: docker-service-kill-sa
-    namespace: default
diff --git a/experiments/generic/docker-service-kill/test/test.yml b/experiments/generic/docker-service-kill/test/test.yml
deleted file mode 100644
index 6efbf85..0000000
--- a/experiments/generic/docker-service-kill/test/test.yml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: docker-service-kill-sa
-      containers:
-        - name: gotest
-          image: busybox
-          command:
-            - sleep
-            - "3600"
-          env:
-            # provide application namespace
-            - name: APP_NAMESPACE
-              value: ""
-
-            # provide application labels
-            - name: APP_LABEL
-              value: ""
-
-            # provide application kind
-            - name: APP_KIND
-              value: ""
-
-            - name: TOTAL_CHAOS_DURATION
-              value: ""
-
-            # provide auxiliary application details - namespace and labels of the applications
-            # sample input is - "ns1:app=percona,ns2:name=nginx"
-            - name: AUXILIARY_APPINFO
-              value: ""
-
-            ## Period to wait before injection of chaos in sec
-            - name: RAMP_TIME
-              value: ""
-
-            # provide the chaos namespace
-            - name: CHAOS_NAMESPACE
-              value: ""
-
-            - name: NODE_LABEL
-              value: ""
-
-            - name: TARGET_NODE
-              value: ""
-
-            - name: TARGET_CONTAINER
-              value: ""
-
-            - name: POD_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.name
-
-            - name: CHAOS_SERVICE_ACCOUNT
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.serviceAccountName
diff --git a/experiments/generic/kubelet-service-kill/README.md b/experiments/generic/kubelet-service-kill/README.md
deleted file mode 100644
index 8db0044..0000000
--- a/experiments/generic/kubelet-service-kill/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Kubelet Service Kill </td>
- <td> This experiment kills the kubelet service gracefully for a certain chaos duration. It aims to verify the resiliency of applications whose replicas may be evicted or become unreachable on account of nodes turning unschedulable (Not Ready) due to the kubelet service kill. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/kubelet-service-kill/"> Here </a> </td>
- </tr>
- </table>
-
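
The Not Ready state mentioned above is visible directly in the node's status conditions, which is what the pre/post-chaos node checks inspect. A sketch using client-go — the kubeconfig resolution and node name are simplified placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// isNodeReady reports whether the node's Ready condition is True,
// which is what the pre/post-chaos node status checks look at.
func isNodeReady(cs kubernetes.Interface, name string) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, c := range node.Status.Conditions {
		if c.Type == corev1.NodeReady {
			return c.Status == corev1.ConditionTrue, nil
		}
	}
	return false, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	ready, err := isNodeReady(cs, "node-1") // node name is a placeholder
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ready:", ready)
}
```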
diff --git a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
deleted file mode 100644
index 2aaebaf..0000000
--- a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/kubelet-service-kill/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// KubeletServiceKill injects the kubelet-service-kill chaos
-func KubeletServiceKill(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Target Node":    experimentsDetails.TargetNode,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareKubeletKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
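
All of the experiment entrypoints removed by this patch share one lifecycle: read the ENV, create the ChaosResult at start-of-test (SOT), run pre-chaos checks and probes, inject the chaos, run post-chaos checks and probes, and patch the ChaosResult at end-of-test (EOT). A minimal Go sketch of that flow, using only helpers that appear in the deleted imports above; the condensed error handling is illustrative, not the full implementation:

package experiment

import (
	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/kubelet-service-kill/lib"
	clients "github.com/litmuschaos/litmus-go/pkg/clients"
	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/environment"
	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/types"
	"github.com/litmuschaos/litmus-go/pkg/result"
	"github.com/litmuschaos/litmus-go/pkg/types"
)

// lifecycleSketch condenses the flow shared by every deleted entrypoint.
func lifecycleSketch(clients clients.ClientSets) {
	experimentsDetails := experimentTypes.ExperimentDetails{}
	resultDetails := types.ResultDetails{}
	eventsDetails := types.EventDetails{}
	chaosDetails := types.ChaosDetails{}

	// 1. Read the ENV and initialise the chaos and result attributes.
	experimentEnv.GetENV(&experimentsDetails)
	types.InitialiseChaosVariables(&chaosDetails)
	types.SetResultAttributes(&resultDetails, chaosDetails)

	// 2. Create the ChaosResult at SOT; every later failure path
	// records the error on the result and returns.
	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
		return
	}

	// 3. Inject the chaos (pre-chaos status checks and probes omitted).
	chaosDetails.Phase = types.ChaosInjectPhase
	if err := litmusLIB.PrepareKubeletKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
		return
	}
	resultDetails.Verdict = v1alpha1.ResultVerdictPassed

	// 4. Post-chaos checks and probes (omitted), then patch the
	// ChaosResult at EOT.
	chaosDetails.Phase = types.PostChaosPhase
	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
	}
}
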
diff --git a/experiments/generic/kubelet-service-kill/rbac.yaml b/experiments/generic/kubelet-service-kill/rbac.yaml
deleted file mode 100644
index 4b2ccca..0000000
--- a/experiments/generic/kubelet-service-kill/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kubelet-service-kill-sa
-  namespace: default
-  labels:
-    name: kubelet-service-kill-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: kubelet-service-kill-sa
-  labels:
-    name: kubelet-service-kill-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kubelet-service-kill-sa
-  labels:
-    name: kubelet-service-kill-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: kubelet-service-kill-sa
-subjects:
-- kind: ServiceAccount
-  name: kubelet-service-kill-sa
-  namespace: default
diff --git a/experiments/generic/kubelet-service-kill/test/test.yml b/experiments/generic/kubelet-service-kill/test/test.yml
deleted file mode 100644
index 39a32f3..0000000
--- a/experiments/generic/kubelet-service-kill/test/test.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: kubelet-service-kill-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: APP_NODE
-            value: 'node-01'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/node-cpu-hog/README.md b/experiments/generic/node-cpu-hog/README.md
deleted file mode 100644
index d60c2c8..0000000
--- a/experiments/generic/node-cpu-hog/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Node Cpu Hog </td>
- <td> This experiment causes CPU resource exhaustion on the Kubernetes node. The experiment aims to verify the resiliency of applications whose replicas may be evicted on account of nodes turning unschedulable (Not Ready) due to lack of CPU resources. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-cpu-hog/"> Here </a> </td>
- </tr>
- </table>
diff --git a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
deleted file mode 100644
index a035acf..0000000
--- a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-cpu-hog/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeCPUHog injects the node-cpu-hog chaos
-func NodeCPUHog(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Target Nodes":   experimentsDetails.TargetNodes,
-		"Node CPU Cores": experimentsDetails.NodeCPUcores,
-	})
-
-	// Calling the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: CPU hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/node-cpu-hog/rbac.yaml b/experiments/generic/node-cpu-hog/rbac.yaml
deleted file mode 100644
index dfc377e..0000000
--- a/experiments/generic/node-cpu-hog/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-cpu-hog-sa
-  namespace: default
-  labels:
-    name: node-cpu-hog-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-cpu-hog-sa
-  labels:
-    name: node-cpu-hog-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","events","chaosengines","pods/log","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-cpu-hog-sa
-  labels:
-    name: node-cpu-hog-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-cpu-hog-sa
-subjects:
-- kind: ServiceAccount
-  name: node-cpu-hog-sa
-  namespace: default
diff --git a/experiments/generic/node-cpu-hog/test/test.yml b/experiments/generic/node-cpu-hog/test/test.yml
deleted file mode 100644
index 4fc8fe7..0000000
--- a/experiments/generic/node-cpu-hog/test/test.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-cpu-hog-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: NODE_CPU_CORE
-            value: '1'
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
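
Each of these test manifests drives the experiment purely through environment variables, which the deleted experimentEnv.GetENV helpers read into an ExperimentDetails struct. The parsers themselves are outside this diff; a hypothetical sketch of the pattern for the node-cpu-hog variables above (the getEnv helper, the trimmed struct, and the default values are assumptions, not the deleted code):

package environment

import (
	"os"
	"strconv"
)

// ExperimentDetails mirrors a few of the fields referenced by the
// deleted node-cpu-hog entrypoint; the struct here is trimmed down.
type ExperimentDetails struct {
	ChaosDuration  int
	ChaosNamespace string
	NodeCPUcores   int
	RampTime       int
}

// getEnv is an assumed helper: return the variable if it is set and
// non-empty, otherwise the fallback.
func getEnv(key, fallback string) string {
	if value, ok := os.LookupEnv(key); ok && value != "" {
		return value
	}
	return fallback
}

// GetENV sketches how TOTAL_CHAOS_DURATION, CHAOS_NAMESPACE,
// NODE_CPU_CORE, and RAMP_TIME from the manifest above could be
// parsed; the defaults are assumptions, not the deleted implementation.
func GetENV(details *ExperimentDetails) {
	details.ChaosDuration, _ = strconv.Atoi(getEnv("TOTAL_CHAOS_DURATION", "60"))
	details.ChaosNamespace = getEnv("CHAOS_NAMESPACE", "default")
	details.NodeCPUcores, _ = strconv.Atoi(getEnv("NODE_CPU_CORE", "0"))
	details.RampTime, _ = strconv.Atoi(getEnv("RAMP_TIME", "0"))
}
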
diff --git a/experiments/generic/node-drain/README.md b/experiments/generic/node-drain/README.md
deleted file mode 100644
index 223ac86..0000000
--- a/experiments/generic/node-drain/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Node Drain </td>
- <td> This experiment drains the node where the application pod is running and verifies whether it gets rescheduled on another available node. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-drain/"> Here </a> </td>
- </tr>
- </table>
diff --git a/experiments/generic/node-drain/experiment/node-drain.go b/experiments/generic/node-drain/experiment/node-drain.go
deleted file mode 100644
index 91876c4..0000000
--- a/experiments/generic/node-drain/experiment/node-drain.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-drain/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-drain/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-drain/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeDrain injects the node-drain chaos
-func NodeDrain(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Target Node":    experimentsDetails.TargetNode,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling the AbortWatcherWithoutExit goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeDrain(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
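
One difference between the file above and the other deleted entrypoints: node-drain starts the abort handler with common.AbortWatcherWithoutExit, while every other experiment in this patch uses common.AbortWatcher. Both calls below are verbatim from the deleted sources; the comment on why node-drain needs the non-exiting variant is an assumption:

// Used by kubelet-service-kill, node-cpu-hog, node-io-stress, and node-memory-hog:
go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)

// Used by node-drain only. Presumably the non-exiting watcher lets the
// drain library's own abort path run (e.g. uncordoning the target node)
// instead of terminating the process immediately.
go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
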
diff --git a/experiments/generic/node-drain/rbac.yaml b/experiments/generic/node-drain/rbac.yaml
deleted file mode 100644
index f2a21cd..0000000
--- a/experiments/generic/node-drain/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-drain-sa
-  namespace: default
-  labels:
-    name: node-drain-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-drain-sa
-  labels:
-    name: node-drain-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","extensions","apps"]
-  resources: ["pods","jobs","events","chaosengines","pods/log","daemonsets","pods/eviction","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["patch","get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-drain-sa
-  labels:
-    name: node-drain-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-drain-sa
-subjects:
-- kind: ServiceAccount
-  name: node-drain-sa
-  namespace: default
diff --git a/experiments/generic/node-drain/test/test.yml b/experiments/generic/node-drain/test/test.yml
deleted file mode 100644
index 6403bc5..0000000
--- a/experiments/generic/node-drain/test/test.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-drain-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: APP_NODE
-            value: 'node-01'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/node-io-stress/README.md b/experiments/generic/node-io-stress/README.md
deleted file mode 100644
index 0f82d42..0000000
--- a/experiments/generic/node-io-stress/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
-<td> Node IO Stress </td>
-<td> This experiment causes disk stress on the Kubernetes node. The experiment aims to verify the resiliency of applications that share this disk resource for ephemeral or persistent storage purposes. </td>
-<td>   <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-io-stress/"> Here </a> </td>
-</tr> 
-</table>
diff --git a/experiments/generic/node-io-stress/experiment/node-io-stress.go b/experiments/generic/node-io-stress/experiment/node-io-stress.go
deleted file mode 100644
index 1b76fa8..0000000
--- a/experiments/generic/node-io-stress/experiment/node-io-stress.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-io-stress/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeIOStress injects the node-io-stress chaos
-func NodeIOStress(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":                      experimentsDetails.NodeLabel,
-		"Chaos Duration":                  experimentsDetails.ChaosDuration,
-		"Target Nodes":                    experimentsDetails.TargetNodes,
-		"NumberOfWorkers":                 experimentsDetails.NumberOfWorkers,
-		"FilesystemUtilizationPercentage": experimentsDetails.FilesystemUtilizationPercentage,
-		"FilesystemUtilizationBytes":      experimentsDetails.FilesystemUtilizationBytes,
-	})
-
-	// Calling the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeIOStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: node io stress failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking NUT as ready, as we already checked the status of the target nodes
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/node-io-stress/rbac.yaml b/experiments/generic/node-io-stress/rbac.yaml
deleted file mode 100644
index e540057..0000000
--- a/experiments/generic/node-io-stress/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-io-stress-sa
-  namespace: default
-  labels:
-    name: node-io-stress-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-io-stress-sa
-  labels:
-    name: node-io-stress-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-io-stress-sa
-  labels:
-    name: node-io-stress-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-io-stress-sa
-subjects:
-- kind: ServiceAccount
-  name: node-io-stress-sa
-  namespace: default
diff --git a/experiments/generic/node-io-stress/test/test.yml b/experiments/generic/node-io-stress/test/test.yml
deleted file mode 100644
index 03a8ef0..0000000
--- a/experiments/generic/node-io-stress/test/test.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-io-stress-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: FILESYSTEM_UTILIZATION_PERCENTAGE
-            value: ''
-
-          - name: FILESYSTEM_UTILIZATION_BYTES
-            value: '' 
-
-          - name: NUMBER_OF_WORKERS
-            value: '4'
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/node-memory-hog/README.md b/experiments/generic/node-memory-hog/README.md
deleted file mode 100644
index 7ec4c73..0000000
--- a/experiments/generic/node-memory-hog/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
-<td> Node Memory Hog </td>
-<td> This experiment causes memory exhaustion on the Kubernetes node. The experiment aims to verify the resiliency of applications whose replicas may be evicted on account of nodes turning unschedulable due to lack of memory resources. </td>
-<td>   <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-memory-hog/"> Here </a> </td>
-</tr> 
-</table>
diff --git a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go
deleted file mode 100644
index 3c5dea5..0000000
--- a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-memory-hog/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeMemoryHog injects the node-memory-hog chaos
-func NodeMemoryHog(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":                    experimentsDetails.NodeLabel,
-		"Chaos Duration":                experimentsDetails.ChaosDuration,
-		"Target Nodes":                  experimentsDetails.TargetNodes,
-		"Memory Consumption Percentage": experimentsDetails.MemoryConsumptionPercentage,
-		"Memory Consumption Mebibytes":  experimentsDetails.MemoryConsumptionMebibytes,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeMemoryHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: node memory hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNodes, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
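Every experiment entrypoint removed in this patch follows the same skeleton: read ENV, initialise chaos and result details, run pre-chaos checks and probes, hand off to the chaoslib, then repeat the checks and publish the verdict. For orientation, a minimal sketch of how a runner binary can dispatch to `NodeMemoryHog` by `EXPERIMENT_NAME`; the real `bin/experiment/experiment.go` removed by this patch carries a far longer case list, so treat the wiring below as illustrative:

```go
package main

import (
	"log"
	"os"

	nodeMemoryHog "github.com/litmuschaos/litmus-go/experiments/generic/node-memory-hog/experiment"
	clients "github.com/litmuschaos/litmus-go/pkg/clients"
)

func main() {
	clientSet := clients.ClientSets{}
	// connect to the cluster before handing control to the experiment
	if err := clientSet.GenerateClientSetFromKubeConfig(); err != nil {
		log.Fatalf("Unable to create kubernetes clients, err: %v", err)
	}
	switch os.Getenv("EXPERIMENT_NAME") {
	case "node-memory-hog":
		nodeMemoryHog.NodeMemoryHog(clientSet)
	default:
		log.Fatalf("Unsupported -> %v, please provide a valid experiment name", os.Getenv("EXPERIMENT_NAME"))
	}
}
```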
diff --git a/experiments/generic/node-memory-hog/rbac.yaml b/experiments/generic/node-memory-hog/rbac.yaml
deleted file mode 100644
index 3217801..0000000
--- a/experiments/generic/node-memory-hog/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-memory-hog-sa
-  namespace: default
-  labels:
-    name: node-memory-hog-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-memory-hog-sa
-  labels:
-    name: node-memory-hog-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-memory-hog-sa
-  labels:
-    name: node-memory-hog-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-memory-hog-sa
-subjects:
-- kind: ServiceAccount
-  name: node-memory-hog-sa
-  namespace: default
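The ClusterRole above grants the experiment `get`/`list` on nodes alongside the chaos CRD verbs. A hedged sketch, not part of litmus-go, of confirming at runtime that the bound service account actually holds one of those verbs, using a SelfSubjectAccessReview:

```go
package rbaccheck

import (
	"context"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canListNodes asks the API server whether the caller's identity
// (here, node-memory-hog-sa) may list nodes cluster-wide.
func canListNodes(ctx context.Context, client kubernetes.Interface) (bool, error) {
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Verb:     "list",
				Resource: "nodes",
			},
		},
	}
	resp, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}
```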
diff --git a/experiments/generic/node-memory-hog/test/test.yml b/experiments/generic/node-memory-hog/test/test.yml
deleted file mode 100644
index 63bf495..0000000
--- a/experiments/generic/node-memory-hog/test/test.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-memory-hog-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          # percentage of total node memory to consume
-          - name: MEMORY_PERCENTAGE
-            value: '50'
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
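The manifest drives the experiment entirely through environment variables, which `experimentEnv.GetENV` reads back on the Go side. A minimal sketch of that loader pattern, with hypothetical helper names (`getEnv`, `getEnvInt`) and defaults chosen to mirror the manifest above:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// getEnv returns the variable's value, or fallback when it is unset or empty.
func getEnv(key, fallback string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return fallback
}

// getEnvInt parses an integer variable, falling back on any parse failure.
func getEnvInt(key string, fallback int) int {
	if value, err := strconv.Atoi(os.Getenv(key)); err == nil {
		return value
	}
	return fallback
}

type experimentDetails struct {
	AppNamespace  string
	ChaosDuration int
	ChaosInterval int
}

func main() {
	details := experimentDetails{
		AppNamespace:  getEnv("APP_NAMESPACE", "default"),
		ChaosDuration: getEnvInt("TOTAL_CHAOS_DURATION", 60),
		ChaosInterval: getEnvInt("CHAOS_INTERVAL", 10),
	}
	fmt.Printf("%+v\n", details)
}
```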
diff --git a/experiments/generic/node-restart/README.md b/experiments/generic/node-restart/README.md
deleted file mode 100644
index ee25c69..0000000
--- a/experiments/generic/node-restart/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Node Restart </td>
- <td> This experiment restarts a Kubernetes node. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-restart/"> Here </a> </td>
- </tr>
- </table>
diff --git a/experiments/generic/node-restart/experiment/node-restart.go b/experiments/generic/node-restart/experiment/node-restart.go
deleted file mode 100644
index cf6084a..0000000
--- a/experiments/generic/node-restart/experiment/node-restart.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-restart/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-restart/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-restart/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeRestart injects the node-restart chaos
-func NodeRestart(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Target Node":    experimentsDetails.TargetNode,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeRestart(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: Node restart failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	msg = experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-	types.SetResultEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-}
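Each entrypoint also launches `common.AbortWatcher` (or its `WithoutExit` variant) in a goroutine before touching the cluster. A minimal sketch of that signal-trapping pattern; the real watcher additionally updates the ChaosResult and emits events before exiting:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// abortWatcher blocks until SIGINT/SIGTERM arrives, then bails out.
func abortWatcher(experimentName string) {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
	<-signals
	log.Printf("[Abort]: %v experiment received an abort signal", experimentName)
	// the real watcher records a Stopped verdict and generates events here
	os.Exit(1)
}

func main() {
	go abortWatcher("node-restart")
	select {} // stand-in for the experiment's business logic
}
```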
diff --git a/experiments/generic/node-restart/rbac.yaml b/experiments/generic/node-restart/rbac.yaml
deleted file mode 100644
index b806764..0000000
--- a/experiments/generic/node-restart/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-restart-sa
-  namespace: default
-  labels:
-    name: node-restart-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-restart-sa
-  labels:
-    name: node-restart-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","jobs","secrets","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-restart-sa
-  labels:
-    name: node-restart-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-restart-sa
-subjects:
-- kind: ServiceAccount
-  name: node-restart-sa
-  namespace: default
diff --git a/experiments/generic/node-restart/test/test.yml b/experiments/generic/node-restart/test/test.yml
deleted file mode 100644
index 75b89cf..0000000
--- a/experiments/generic/node-restart/test/test.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-restart-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10' 
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: SSH_USER
-            value: 'core'
-
-          - name: REBOOT_COMMAND
-            value: 'sudo systemctl reboot'
-
-          - name: SECRET_NAME
-            value: 'id-rsa'
-          
-          - name: SECRET_VALUE
-            value: 'ssh-privatekey'
-
-          - name: TARGET_NODE
-            value: 'node01'
-
-          - name: TARGET_NODE_IP
-            value: '192.168.1.15'
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
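The SSH_USER, REBOOT_COMMAND, SECRET_NAME, and TARGET_NODE_IP variables above show how the restart is delivered: an SSH session to the node that runs the reboot command, authenticated with the mounted private key. A minimal sketch under those assumptions; the key path and the `:22` port are illustrative, and the litmus-go lib reads the key from the secret mount instead:

```go
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func rebootNode(user, addr, command string, privateKey []byte) error {
	signer, err := ssh.ParsePrivateKey(privateKey)
	if err != nil {
		return err
	}
	config := &ssh.ClientConfig{
		User:            user,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // chaos runners rarely pin host keys
	}
	client, err := ssh.Dial("tcp", addr+":22", config)
	if err != nil {
		return err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	// the reboot usually severs the connection, so a non-nil error is expected
	return session.Run(command)
}

func main() {
	key, err := os.ReadFile("/tmp/ssh-privatekey") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	if err := rebootNode("core", "192.168.1.15", "sudo systemctl reboot", key); err != nil {
		log.Printf("reboot command returned: %v", err)
	}
}
```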
diff --git a/experiments/generic/node-taint/README.md b/experiments/generic/node-taint/README.md
deleted file mode 100644
index aebd7bd..0000000
--- a/experiments/generic/node-taint/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Node Taint </td>
- <td> This experiment adds specific taints to the node, which cause forceful eviction of the pods from that node, and checks whether they are scheduled on another available node. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/nodes/node-taint/"> Here </a> </td>
- </tr>
- </table>
diff --git a/experiments/generic/node-taint/experiment/node-taint.go b/experiments/generic/node-taint/experiment/node-taint.go
deleted file mode 100644
index 834c361..0000000
--- a/experiments/generic/node-taint/experiment/node-taint.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-taint/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// NodeTaint injects the node-taint chaos
-func NodeTaint(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Node Label":     experimentsDetails.NodeLabel,
-		"Target Node":    experimentsDetails.TargetNode,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Taints":         experimentsDetails.Taints,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Errorf("Target nodes are not in the ready state, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareNodeTaint(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
-		if experimentsDetails.AuxiliaryAppInfo != "" {
-			log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
-			if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-				log.Errorf("Auxiliary Application status check failed, err: %v", err)
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-		}
-
-		// Checking the status of target nodes
-		log.Info("[Status]: Getting the status of target nodes")
-		if err := status.CheckNodeStatus(experimentsDetails.TargetNode, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
-			log.Warnf("Target nodes are not in the ready state, you may need to manually recover the node, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "NUT: Not Ready", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "NUT: Ready"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "NUT: Ready, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "NUT: Ready, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
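`PrepareNodeTaint` boils down to parsing the `TAINTS` value (the test manifest later in this patch uses `node.kubernetes.io/unreachable:NoExecute`) and updating the node object. A library-style sketch with client-go; `parseTaint` is a hypothetical helper, and the idempotency checks and revert handling are elided:

```go
package nodetaint

import (
	"context"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// parseTaint splits "key=value:effect" (value optional) into a corev1.Taint.
func parseTaint(spec string) corev1.Taint {
	keyValue, effect, _ := strings.Cut(spec, ":")
	key, value, _ := strings.Cut(keyValue, "=")
	return corev1.Taint{Key: key, Value: value, Effect: corev1.TaintEffect(effect)}
}

// taintNode appends the parsed taints to the node spec and writes it back.
func taintNode(ctx context.Context, client kubernetes.Interface, nodeName, taints string) error {
	node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	for _, spec := range strings.Split(taints, ",") {
		node.Spec.Taints = append(node.Spec.Taints, parseTaint(spec))
	}
	_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
	return err
}
```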
diff --git a/experiments/generic/node-taint/rbac.yaml b/experiments/generic/node-taint/rbac.yaml
deleted file mode 100644
index d72ac4b..0000000
--- a/experiments/generic/node-taint/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: node-taint-sa
-  namespace: default
-  labels:
-    name: node-taint-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: node-taint-sa
-  labels:
-    name: node-taint-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","extensions"]
-  resources: ["pods","jobs","events","chaosengines","pods/log","daemonsets","pods/eviction","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["patch","get","list","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: node-taint-sa
-  labels:
-    name: node-taint-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: node-taint-sa
-subjects:
-- kind: ServiceAccount
-  name: node-taint-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/node-taint/test/test.yml b/experiments/generic/node-taint/test/test.yml
deleted file mode 100644
index 968ae4b..0000000
--- a/experiments/generic/node-taint/test/test.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: node-taint-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: APP_NODE
-            value: 'node-01'
-
-          - name: TAINTS
-            value: 'node.kubernetes.io/unreachable:NoExecute'
-
-          - name: AUXILIARY_APPINFO
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-autoscaler/README.md b/experiments/generic/pod-autoscaler/README.md
deleted file mode 100644
index f12de20..0000000
--- a/experiments/generic/pod-autoscaler/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Autoscaler</td>
- <td> Scales the deployment replicas to verify the application's autoscaling capability. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-autoscaler/"> Here </a> </td>
- </tr>
- </table>
\ No newline at end of file
diff --git a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
deleted file mode 100644
index 5930959..0000000
--- a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-autoscaler/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodAutoscaler injects the pod-autoscaler chaos
-func PodAutoscaler(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application informations are as follows", logrus.Fields{
-		"Namespace":      experimentsDetails.AppNS,
-		"AppKind":        experimentsDetails.AppKind,
-		"AppLabel":       experimentsDetails.AppLabel,
-		"Replicas":       experimentsDetails.Replicas,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PreparePodAutoscaler(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT")
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
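`PreparePodAutoscaler` ultimately rewrites the replica count (the `REPLICA_COUNT` env in the test manifest below) and watches the rollout. A minimal sketch of the scaling step via the deployment's scale subresource; rollout verification and restoring the original count are elided:

```go
package autoscaler

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment sets the deployment's desired replicas through the
// scale subresource rather than patching the whole deployment object.
func scaleDeployment(ctx context.Context, client kubernetes.Interface, namespace, name string, replicas int32) error {
	scale, err := client.AppsV1().Deployments(namespace).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	scale.Spec.Replicas = replicas // the requested REPLICA_COUNT
	_, err = client.AppsV1().Deployments(namespace).UpdateScale(ctx, name, scale, metav1.UpdateOptions{})
	return err
}
```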
diff --git a/experiments/generic/pod-autoscaler/rbac.yaml b/experiments/generic/pod-autoscaler/rbac.yaml
deleted file mode 100644
index db35439..0000000
--- a/experiments/generic/pod-autoscaler/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-autoscaler-sa
-  namespace: default
-  labels:
-    name: pod-autoscaler-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: pod-autoscaler-sa
-  labels:
-    name: pod-autoscaler-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","deployments","jobs","events","chaosengines","pods/log","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: pod-autoscaler-sa
-  labels:
-    name: pod-autoscaler-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: pod-autoscaler-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-autoscaler-sa
-  namespace: default
diff --git a/experiments/generic/pod-autoscaler/test/test.yml b/experiments/generic/pod-autoscaler/test/test.yml
deleted file mode 100644
index f9f2f86..0000000
--- a/experiments/generic/pod-autoscaler/test/test.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-autoscaler-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
-
-          # provide application kind
-          - name: APP_KIND
-            value: ''
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '30'
-
-          - name: REPLICA_COUNT
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-            ## Period to wait before/after injection of chaos  
-          - name: RAMP_TIME
-            value: ''
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
diff --git a/experiments/generic/pod-cpu-hog-exec/README.md b/experiments/generic/pod-cpu-hog-exec/README.md
deleted file mode 100644
index 4e7313c..0000000
--- a/experiments/generic/pod-cpu-hog-exec/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod CPU Hog Exec </td>
- <td> This experiment causes CPU resource consumption on specified application containers by starting one or more md5sum calculation processes on the special file /dev/zero (see the sketch below). It can test the application's resilience to potential slowness/unavailability of some replicas due to high CPU load. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-cpu-hog-exec/"> Here </a> </td>
- </tr>
-</table>
\ No newline at end of file
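The md5sum-on-/dev/zero mechanism described above runs inside the target container over the Kubernetes exec API. A minimal sketch of that call; litmus-go wraps it in its own exec helpers, so treat the wiring here as illustrative:

```go
package stress

import (
	"context"
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// runCPUHog execs a single md5sum stressor in the target container; it blocks
// (pinning one core) until the stream is cancelled or the process is killed.
func runCPUHog(ctx context.Context, client kubernetes.Interface, config *rest.Config, ns, pod, container string) error {
	req := client.CoreV1().RESTClient().Post().
		Resource("pods").Name(pod).Namespace(ns).SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Container: container,
			Command:   []string{"sh", "-c", "md5sum /dev/zero"},
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)
	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return err
	}
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{Stdout: os.Stdout, Stderr: os.Stderr})
}
```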
diff --git a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
deleted file mode 100644
index 49b7c2b..0000000
--- a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-cpu-hog-exec/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodCPUHogExec injects the pod-cpu-hog-exec chaos
-func PodCPUHogExec(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":          common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container": experimentsDetails.TargetContainer,
-		"Chaos Duration":   experimentsDetails.ChaosDuration,
-		"CPU Cores":        experimentsDetails.CPUcores,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareCPUExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: CPU hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-cpu-hog-exec/rbac.yaml b/experiments/generic/pod-cpu-hog-exec/rbac.yaml
deleted file mode 100644
index b6dfdf8..0000000
--- a/experiments/generic/pod-cpu-hog-exec/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-cpu-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-exec-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-cpu-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-exec-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-cpu-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-exec-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-cpu-hog-exec-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-cpu-hog-exec-sa
-  namespace: default
diff --git a/experiments/generic/pod-cpu-hog-exec/test/test.yml b/experiments/generic/pod-cpu-hog-exec/test/test.yml
deleted file mode 100644
index 4002552..0000000
--- a/experiments/generic/pod-cpu-hog-exec/test/test.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-cpu-hog-exec-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
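-          ## Number of CPU cores to be consumed in the target container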
-          - name: CPU_CORES
-            value: '1'
-
-          ## Percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: '100' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          - name: SEQUENCE
-            value: 'parallel'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-cpu-hog/README.md b/experiments/generic/pod-cpu-hog/README.md
deleted file mode 100644
index d772251..0000000
--- a/experiments/generic/pod-cpu-hog/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod CPU Hog </td>
- <td> This experiment consumes CPU resources of the specified application containers using `cgroups` and the litmus `nsutil` tool. It can test the application's resilience to potential slowness/unavailability of some replicas due to high CPU load. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-cpu-hog/"> Here </a> </td>
- </tr>
-</table>
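-
-Below is a minimal `ChaosEngine` sketch for triggering this experiment; the app labels, namespace, and service account are illustrative and assume the experiment CR and its RBAC are already installed:
-
-```yaml
-apiVersion: litmuschaos.io/v1alpha1
-kind: ChaosEngine
-metadata:
-  name: nginx-chaos
-  namespace: default
-spec:
-  appinfo:
-    appns: 'default'
-    applabel: 'run=nginx'
-    appkind: 'deployment'
-  engineState: 'active'
-  chaosServiceAccount: pod-cpu-hog-sa
-  experiments:
-    - name: pod-cpu-hog
-      spec:
-        components:
-          env:
-            - name: TOTAL_CHAOS_DURATION
-              value: '60'
-            - name: CPU_CORES
-              value: '1'
-```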
diff --git a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
deleted file mode 100644
index ded331c..0000000
--- a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodCPUHog inject the pod-cpu-hog chaos
-func PodCPUHog(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-cpu-hog")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Calling the AbortWatcher goroutine; it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
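-	// switch to the inject phase before delegating injection to the stress-chaos library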
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: CPU hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-cpu-hog/rbac.yaml b/experiments/generic/pod-cpu-hog/rbac.yaml
deleted file mode 100644
index cdf5fcd..0000000
--- a/experiments/generic/pod-cpu-hog/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-cpu-hog-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-cpu-hog-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-cpu-hog-sa
-  namespace: default
-  labels:
-    name: pod-cpu-hog-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-cpu-hog-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-cpu-hog-sa
-  namespace: default
diff --git a/experiments/generic/pod-cpu-hog/test/test.yml b/experiments/generic/pod-cpu-hog/test/test.yml
deleted file mode 100644
index 4002552..0000000
--- a/experiments/generic/pod-cpu-hog/test/test.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-cpu-hog-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: CPU_CORES
-            value: '1'
-
-          ## Percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: '100' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          - name: SEQUENCE
-            value: 'parallel'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-delete/README.md b/experiments/generic/pod-delete/README.md
deleted file mode 100644
index 2e9c8cc..0000000
--- a/experiments/generic/pod-delete/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Delete </td>
- <td> This experiment causes (forced/graceful) pod failure of random replicas of an application deployment. It tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the application pod. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-delete/"> Here </a> </td>
- </tr>
- </table>
-
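-The experiment entry of a `ChaosEngine` for pod-delete might look as follows (a sketch; values are illustrative, and the engine skeleton is the same as for other litmus experiments):
-
-```yaml
-experiments:
-  - name: pod-delete
-    spec:
-      components:
-        env:
-          - name: TOTAL_CHAOS_DURATION
-            value: '20'
-          - name: CHAOS_INTERVAL
-            value: '10'
-          # 'true' deletes pods forcibly with grace period 0
-          - name: FORCE
-            value: 'false'
-```
-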
diff --git a/experiments/generic/pod-delete/experiment/pod-delete.go b/experiments/generic/pod-delete/experiment/pod-delete.go
deleted file mode 100644
index a5801ff..0000000
--- a/experiments/generic/pod-delete/experiment/pod-delete.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package experiment
-
-import (
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-delete/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-	"os"
-)
-
-// PodDelete inject the pod-delete chaos
-func PodDelete(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	if err := result.SetResultUID(&resultDetails, clients, &chaosDetails); err != nil {
-		log.Errorf("Unable to set the result uid, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if err := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); err != nil {
-		log.Errorf("failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":        common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Calling the AbortWatcher goroutine; it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-				log.Errorf("failed to create %v event inside chaosengine", types.PreChaosCheck)
-			}
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
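-	// switch to the inject phase; the pod-delete library selects and deletes the target pods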
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-delete/rbac.yaml b/experiments/generic/pod-delete/rbac.yaml
deleted file mode 100644
index bedf23f..0000000
--- a/experiments/generic/pod-delete/rbac.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-delete-sa
-  namespace: default
-  labels:
-    name: pod-delete-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-delete-sa
-  namespace: default
-  labels:
-    name: pod-delete-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","deployments","pods/log","events","jobs","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-delete-sa
-  namespace: default
-  labels:
-    name: pod-delete-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-delete-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-delete-sa
-  namespace: default
-
diff --git a/experiments/generic/pod-delete/test/test.yml b/experiments/generic/pod-delete/test/test.yml
deleted file mode 100644
index e77b5fa..0000000
--- a/experiments/generic/pod-delete/test/test.yml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: litmus
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '20'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: FORCE
-            value: '' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-dns-error/README.md b/experiments/generic/pod-dns-error/README.md
deleted file mode 100644
index 48e0755..0000000
--- a/experiments/generic/pod-dns-error/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod DNS Error </td>
- <td> It injects chaos to disrupt DNS resolution in Kubernetes pods. It causes loss of access to services by blocking DNS resolution of hostnames/domains. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-dns-error/"> Here </a> </td>
- </tr>
- </table>
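-
-A sketch of the experiment entry in a `ChaosEngine` (values are illustrative; the envs mirror the test manifest below):
-
-```yaml
-experiments:
-  - name: pod-dns-error
-    spec:
-      components:
-        env:
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-          # empty targets all hostnames
-          - name: TARGET_HOSTNAMES
-            value: '["litmuschaos"]'
-          - name: MATCH_SCHEME
-            value: 'exact'
-```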
diff --git a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go
deleted file mode 100644
index 4442365..0000000
--- a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-dns-chaos/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodDNSError contains steps to inject chaos
-func PodDNSError(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, experimentEnv.Error)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-		"TargetHostNames":   experimentsDetails.TargetHostNames,
-	})
-
-	// Calling the AbortWatcher goroutine; it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
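-	// switch to the inject phase; the dns-chaos library injects the DNS error chaos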
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-dns-error/rbac.yaml b/experiments/generic/pod-dns-error/rbac.yaml
deleted file mode 100644
index 48bb233..0000000
--- a/experiments/generic/pod-dns-error/rbac.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-dns-error-sa
-  namespace: default
-  labels:
-    name: pod-dns-error-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-dns-error-sa
-  namespace: default
-  labels:
-    name: pod-dns-error-sa
-rules:
-  - apiGroups: [""]
-    resources: ["pods","events"]
-    verbs: ["create","list","get","patch","update","delete","deletecollection"]
-  - apiGroups: [""]
-    resources: ["pods/exec","pods/log","replicationcontrollers"]
-    verbs: ["create","list","get"]
-  - apiGroups: ["batch"]
-    resources: ["jobs"]
-    verbs: ["create","list","get","delete","deletecollection"]
-  - apiGroups: ["apps"]
-    resources: ["deployments","statefulsets","daemonsets","replicasets"]
-    verbs: ["list","get"]
-  - apiGroups: ["apps.openshift.io"]
-    resources: ["deploymentconfigs"]
-    verbs: ["list","get"]
-  - apiGroups: ["argoproj.io"]
-    resources: ["rollouts"]
-    verbs: ["list","get"]
-  - apiGroups: ["litmuschaos.io"]
-    resources: ["chaosengines","chaosexperiments","chaosresults"]
-    verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-dns-error-sa
-  namespace: default
-  labels:
-    name: pod-dns-error-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-dns-error-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-dns-error-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-dns-error/test/test.yml b/experiments/generic/pod-dns-error/test/test.yml
deleted file mode 100644
index 5bdb7fd..0000000
--- a/experiments/generic/pod-dns-error/test/test.yml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-dns-error-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          # provide application kind
-          - name: APP_KIND
-            value: 'deployment'
-
-          # list of the target hostnames or keywords, e.g. '["litmuschaos","chaosnative.io"]'. If empty, all hostnames are targets
-          - name: TARGET_HOSTNAMES
-            value: ''
-
-          # can be either exact or substring; determines whether the dns query must match one of the targets exactly or may contain any of the targets as a substring
-          - name: MATCH_SCHEME
-            value: 'exact'
-
-          # in sec
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_PODS
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-            ## Period to wait before/after injection of chaos
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-
diff --git a/experiments/generic/pod-dns-spoof/README.md b/experiments/generic/pod-dns-spoof/README.md
deleted file mode 100644
index b91ebe7..0000000
--- a/experiments/generic/pod-dns-spoof/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod DNS Spoof </td>
- <td> It injects chaos to spoof DNS resolution in Kubernetes pods. It causes DNS resolution of target hostnames/domains to resolve to the wrong IPs, as specified by SPOOF_MAP in the engine config. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-dns-spoof/"> Here </a> </td>
- </tr>
- </table>
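-
-A sketch of the experiment entry in a `ChaosEngine` (values are illustrative; SPOOF_MAP maps real hostnames to their spoofed replacements):
-
-```yaml
-experiments:
-  - name: pod-dns-spoof
-    spec:
-      components:
-        env:
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-          - name: SPOOF_MAP
-            value: '{"abc.com":"spoofabc.com"}'
-```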
diff --git a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go
deleted file mode 100644
index 2776805..0000000
--- a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-dns-chaos/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodDNSSpoof contains steps to inject chaos
-func PodDNSSpoof(clients clients.ClientSets) {
-
-	var err error
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, experimentEnv.Spoof)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-		"Spoof Map":         experimentsDetails.SpoofMap,
-	})
-
-	// Calling the AbortWatcher goroutine; it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails)
-			if err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
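-	// switch to the inject phase; the dns-chaos library injects the DNS spoof chaos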
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err = litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Info("[Confirmation]: chaos has been injected successfully")
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err = status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of the application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-dns-spoof/rbac.yaml b/experiments/generic/pod-dns-spoof/rbac.yaml
deleted file mode 100644
index a34c946..0000000
--- a/experiments/generic/pod-dns-spoof/rbac.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-dns-spoof-sa
-  namespace: default
-  labels:
-    name: pod-dns-spoof-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-dns-spoof-sa
-  namespace: default
-  labels:
-    name: pod-dns-spoof-sa
-rules:
-  - apiGroups: [""]
-    resources: ["pods","events"]
-    verbs: ["create","list","get","patch","update","delete","deletecollection"]
-  - apiGroups: [""]
-    resources: ["pods/exec","pods/log","replicationcontrollers"]
-    verbs: ["create","list","get"]
-  - apiGroups: ["batch"]
-    resources: ["jobs"]
-    verbs: ["create","list","get","delete","deletecollection"]
-  - apiGroups: ["apps"]
-    resources: ["deployments","statefulsets","daemonsets","replicasets"]
-    verbs: ["list","get"]
-  - apiGroups: ["apps.openshift.io"]
-    resources: ["deploymentconfigs"]
-    verbs: ["list","get"]
-  - apiGroups: ["argoproj.io"]
-    resources: ["rollouts"]
-    verbs: ["list","get"]
-  - apiGroups: ["litmuschaos.io"]
-    resources: ["chaosengines","chaosexperiments","chaosresults"]
-    verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-dns-spoof-sa
-  namespace: default
-  labels:
-    name: pod-dns-spoof-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-dns-spoof-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-dns-spoof-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-dns-spoof/test/test.yml b/experiments/generic/pod-dns-spoof/test/test.yml
deleted file mode 100644
index 1fef36b..0000000
--- a/experiments/generic/pod-dns-spoof/test/test.yml
+++ /dev/null
@@ -1,82 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-dns-spoof-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          # provide application kind
-          - name: APP_KIND
-            value: 'deployment'
-
-          # map of the target hostnames, e.g. '{"abc.com":"spoofabc.com"}'. If empty, no queries will be spoofed
-          - name: SPOOF_MAP
-            value: '{"abc.com":"spoofabc.com"}'
-
-          # in sec
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_PODS
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          ## Period to wait before/after injection of chaos
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-
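The SPOOF_MAP above is a JSON object mapping real hostnames to their spoofed replacements. As a minimal sketch of how such a value can be decoded — illustrative only, using the standard library; the chaoslib helper's actual parsing may differ and the variable names here are hypothetical:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    func main() {
        // SPOOF_MAP as set in the manifest above; an empty value disables spoofing.
        raw := "{\"abc.com\":\"spoofabc.com\"}"

        spoofMap := map[string]string{}
        if err := json.Unmarshal([]byte(raw), &spoofMap); err != nil {
            log.Fatalf("invalid SPOOF_MAP, err: %v", err)
        }
        for target, spoofed := range spoofMap {
            fmt.Printf("DNS queries for %s will be answered as %s\n", target, spoofed)
        }
    }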
diff --git a/experiments/generic/pod-fio-stress/README.md b/experiments/generic/pod-fio-stress/README.md
deleted file mode 100644
index b2e9082..0000000
--- a/experiments/generic/pod-fio-stress/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Watch Progress </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod FIO Stress </td>
- <td> This experiment consumes storage on the specified application containers by running the fio command for a set duration. It can test the application's resilience to issues that arise when one of its instances faces a storage crunch.</td>
- <td>  Monitor the storage consumed on the host filesystem: <br> watch du -h</td>
- <td>  NA</td>
- </tr>
- </table>
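The fio parameters this experiment exposes (IO_ENGINE, IO_DEPTH, READ_WRITE_MODE, BLOCK_SIZE, SIZE, NUMBER_OF_JOBS in the test manifest further down) map naturally onto an fio command line. A hedged sketch of that mapping — the flags are standard fio options, but the actual chaoslib helper may assemble the command differently:

    package main

    import "fmt"

    // fioCommand assembles an fio invocation from the experiment parameters.
    // The flag mapping is an assumption for illustration, not the helper itself.
    func fioCommand(ioEngine string, ioDepth int, rwMode, blockSize string, sizeMB, jobs int) string {
        return fmt.Sprintf("fio --name=chaos --ioengine=%s --iodepth=%d --rw=%s --bs=%s --size=%dm --numjobs=%d",
            ioEngine, ioDepth, rwMode, blockSize, sizeMB, jobs)
    }

    func main() {
        // Defaults taken from the pod-fio-stress test manifest in this patch.
        fmt.Println(fioCommand("libaio", 1, "randwrite", "4k", 5120, 2))
    }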
diff --git a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go
deleted file mode 100644
index 2f7348a..0000000
--- a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-fio-stress/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodFioStress contains the steps to inject pod-fio-stress chaos
-func PodFioStress(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":          common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container": experimentsDetails.TargetContainer,
-		"Chaos Duration":   experimentsDetails.ChaosDuration,
-	})
-
-	// Call the AbortWatcherWithoutExit goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Chaos injection failed, err: %v", err)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-fio-stress/rbac.yaml b/experiments/generic/pod-fio-stress/rbac.yaml
deleted file mode 100644
index a7cebdf..0000000
--- a/experiments/generic/pod-fio-stress/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-fio-stress-sa
-  namespace: default
-  labels:
-    name: pod-fio-stress-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-fio-stress-sa
-  namespace: default
-  labels:
-    name: pod-fio-stress-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-fio-stress-sa
-  namespace: default
-  labels:
-    name: pod-fio-stress-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-fio-stress-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-fio-stress-sa
-  namespace: default
diff --git a/experiments/generic/pod-fio-stress/test/test.yml b/experiments/generic/pod-fio-stress/test/test.yml
deleted file mode 100644
index 56cb88b..0000000
--- a/experiments/generic/pod-fio-stress/test/test.yml
+++ /dev/null
@@ -1,77 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-fio-stress-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          # provide application labels
-          - name: APP_LABEL
-            value: 'app=nginx'
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '120'
-          
-          ## Period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: '' 
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-          
-          - name: SEQUENCE
-            value: 'serial'
-
-          - name: IO_ENGINE
-            value: 'libaio'
-        
-          - name: IO_DEPTH
-            value: '1'
-          
-          - name: READ_WRITE_MODE
-            value: 'randwrite'
-
-          - name: BLOCK_SIZE
-            value: '4k'
-
-          # Size in MBs
-          - name: SIZE
-            value: '5120'
-
-          - name: NUMBER_OF_JOBS
-            value: '2'
-          
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
\ No newline at end of file
diff --git a/experiments/generic/pod-http-latency/README.md b/experiments/generic/pod-http-latency/README.md
deleted file mode 100644
index e913e00..0000000
--- a/experiments/generic/pod-http-latency/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod HTTP Latency </td>
- <td>This experiment injects latency into HTTP requests on the specified container by starting a proxy server and redirecting the traffic through it. It can test the application's resilience to lossy/flaky requests to dependent services.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-http-latency/"> Here </a> </td>
- </tr>
- </table>
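The proxy-plus-redirection approach described above can be illustrated with a minimal delaying reverse proxy. This is a standard-library sketch, assuming the target service listens on TARGET_SERVICE_PORT (80) and the proxy on PROXY_PORT (2002) with LATENCY=2000ms, as in the test manifest below — it is not the experiment's actual injection mechanism:

    package main

    import (
        "log"
        "net/http"
        "net/http/httputil"
        "net/url"
        "time"
    )

    func main() {
        // Assumed values mirroring the test manifest in this patch.
        target, err := url.Parse("http://localhost:80")
        if err != nil {
            log.Fatal(err)
        }
        proxy := httputil.NewSingleHostReverseProxy(target)
        latency := 2000 * time.Millisecond

        http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            time.Sleep(latency) // hold every request for the configured latency
            proxy.ServeHTTP(w, r)
        }))
        log.Fatal(http.ListenAndServe(":2002", nil))
    }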
diff --git a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go
deleted file mode 100644
index 03bee50..0000000
--- a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/latency"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpLatency injects the pod-http-latency chaos
-func PodHttpLatency(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-http-latency")
-
-	// Initialize events Parameters
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Call the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodHttpLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-http-latency/rbac.yaml b/experiments/generic/pod-http-latency/rbac.yaml
deleted file mode 100644
index 9ab60e6..0000000
--- a/experiments/generic/pod-http-latency/rbac.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-http-latency-sa
-  namespace: default
-  labels:
-    name: pod-http-latency-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-http-latency-sa
-  namespace: default
-  labels:
-    name: pod-http-latency-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-  # Create and monitor the experiment & helper pods
-  - apiGroups: [""]
-    resources: ["pods"]
-    verbs: ["create","delete","get","list","patch","update", "deletecollection"]
-  # Performs CRUD operations on the events inside chaosengine and chaosresult
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["create","get","list","patch","update"]
-  # Fetch configmap details and mount them to the experiment pod (if specified)
-  - apiGroups: [""]
-    resources: ["configmaps"]
-    verbs: ["get","list",]
-  # Track and get the runner, experiment, and helper pod logs
-  - apiGroups: [""]
-    resources: ["pods/log"]
-    verbs: ["get","list","watch"]  
-  # for creating and executing commands inside the target container
-  - apiGroups: [""]
-    resources: ["pods/exec"]
-    verbs: ["get","list","create"]
-  # deriving the parent/owner details of the pod (if parent is any of {deployment, statefulset, daemonset})
-  - apiGroups: ["apps"]
-    resources: ["deployments","statefulsets","replicasets", "daemonsets"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod(if parent is deploymentConfig)  
-  - apiGroups: ["apps.openshift.io"]
-    resources: ["deploymentconfigs"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-  - apiGroups: [""]
-    resources: ["replicationcontrollers"]
-    verbs: ["get","list"]
-  # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-  - apiGroups: ["argoproj.io"]
-    resources: ["rollouts"]
-    verbs: ["list","get"]
-  # for configuring and monitoring the experiment job by the chaos-runner pod
-  - apiGroups: ["batch"]
-    resources: ["jobs"]
-    verbs: ["create","list","get","delete","deletecollection"]
-  # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
-  - apiGroups: ["litmuschaos.io"]
-    resources: ["chaosengines","chaosexperiments","chaosresults"]
-    verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-http-latency-sa
-  namespace: default
-  labels:
-    name: pod-http-latency-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-http-latency-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-http-latency-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-http-latency/test/test.yaml b/experiments/generic/pod-http-latency/test/test.yaml
deleted file mode 100644
index 44ad925..0000000
--- a/experiments/generic/pod-http-latency/test/test.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-http-latency-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: ''
-
-          # provide auxiliary application details - namespace and labels of the applications
-          # sample input is - "ns1:app=percona,ns2:name=nginx"
-          - name: AUXILIARY_APPINFO
-            value: ''
-          
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          # provide lib image
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:latest' 
-
-          - name: LATENCY
-            value: '2000' #in ms
-
-          # port of the target service
-          - name: TARGET_SERVICE_PORT
-            value: "80"
-
-          # port on which the proxy will listen
-          - name: PROXY_PORT
-            value: "2002"
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' # in seconds
-
-          # Time period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          - name: TARGET_PODS
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, and crio
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the socket file path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # To select pods on specific node(s)
-          - name: NODE_LABEL
-            value: ''
-
-          ## it defines the sequence of chaos execution for multiple target pods
-          ## supported values: serial, parallel
-          - name: SEQUENCE
-            value: 'parallel'
-          
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
\ No newline at end of file
diff --git a/experiments/generic/pod-http-modify-body/README.md b/experiments/generic/pod-http-modify-body/README.md
deleted file mode 100644
index b6a8c17..0000000
--- a/experiments/generic/pod-http-modify-body/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod HTTP Modify Body </td>
- <td>This experiment modifies the response body on the specified container by starting a proxy server and redirecting the traffic through it. It can test the application's resilience to incorrect or incomplete response bodies from dependent services.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-http-modify-body/"> Here </a> </td>
- </tr>
- </table>
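Response-body rewriting through a proxy can be sketched with the standard library's ReverseProxy and its ModifyResponse hook. This is illustrative only, assuming the ports from the test manifest below (target service on 80, proxy on 2002); the replacement body here stands in for the RESPONSE_BODY value and is hypothetical:

    package main

    import (
        "bytes"
        "io"
        "log"
        "net/http"
        "net/http/httputil"
        "net/url"
        "strconv"
    )

    func main() {
        // Assumed ports from the test manifest: target service on 80, proxy on 2002.
        target, err := url.Parse("http://localhost:80")
        if err != nil {
            log.Fatal(err)
        }
        proxy := httputil.NewSingleHostReverseProxy(target)

        // Stand-in for RESPONSE_BODY: every upstream response body is replaced.
        newBody := []byte(`{"error":"chaos-injected"}`)
        proxy.ModifyResponse = func(resp *http.Response) error {
            resp.Body.Close()
            resp.Body = io.NopCloser(bytes.NewReader(newBody))
            resp.ContentLength = int64(len(newBody))
            resp.Header.Set("Content-Length", strconv.Itoa(len(newBody)))
            return nil
        }
        log.Fatal(http.ListenAndServe(":2002", proxy))
    }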
diff --git a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
deleted file mode 100644
index c68b405..0000000
--- a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/modify-body"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpModifyBody contains steps to inject chaos
-func PodHttpModifyBody(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-http-modify-body")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Call the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodHttpModifyBodyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-http-modify-body/rbac.yaml b/experiments/generic/pod-http-modify-body/rbac.yaml
deleted file mode 100644
index 2e15380..0000000
--- a/experiments/generic/pod-http-modify-body/rbac.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-http-modify-body-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-body-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-http-modify-body-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-body-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-  - apiGroups: 
-      - "" 
-      - "batch" 
-      - "apps" 
-      - "litmuschaos.io"
-    resources: 
-      - "jobs" 
-      - "pods" 
-      - "pods/log" 
-      - "events" 
-      - "deployments" 
-      - "replicasets" 
-      - "pods/exec" 
-      - "chaosengines" 
-      - "chaosexperiments" 
-      - "chaosresults"
-    verbs: 
-      - "create" 
-      - "list" 
-      - "get" 
-      - "patch" 
-      - "update" 
-      - "delete" 
-      - "deletecollection"
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-http-modify-body-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-body-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-http-modify-body-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-http-modify-body-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-http-modify-body/test/test.yml b/experiments/generic/pod-http-modify-body/test/test.yml
deleted file mode 100644
index 858d3f9..0000000
--- a/experiments/generic/pod-http-modify-body/test/test.yml
+++ /dev/null
@@ -1,91 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-http-modify-body-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          # provide lib image
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:latest' 
-
-          - name: RESPONSE_BODY
-            value: '' # body string used to replace the http response body
-
-          # port of the target service
-          - name: TARGET_SERVICE_PORT
-            value: "80"
-
-          # port on which the proxy will listen
-          - name: PROXY_PORT
-            value: "2002"
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' # in seconds
-
-          # Time period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          - name: TARGET_PODS
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, and crio
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the socket file path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # To select pods on specific node(s)
-          - name: NODE_LABEL
-            value: ''
-
-          ## it defines the sequence of chaos execution for multiple target pods
-          ## supported values: serial, parallel
-          - name: SEQUENCE
-            value: 'parallel'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/generic/pod-http-modify-header/README.md b/experiments/generic/pod-http-modify-header/README.md
deleted file mode 100644
index d67aea0..0000000
--- a/experiments/generic/pod-http-modify-header/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod HTTP Modify Header </td>
- <td>This experiment modifies the request or response headers on the specified container by starting a proxy server and redirecting the traffic through it. It can test the application's resilience to incorrect or incomplete request/response headers exchanged with dependent services.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-http-modify-header/"> Here </a> </td>
- </tr>
- </table>
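Header modification through a proxy follows the same pattern. A minimal sketch of the HEADERS_MAP semantics described in the test manifest below (a non-empty value sets the header, an empty value removes it; response mode shown), again assuming the manifest's ports and not the experiment's real implementation:

    package main

    import (
        "log"
        "net/http"
        "net/http/httputil"
        "net/url"
    )

    func main() {
        // Assumed ports from the test manifest: target service on 80, proxy on 2002.
        target, err := url.Parse("http://localhost:80")
        if err != nil {
            log.Fatal(err)
        }
        proxy := httputil.NewSingleHostReverseProxy(target)

        // HEADERS_MAP semantics: non-empty value sets, empty value removes.
        headers := map[string]string{
            "X-Litmus-Test-Header": "X-Litmus-Test-Value",
            "X-Remove-Me":          "",
        }
        proxy.ModifyResponse = func(resp *http.Response) error {
            for k, v := range headers {
                if v == "" {
                    resp.Header.Del(k)
                } else {
                    resp.Header.Set(k, v)
                }
            }
            return nil
        }
        log.Fatal(http.ListenAndServe(":2002", proxy))
    }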
diff --git a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
deleted file mode 100644
index cfb6344..0000000
--- a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/header"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpModifyHeader injects the pod-http-modify-header chaos
-func PodHttpModifyHeader(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-http-modify-header")
-
-	// Initialize events Parameters
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Call the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodHttpModifyHeaderChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-http-modify-header/rbac.yaml b/experiments/generic/pod-http-modify-header/rbac.yaml
deleted file mode 100644
index 4505835..0000000
--- a/experiments/generic/pod-http-modify-header/rbac.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-http-modify-header-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-header-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-http-modify-header-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-header-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-  # Create and monitor the experiment & helper pods
-  - apiGroups: [""]
-    resources: ["pods"]
-    verbs: ["create","delete","get","list","patch","update", "deletecollection"]
-  # Performs CRUD operations on the events inside chaosengine and chaosresult
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["create","get","list","patch","update"]
-  # Fetch configmap details and mount them to the experiment pod (if specified)
-  - apiGroups: [""]
-    resources: ["configmaps"]
-    verbs: ["get","list",]
-  # Track and get the runner, experiment, and helper pod logs
-  - apiGroups: [""]
-    resources: ["pods/log"]
-    verbs: ["get","list","watch"]  
-  # for creating and executing commands inside the target container
-  - apiGroups: [""]
-    resources: ["pods/exec"]
-    verbs: ["get","list","create"]
-  # deriving the parent/owner details of the pod (if parent is any of {deployment, statefulset, daemonset})
-  - apiGroups: ["apps"]
-    resources: ["deployments","statefulsets","replicasets", "daemonsets"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod(if parent is deploymentConfig)  
-  - apiGroups: ["apps.openshift.io"]
-    resources: ["deploymentconfigs"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-  - apiGroups: [""]
-    resources: ["replicationcontrollers"]
-    verbs: ["get","list"]
-  # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-  - apiGroups: ["argoproj.io"]
-    resources: ["rollouts"]
-    verbs: ["list","get"]
-  # for configuring and monitoring the experiment job by the chaos-runner pod
-  - apiGroups: ["batch"]
-    resources: ["jobs"]
-    verbs: ["create","list","get","delete","deletecollection"]
-  # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
-  - apiGroups: ["litmuschaos.io"]
-    resources: ["chaosengines","chaosexperiments","chaosresults"]
-    verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-http-modify-header-sa
-  namespace: default
-  labels:
-    name: pod-http-modify-header-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-http-modify-header-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-http-modify-header-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-http-modify-header/test/test.yaml b/experiments/generic/pod-http-modify-header/test/test.yaml
deleted file mode 100644
index 03b3851..0000000
--- a/experiments/generic/pod-http-modify-header/test/test.yaml
+++ /dev/null
@@ -1,100 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-http-modify-header-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          # provide lib image
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:latest' 
-
-          # map of headers to modify/add; e.g., {"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
-          # to remove a header, just set the value to ""; e.g., {"X-Litmus-Test-Header": ""}
-          - name: HEADERS_MAP
-            value: '{}'
-
-          # whether to modify response headers or request headers. Accepted values: request, response
-          - name: HEADER_MODE
-            value: 'response'
-
-          # port of the target service
-          - name: TARGET_SERVICE_PORT
-            value: "80"
-
-          # port on which the proxy will listen
-          - name: PROXY_PORT
-            value: "2002"
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' # in seconds
-
-          # Time period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          - name: TARGET_PODS
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, and crio
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the socket file path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # To select pods on specific node(s)
-          - name: NODE_LABEL
-            value: ''
-
-          ## it defines the sequence of chaos execution for multiple target pods
-          ## supported values: serial, parallel
-          - name: SEQUENCE
-            value: 'parallel'
-          
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
\ No newline at end of file
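
The HEADERS_MAP and HEADER_MODE settings in the test manifest above are easier to grasp with a concrete sketch. The following is a minimal, hypothetical Go proxy, not the litmus helper itself (the target and proxy ports are illustrative), that applies a header map on whichever path HEADER_MODE selects, removing a header when its value is empty:

```go
// Minimal, hypothetical sketch (not the litmus helper): a reverse proxy
// applying HEADERS_MAP on the path selected by HEADER_MODE. An empty
// value removes the header; target and proxy ports are illustrative.
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func applyHeaders(h http.Header, headersMap map[string]string) {
	for k, v := range headersMap {
		if v == "" {
			h.Del(k) // empty value => remove the header
		} else {
			h.Set(k, v)
		}
	}
}

func main() {
	headersMap := map[string]string{"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
	headerMode := "response" // HEADER_MODE: "request" or "response"

	target, _ := url.Parse("http://localhost:80") // TARGET_SERVICE_PORT
	proxy := httputil.NewSingleHostReverseProxy(target)

	if headerMode == "response" {
		proxy.ModifyResponse = func(resp *http.Response) error {
			applyHeaders(resp.Header, headersMap)
			return nil
		}
	} else {
		director := proxy.Director
		proxy.Director = func(req *http.Request) {
			director(req)
			applyHeaders(req.Header, headersMap)
		}
	}
	log.Fatal(http.ListenAndServe(":2002", proxy)) // PROXY_PORT
}
```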
diff --git a/experiments/generic/pod-http-reset-peer/README.md b/experiments/generic/pod-http-reset-peer/README.md
deleted file mode 100644
index eb8824f..0000000
--- a/experiments/generic/pod-http-reset-peer/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod HTTP Reset Peer </td>
- <td>This experiment causes TCP resets on HTTP requests to the specified container by starting a proxy server and redirecting traffic through it. It can test the application's resilience to lossy/flaky requests to dependent services.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-http-reset-peer/"> Here </a> </td>
- </tr>
- </table>
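
For orientation, here is a stand-alone Go sketch of the fault this experiment models, assuming only the standard library (it is not the litmus chaoslib, and the port is illustrative): a listener that answers every connection with a TCP RST.

```go
// Hypothetical illustration, not the litmus chaoslib: a listener that
// resets every peer. Closing a TCP connection with SO_LINGER=0 makes the
// kernel send RST instead of FIN, which is what the client of an
// in-flight HTTP request observes during this experiment.
package main

import (
	"log"
	"net"
)

func main() {
	ln, err := net.Listen("tcp", ":2002") // PROXY_PORT is illustrative
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		if tcp, ok := conn.(*net.TCPConn); ok {
			tcp.SetLinger(0) // RST on close
			tcp.Close()
		} else {
			conn.Close()
		}
	}
}
```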
diff --git a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
deleted file mode 100644
index 76188c3..0000000
--- a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/reset"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpResetPeer contains steps to inject chaos
-func PodHttpResetPeer(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-http-reset-peer")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodHttpResetPeerChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-http-reset-peer/rbac.yaml b/experiments/generic/pod-http-reset-peer/rbac.yaml
deleted file mode 100644
index 3fd5db4..0000000
--- a/experiments/generic/pod-http-reset-peer/rbac.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-http-reset-peer-sa
-  namespace: default
-  labels:
-    name: pod-http-reset-peer-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-http-reset-peer-sa
-  namespace: default
-  labels:
-    name: pod-http-reset-peer-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-  # Create and monitor the experiment & helper pods
-  - apiGroups: [""]
-    resources: ["pods"]
-    verbs: ["create","delete","get","list","patch","update", "deletecollection"]
-  # Performs CRUD operations on the events inside chaosengine and chaosresult
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["create","get","list","patch","update"]
-  # Fetch configmap details and mount them to the experiment pod (if specified)
-  - apiGroups: [""]
-    resources: ["configmaps"]
-    verbs: ["get","list",]
-  # Track and get the runner, experiment, and helper pod logs
-  - apiGroups: [""]
-    resources: ["pods/log"]
-    verbs: ["get","list","watch"]
-  # for creating pods/exec sessions to execute commands inside the target container
-  - apiGroups: [""]
-    resources: ["pods/exec"]
-    verbs: ["get","list","create"]
-  # deriving the parent/owner details of the pod (if parent is any of {deployment, statefulset, daemonset})
-  - apiGroups: ["apps"]
-    resources: ["deployments","statefulsets","replicasets", "daemonsets"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod (if parent is deploymentConfig)
-  - apiGroups: ["apps.openshift.io"]
-    resources: ["deploymentconfigs"]
-    verbs: ["list","get"]
-  # deriving the parent/owner details of the pod (if parent is deploymentConfig)
-  - apiGroups: [""]
-    resources: ["replicationcontrollers"]
-    verbs: ["get","list"]
-  # deriving the parent/owner details of the pod (if parent is argo-rollouts)
-  - apiGroups: ["argoproj.io"]
-    resources: ["rollouts"]
-    verbs: ["list","get"]
-  # for configuring and monitoring the experiment job by the chaos-runner pod
-  - apiGroups: ["batch"]
-    resources: ["jobs"]
-    verbs: ["create","list","get","delete","deletecollection"]
-  # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
-  - apiGroups: ["litmuschaos.io"]
-    resources: ["chaosengines","chaosexperiments","chaosresults"]
-    verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-http-reset-peer-sa
-  namespace: default
-  labels:
-    name: pod-http-reset-peer-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-http-reset-peer-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-http-reset-peer-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-http-reset-peer/test/test.yml b/experiments/generic/pod-http-reset-peer/test/test.yml
deleted file mode 100644
index 99bfd08..0000000
--- a/experiments/generic/pod-http-reset-peer/test/test.yml
+++ /dev/null
@@ -1,99 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-http-reset-peer-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          ## Time period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-          
-          - name: TARGET_CONTAINER
-            value: ''
-
-          # provide lib image
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:latest' 
-
-          - name: TIMEOUT
-            value: '10' #in ms
-
-          # port of the target service
-          - name: TARGET_SERVICE_PORT
-            value: "80"
-
-          # port on which the proxy will listen
-          - name: PROXY_PORT
-            value: "2002"
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          - name: TARGET_PODS
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, and crio
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the socket file path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # To select pods on specific node(s)
-          - name: NODE_LABEL
-            value: ''
-
-          ## it defines the sequence of chaos execution for multiple target pods
-          ## supported values: serial, parallel
-          - name: SEQUENCE
-            value: 'parallel'
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/generic/pod-http-status-code/README.md b/experiments/generic/pod-http-status-code/README.md
deleted file mode 100644
index 5b8b979..0000000
--- a/experiments/generic/pod-http-status-code/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod HTTP Status Code </td>
- <td>This experiment modifies the HTTP response status code of the specified container to a provided value, and can optionally replace the response body with a pre-defined template for that status code, by starting a proxy server and redirecting traffic through it. It can test the application's resilience to faulty HTTP responses from dependent services.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-http-status-code/"> Here </a> </td>
- </tr>
- </table>
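
A hedged sketch of the rewrite idea, mirroring the STATUS_CODE and MODIFY_RESPONSE_BODY environment variables this experiment consumes (this is not the litmus implementation; the target and proxy ports are assumptions):

```go
// Hedged sketch, not the litmus implementation: a reverse proxy that
// rewrites every upstream response to a fixed status code and optionally
// swaps the body, mirroring STATUS_CODE and MODIFY_RESPONSE_BODY. The
// target and proxy ports are illustrative.
package main

import (
	"bytes"
	"io"
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
)

func main() {
	target, _ := url.Parse("http://localhost:80") // TARGET_SERVICE_PORT
	proxy := httputil.NewSingleHostReverseProxy(target)
	proxy.ModifyResponse = func(resp *http.Response) error {
		resp.StatusCode = 500 // STATUS_CODE
		// MODIFY_RESPONSE_BODY=true: replace the body with a canned
		// payload for the injected status code.
		body := []byte("Internal Server Error")
		resp.Body.Close()
		resp.Body = io.NopCloser(bytes.NewReader(body))
		resp.ContentLength = int64(len(body))
		resp.Header.Set("Content-Length", strconv.Itoa(len(body)))
		return nil
	}
	log.Fatal(http.ListenAndServe(":2002", proxy)) // PROXY_PORT
}
```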
diff --git a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
deleted file mode 100644
index b37de87..0000000
--- a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/statuscode"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodHttpStatusCode contains steps to inject chaos
-func PodHttpStatusCode(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-	var err error
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-http-status-code")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	// PRE-CHAOS check to verify support for provided status code value
-	if experimentsDetails.StatusCode, err = litmusLIB.GetStatusCode(experimentsDetails.StatusCode); err != nil {
-		log.Errorf("[Pre-Chaos]: Failed to verify status code support, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodHttpStatusCodeChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-http-status-code/rbac.yaml b/experiments/generic/pod-http-status-code/rbac.yaml
deleted file mode 100644
index a3b70a1..0000000
--- a/experiments/generic/pod-http-status-code/rbac.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-http-status-code-sa
-  namespace: default
-  labels:
-    name: pod-http-status-code-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-http-status-code-sa
-  namespace: default
-  labels:
-    name: pod-http-status-code-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-  - apiGroups: 
-      - "" 
-      - "batch" 
-      - "apps" 
-      - "litmuschaos.io"
-    resources: 
-      - "jobs" 
-      - "pods" 
-      - "pods/log" 
-      - "events" 
-      - "deployments" 
-      - "replicasets" 
-      - "pods/exec" 
-      - "chaosengines" 
-      - "chaosexperiments" 
-      - "chaosresults"
-    verbs: 
-      - "create" 
-      - "list" 
-      - "get" 
-      - "patch" 
-      - "update" 
-      - "delete" 
-      - "deletecollection"
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-http-status-code-sa
-  namespace: default
-  labels:
-    name: pod-http-status-code-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-http-status-code-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-http-status-code-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/generic/pod-http-status-code/test/test.yml b/experiments/generic/pod-http-status-code/test/test.yml
deleted file mode 100644
index 37175b4..0000000
--- a/experiments/generic/pod-http-status-code/test/test.yml
+++ /dev/null
@@ -1,100 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-http-status-code-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-        
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          # provide lib image
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:latest' 
-
-          # modified status code for the http response
-          - name: STATUS_CODE
-            value: '500'
-          # whether to modify the body as per the status code provided
-          - name: MODIFY_RESPONSE_BODY
-            value: "true"
-
-          # port of the target service
-          - name: TARGET_SERVICE_PORT
-            value: "80"
-
-          # port on which the proxy will listen
-          - name: PROXY_PORT
-            value: "2002"
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' # in seconds
-
-          # Time period to wait before and after injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          - name: TARGET_PODS
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, and crio
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the socket file path
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          # To select pods on specific node(s)
-          - name: NODE_LABEL
-            value: ''
-
-          ## it defines the sequence of chaos execution for multiple target pods
-          ## supported values: serial, parallel
-          - name: SEQUENCE
-            value: 'parallel'
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/generic/pod-io-stress/README.md b/experiments/generic/pod-io-stress/README.md
deleted file mode 100644
index d06b90c..0000000
--- a/experiments/generic/pod-io-stress/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod IO Stress </td>
- <td>This experiment causes disk stress on the application pod. The experiment aims to verify the resiliency of applications that share this disk resource for ephemeral or persistent storage purposes.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-io-stress/"> Here </a> </td>
- </tr>
- </table>
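
Illustratively, the disk pressure this experiment applies can be approximated by a crude stand-alone stressor (the real experiment delegates to a stress tool inside the target pod; the sizes and the 60-second window below are assumptions taken from the manifest defaults):

```go
// Illustrative only: a crude filesystem stressor that writes and fsyncs a
// scratch file in a loop for the chaos window. Not the litmus chaoslib.
package main

import (
	"log"
	"os"
	"time"
)

func main() {
	f, err := os.CreateTemp("", "io-stress-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	buf := make([]byte, 1<<20)                   // 1 MiB per write
	const maxBytes = 256 << 20                   // cap the scratch file at 256 MiB
	deadline := time.Now().Add(60 * time.Second) // TOTAL_CHAOS_DURATION
	var written int64
	for time.Now().Before(deadline) {
		if _, err := f.Write(buf); err != nil {
			log.Fatal(err)
		}
		f.Sync() // flush dirty pages so the disk actually sees the load
		if written += int64(len(buf)); written >= maxBytes {
			f.Seek(0, 0) // rewrite from the start instead of growing forever
			written = 0
		}
	}
}
```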
diff --git a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go
deleted file mode 100644
index eb6dcdb..0000000
--- a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodIOStress inject the pod-io-stress chaos
-func PodIOStress(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-io-stress")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: Pod IO Stress failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-io-stress/rbac.yaml b/experiments/generic/pod-io-stress/rbac.yaml
deleted file mode 100644
index 3c8a213..0000000
--- a/experiments/generic/pod-io-stress/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-io-stress-sa
-  namespace: default
-  labels:
-    name: pod-io-stress-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-io-stress-sa
-  namespace: default
-  labels:
-    name: pod-io-stress-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-io-stress-sa
-  namespace: default
-  labels:
-    name: pod-io-stress-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-io-stress-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-io-stress-sa
-  namespace: default
diff --git a/experiments/generic/pod-io-stress/test/test.yml b/experiments/generic/pod-io-stress/test/test.yml
deleted file mode 100644
index 51d6427..0000000
--- a/experiments/generic/pod-io-stress/test/test.yml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-io-stress-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: FILESYSTEM_UTILIZATION_PERCENTAGE
-            value: ''
-
-          - name: FILESYSTEM_UTILIZATION_BYTES
-            value: ''            
-
-          - name: NUMBER_OF_WORKERS
-            value: '4'
-
-          ## Percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: '100' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-memory-hog-exec/README.md b/experiments/generic/pod-memory-hog-exec/README.md
deleted file mode 100644
index 4bbd9e1..0000000
--- a/experiments/generic/pod-memory-hog-exec/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Memory Hog Exec </td>
- <td> This experiment causes memory resource consumption on specified application containers by using the dd command, which is used to consume memory of the application container for a certain duration. It can test the application's resilience to potential slowness/unavailability of some replicas due to high memory load.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-memory-hog-exec/"> Here </a> </td>
- </tr>
-</table>
\ No newline at end of file
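
The dd trick works because dd allocates an in-memory buffer of the requested block size. A stand-alone Go equivalent of "hold N MiB for the chaos window" might look like the sketch below, where the numbers are assumptions mirroring MEMORY_CONSUMPTION and TOTAL_CHAOS_DURATION:

```go
// Stand-alone illustration of the dd-style memory hog: allocate the
// requested MiB, touch every page so the memory is truly resident, and
// hold it for the chaos window. Not the litmus exec helper.
package main

import "time"

func main() {
	const memoryConsumptionMiB = 500       // MEMORY_CONSUMPTION
	const chaosDuration = 60 * time.Second // TOTAL_CHAOS_DURATION

	block := make([]byte, memoryConsumptionMiB<<20)
	for i := 0; i < len(block); i += 4096 {
		block[i] = 1 // write to each page to defeat lazy allocation
	}
	time.Sleep(chaosDuration)
	_ = block // keep the allocation live until the window ends
}
```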
diff --git a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
deleted file mode 100644
index f3a1d4a..0000000
--- a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-memory-hog-exec/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodMemoryHogExec inject the pod-memory-hog-exec chaos
-func PodMemoryHogExec(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":            common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":   experimentsDetails.TargetContainer,
-		"Chaos Duration":     experimentsDetails.ChaosDuration,
-		"Memory Consumption": experimentsDetails.MemoryConsumption,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareMemoryExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: pod memory hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-memory-hog-exec/rbac.yaml b/experiments/generic/pod-memory-hog-exec/rbac.yaml
deleted file mode 100644
index f69cd2c..0000000
--- a/experiments/generic/pod-memory-hog-exec/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-memory-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-exec-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-memory-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-exec-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-memory-hog-exec-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-exec-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-memory-hog-exec-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-memory-hog-exec-sa
-  namespace: default
diff --git a/experiments/generic/pod-memory-hog-exec/test/test.yml b/experiments/generic/pod-memory-hog-exec/test/test.yml
deleted file mode 100644
index 06f8647..0000000
--- a/experiments/generic/pod-memory-hog-exec/test/test.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-memory-hog-exec-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: MEMORY_CONSUMPTION
-            value: '500'
-
-          ## Percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: '100' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          - name: SEQUENCE
-            value: 'parallel'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-memory-hog/README.md b/experiments/generic/pod-memory-hog/README.md
deleted file mode 100644
index 37ac8d3..0000000
--- a/experiments/generic/pod-memory-hog/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Memory Hog </td>
-<td> This experiment causes memory resource consumption on the specified application containers using `cgroups` and the litmus `nsutil` utility, which consumes memory resources of the given target containers. It can test the application's resilience to potential slowness/unavailability of some replicas due to high memory usage. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-memory-hog/"> Here </a> </td>
- </tr>
- </table>
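Editor's note, for orientation only: the memory pressure described above comes down to running a stressor inside the target container's namespaces/cgroup. A rough, self-contained sketch of that kind of invocation, assuming stress-ng as the stressor and the MEMORY_CONSUMPTION/TOTAL_CHAOS_DURATION values used in the test manifests in this patch (the real wiring lived in the deleted stress-chaos helper):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Mirrors MEMORY_CONSUMPTION (MB) and TOTAL_CHAOS_DURATION (s) from the
	// test manifest; one VM worker allocates and dirties the requested bytes.
	memoryMB, durationSec := 500, 60
	cmd := exec.Command("stress-ng",
		"--vm", "1",
		"--vm-bytes", fmt.Sprintf("%dM", memoryMB),
		"--timeout", fmt.Sprintf("%ds", durationSec),
	)
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("stress-ng failed: %v: %s", err, out)
	}
}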
diff --git a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
deleted file mode 100644
index a4bf3cc..0000000
--- a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodMemoryHog injects the pod-memory-hog chaos
-func PodMemoryHog(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-memory-hog")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed,, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("[Error]: pod memory hog failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-memory-hog/rbac.yaml b/experiments/generic/pod-memory-hog/rbac.yaml
deleted file mode 100644
index ba8d2f3..0000000
--- a/experiments/generic/pod-memory-hog/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-memory-hog-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-memory-hog-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","pods/exec","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-memory-hog-sa
-  namespace: default
-  labels:
-    name: pod-memory-hog-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-memory-hog-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-memory-hog-sa
-  namespace: default
diff --git a/experiments/generic/pod-memory-hog/test/test.yml b/experiments/generic/pod-memory-hog/test/test.yml
deleted file mode 100644
index 06f8647..0000000
--- a/experiments/generic/pod-memory-hog/test/test.yml
+++ /dev/null
@@ -1,64 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-memory-hog-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60'
-
-          - name: CHAOS_INTERVAL
-            value: '10'
-
-          - name: MEMORY_CONSUMPTION
-            value: '500'
-
-          ## Percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: '100' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: TARGET_CONTAINER
-            value: ''
-
-          - name: SEQUENCE
-            value: 'parallel'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-network-corruption/README.md b/experiments/generic/pod-network-corruption/README.md
deleted file mode 100644
index 96b48b3..0000000
--- a/experiments/generic/pod-network-corruption/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Network Corruption </td>
- <td> This chaos action injects packet corruption into the specified container by starting a traffic control (tc) process with netem rules to add egress packet corruption. Corruption is injected via the pumba library using the Pumba netem corruption command, passing the relevant network interface, packet-corruption-percentage, chaos duration, and a regex filter for the container name. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-network-corruption/"> Here </a> </td>
- </tr>
- </table>
-
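Editor's note: the tc/netem rule described above is, at its core, one command against the target's network interface. A hedged sketch under those assumptions (eth0 and the 100% value from the test manifest below; the deleted network-chaos helper additionally resolves the container's network namespace via the runtime socket first):

package main

import (
	"fmt"
	"log"
	"os/exec"
)

// applyNetem installs a netem qdisc on the given interface; rule is the
// discipline-specific tail, e.g. "corrupt", "100%".
func applyNetem(iface string, rule ...string) error {
	args := append([]string{"qdisc", "replace", "dev", iface, "root", "netem"}, rule...)
	if out, err := exec.Command("tc", args...).CombinedOutput(); err != nil {
		return fmt.Errorf("tc failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Equivalent of: tc qdisc replace dev eth0 root netem corrupt 100%
	if err := applyNetem("eth0", "corrupt", "100%"); err != nil {
		log.Fatal(err)
	}
}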
diff --git a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go
deleted file mode 100644
index d3276db..0000000
--- a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/corruption"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodNetworkCorruption injects the pod-network-corruption chaos
-func PodNetworkCorruption(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-network-corruption")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":               common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":      experimentsDetails.TargetContainer,
-		"Chaos Duration":        experimentsDetails.ChaosDuration,
-		"Container Runtime":     experimentsDetails.ContainerRuntime,
-		"Curruption Percentage": experimentsDetails.NetworkPacketCorruptionPercentage,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodNetworkCorruptionChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-network-corruption/rbac.yaml b/experiments/generic/pod-network-corruption/rbac.yaml
deleted file mode 100644
index a4d2873..0000000
--- a/experiments/generic/pod-network-corruption/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-network-corruption-sa
-  namespace: default
-  labels:
-    name: pod-network-corruption-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-network-corruption-sa
-  namespace: default
-  labels:
-    name: pod-network-corruption-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-network-corruption-sa
-  namespace: default
-  labels:
-    name: pod-network-corruption-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-network-corruption-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-network-corruption-sa
-  namespace: default
diff --git a/experiments/generic/pod-network-corruption/test/test.yml b/experiments/generic/pod-network-corruption/test/test.yml
deleted file mode 100644
index 1fc6b6b..0000000
--- a/experiments/generic/pod-network-corruption/test/test.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-network-corruption-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: NETWORK_INTERFACE
-            value: 'eth0'
-
-          - name: TC_IMAGE
-            value: 'gaiadocker/iproute2'
-
-          - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE
-            value: '100'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          # applicable only for containerd and crio runtime
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-network-duplication/README.md b/experiments/generic/pod-network-duplication/README.md
deleted file mode 100644
index 5cd6d36..0000000
--- a/experiments/generic/pod-network-duplication/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Network Duplication </td>
- <td> This experiment causes network packet duplication using pumba. It injects duplication on the specified container by starting a traffic control (tc) process with netem rules. It can test the application's resilience to duplicated network packets. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-network-duplication/"> Here </a> </td>
- </tr>
- </table>
-
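Editor's note: relative to the corruption sketch earlier in this patch, only the netem discipline changes; reusing that hypothetical applyNetem helper, with 100% mirroring NETWORK_PACKET_DUPLICATION_PERCENTAGE from the test manifest below:

// Equivalent of: tc qdisc replace dev eth0 root netem duplicate 100%
if err := applyNetem("eth0", "duplicate", "100%"); err != nil {
	log.Fatal(err)
}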
diff --git a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go
deleted file mode 100644
index f37c463..0000000
--- a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/duplication"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodNetworkDuplication injects the pod-network-duplication chaos
-func PodNetworkDuplication(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-network-duplication")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":                common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":       experimentsDetails.TargetContainer,
-		"Chaos Duration":         experimentsDetails.ChaosDuration,
-		"Container Runtime":      experimentsDetails.ContainerRuntime,
-		"Duplication Percentage": experimentsDetails.NetworkPacketDuplicationPercentage,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodNetworkDuplicationChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-network-duplication/rbac.yaml b/experiments/generic/pod-network-duplication/rbac.yaml
deleted file mode 100644
index 6e8bffb..0000000
--- a/experiments/generic/pod-network-duplication/rbac.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-network-duplication-sa
-  namespace: default
-  labels:
-    name: pod-network-duplication-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-network-duplication-sa
-  namespace: default
-  labels:
-    name: pod-network-duplication-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-network-duplication-sa
-  namespace: default
-  labels:
-    name: pod-network-duplication-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-network-duplication-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-network-duplication-sa
-  namespace: default
diff --git a/experiments/generic/pod-network-duplication/test/test.yml b/experiments/generic/pod-network-duplication/test/test.yml
deleted file mode 100644
index faadb5d..0000000
--- a/experiments/generic/pod-network-duplication/test/test.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-network-duplication-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: NETWORK_INTERFACE
-            value: 'eth0'
-
-          - name: TC_IMAGE
-            value: 'gaiadocker/iproute2'
-
-          - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE
-            value: '100'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          # applicable only for containerd and crio runtime
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-network-latency/README.md b/experiments/generic/pod-network-latency/README.md
deleted file mode 100644
index e9db45f..0000000
--- a/experiments/generic/pod-network-latency/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Network Latency </td>
- <td> This experiment causes flaky access to the application replica by injecting network delay using pumba. It injects latency on the specified container by starting a traffic control (tc) process with netem rules to add egress delays. It can test the application's resilience to a lossy/flaky network. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-network-latency/"> Here </a> </td>
- </tr>
- </table>
-
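Editor's note: the same shape again for latency, using netem's delay discipline and the millisecond NETWORK_LATENCY value from the test manifest below; reusing the hypothetical applyNetem helper from the corruption sketch:

// Equivalent of: tc qdisc replace dev eth0 root netem delay 60000ms
if err := applyNetem("eth0", "delay", "60000ms"); err != nil {
	log.Fatal(err)
}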
diff --git a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go
deleted file mode 100644
index a4ca956..0000000
--- a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/latency"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodNetworkLatency injects the pod-network-latency chaos
-func PodNetworkLatency(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-network-latency")
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result at the beginning of the experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-		"Latency":           experimentsDetails.NetworkLatency,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodNetworkLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Infof("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-network-latency/rbac.yaml b/experiments/generic/pod-network-latency/rbac.yaml
deleted file mode 100644
index c7c301e..0000000
--- a/experiments/generic/pod-network-latency/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-network-latency-sa
-  namespace: default
-  labels:
-    name: pod-network-latency-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-network-latency-sa
-  namespace: default
-  labels:
-    name: pod-network-latency-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","pods/log","events","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-network-latency-sa
-  namespace: default
-  labels:
-    name: pod-network-latency-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-network-latency-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-network-latency-sa
-  namespace: default
diff --git a/experiments/generic/pod-network-latency/test/test.yml b/experiments/generic/pod-network-latency/test/test.yml
deleted file mode 100644
index 08b7b96..0000000
--- a/experiments/generic/pod-network-latency/test/test.yml
+++ /dev/null
@@ -1,87 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-network-latency-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          # provide application kind
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: NETWORK_INTERFACE
-            value: 'eth0'
-
-          - name: TC_IMAGE
-            value: 'gaiadocker/iproute2'
-
-          # in ms
-          - name: NETWORK_LATENCY
-            value: '60000'
-
-          # in sec
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-            ## Period to wait before/after injection of chaos  
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          # applicable only for containerd and crio runtime
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-network-loss/README.md b/experiments/generic/pod-network-loss/README.md
deleted file mode 100644
index b148519..0000000
--- a/experiments/generic/pod-network-loss/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Network Loss </td>
- <td> This experiment injects chaos to disrupt network connectivity to Kubernetes pods. The application pod should be healthy once chaos is stopped. It causes loss of access to the application replica by injecting packet loss using pumba. </td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-network-loss/"> Here </a> </td>
- </tr>
- </table>
-
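Editor's note: and the loss variant, again assuming the hypothetical applyNetem helper from the corruption sketch; 100% here is illustrative and would come from the loss-percentage env value in the manifest:

// Equivalent of: tc qdisc replace dev eth0 root netem loss 100%
if err := applyNetem("eth0", "loss", "100%"); err != nil {
	log.Fatal(err)
}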
diff --git a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go
deleted file mode 100644
index 9264593..0000000
--- a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/loss"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodNetworkLoss injects the pod-network-loss chaos
-func PodNetworkLoss(clients clients.ClientSets) {
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	chaosDetails := types.ChaosDetails{}
-	eventsDetails := types.EventDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, "pod-network-loss")
-
-	// Initialize events Parameters
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows\n", logrus.Fields{
-		"Targets":           common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container":  experimentsDetails.TargetContainer,
-		"Chaos Duration":    experimentsDetails.ChaosDuration,
-		"Container Runtime": experimentsDetails.ContainerRuntime,
-		"Loss Percentage":   experimentsDetails.NetworkPacketLossPercentage,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PodNetworkLossChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-}
diff --git a/experiments/generic/pod-network-loss/rbac.yaml b/experiments/generic/pod-network-loss/rbac.yaml
deleted file mode 100644
index 8427df6..0000000
--- a/experiments/generic/pod-network-loss/rbac.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-network-loss-sa
-  namespace: default
-  labels:
-    name: pod-network-loss-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-network-loss-sa
-  namespace: default
-  labels:
-    name: pod-network-loss-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch"]
-  resources: ["pods","jobs","events","pods/log","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-network-loss-sa
-  namespace: default
-  labels:
-    name: pod-network-loss-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-network-loss-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-network-loss-sa
-  namespace: default
diff --git a/experiments/generic/pod-network-loss/test/test.yml b/experiments/generic/pod-network-loss/test/test.yml
deleted file mode 100644
index 28f88ff..0000000
--- a/experiments/generic/pod-network-loss/test/test.yml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-network-loss-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: TARGET_CONTAINER
-            value: 'nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: NETWORK_INTERFACE
-            value: 'eth0'
-
-          - name: TC_IMAGE
-            value: 'gaiadocker/iproute2'
-
-          - name: NETWORK_PACKET_LOSS_PERCENTAGE
-            value: '100'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: TARGET_POD
-            value: ''
-
-          - name: LIB_IMAGE
-            value: 'litmuschaos/go-runner:ci'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: RAMP_TIME
-            value: ''
-
-          ## percentage of total pods to target
-          - name: PODS_AFFECTED_PERC
-            value: ''
-
-          # provide the name of container runtime
-          # it supports docker, containerd, crio
-          # defaults to containerd
-          - name: CONTAINER_RUNTIME
-            value: 'containerd'
-
-          # provide the container runtime path
-          # applicable only for containerd and crio runtime
-          - name: SOCKET_PATH
-            value: '/run/containerd/containerd.sock'
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
diff --git a/experiments/generic/pod-network-partition/README.md b/experiments/generic/pod-network-partition/README.md
deleted file mode 100644
index c29f6bd..0000000
--- a/experiments/generic/pod-network-partition/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Pod Network Partition </td>
- <td> This experiment blocks 100% of the ingress and egress traffic of the target application. It can block traffic for specific IPs/hosts or for all IPs</td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/pods/pod-network-partition/"> Here </a> </td>
- </tr>
- </table>
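The partition described above can be expressed as a single NetworkPolicy that selects the target pods and declares both policy types with no allow rules; the rbac.yaml below grants exactly the networkpolicies verbs this needs. A minimal client-go sketch, with the function name and label arguments as illustrative assumptions:

    import (
        networkingv1 "k8s.io/api/networking/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // denyAllPolicy blocks all traffic to and from the pods matching
    // podLabels: both policy types are declared, and no ingress or egress
    // rules are listed, so nothing is allowed.
    func denyAllPolicy(name, ns string, podLabels map[string]string) *networkingv1.NetworkPolicy {
        return &networkingv1.NetworkPolicy{
            ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
            Spec: networkingv1.NetworkPolicySpec{
                PodSelector: metav1.LabelSelector{MatchLabels: podLabels},
                PolicyTypes: []networkingv1.PolicyType{
                    networkingv1.PolicyTypeIngress,
                    networkingv1.PolicyTypeEgress,
                },
            },
        }
    }
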
diff --git a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go
deleted file mode 100644
index c0c300b..0000000
--- a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-network-partition/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// PodNetworkPartition injects the pod-network-partition chaos
-func PodNetworkPartition(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Targets":          common.GetAppDetailsForLogging(chaosDetails.AppDetail),
-		"Target Container": experimentsDetails.TargetContainer,
-		"Chaos Duration":   experimentsDetails.ChaosDuration,
-	})
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-	if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Chaos injection failed, err: %v", err)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/generic/pod-network-partition/rbac.yaml b/experiments/generic/pod-network-partition/rbac.yaml
deleted file mode 100644
index 9d7527c..0000000
--- a/experiments/generic/pod-network-partition/rbac.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: pod-network-partition-sa
-  namespace: default
-  labels:
-    name: pod-network-partition-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: pod-network-partition-sa
-  namespace: default
-  labels:
-    name: pod-network-partition-sa
-rules:
-- apiGroups: [""]
-  resources: ["pods","events"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["list","get","create"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["networking.k8s.io"]
-  resources: ["networkpolicies"]
-  verbs: ["create","delete","list","get"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: pod-network-partition-sa
-  namespace: default
-  labels:
-    name: pod-network-partition-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: Role
-  name: pod-network-partition-sa
-subjects:
-- kind: ServiceAccount
-  name: pod-network-partition-sa
-  namespace: default
diff --git a/experiments/generic/pod-network-partition/test/test.yml b/experiments/generic/pod-network-partition/test/test.yml
deleted file mode 100644
index 1c04f78..0000000
--- a/experiments/generic/pod-network-partition/test/test.yml
+++ /dev/null
@@ -1,70 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: pod-network-partition-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          - name: DESTINATION_IPS
-            value: ''
-
-          - name: DESTINATION_HOSTS
-            value: ''
-
-          - name: POLICY_TYPES
-            value: ''
-
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## env var that describes the library used to execute the chaos
-          ## default: litmus. Supported values: litmus, powerfulseal, chaoskube
-          - name: LIB
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/kafka/kafka-broker-pod-failure/README.md b/experiments/kafka/kafka-broker-pod-failure/README.md
deleted file mode 100644
index 325a6c0..0000000
--- a/experiments/kafka/kafka-broker-pod-failure/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Kafka Broker Pod Failure </td>
- <td> It causes (forced/graceful) pod failure of specific/random Kafka broker pods. It tests deployment sanity (replica availability & uninterrupted service) and recovery workflows of the Kafka cluster </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/kafka/kafka-broker-pod-failure/"> Here </a> </td>
- </tr>
- </table>
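The forced/graceful distinction mentioned above maps to the grace period passed when the broker pod is deleted; this corresponds to the FORCE variable in the test manifest below. A minimal client-go sketch, with the helper name as an illustrative assumption:

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // deleteBrokerPod deletes one broker pod; force=true requests a zero
    // grace period (forced kill), otherwise the pod terminates gracefully.
    func deleteBrokerPod(c kubernetes.Interface, ns, pod string, force bool) error {
        opts := metav1.DeleteOptions{}
        if force {
            grace := int64(0)
            opts.GracePeriodSeconds = &grace
        }
        return c.CoreV1().Pods(ns).Delete(context.Background(), pod, opts)
    }
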
diff --git a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go
deleted file mode 100644
index 7bf9204..0000000
--- a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package experiment
-
-import (
-	"os"
-	"strings"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	kafkaPodDelete "github.com/litmuschaos/litmus-go/chaoslib/litmus/kafka-broker-pod-failure/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/kafka"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/kafka/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// KafkaBrokerPodFailure derives and kills the Kafka broker leader
-func KafkaBrokerPodFailure(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ChaoslibDetail.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ChaoslibDetail.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("The application information is as follows", logrus.Fields{
-		"Kafka Namespace": experimentsDetails.KafkaNamespace,
-		"Kafka Label":     experimentsDetails.KafkaLabel,
-		"Chaos Duration":  experimentsDetails.ChaoslibDetail.ChaosDuration,
-	})
-
-	// PRE-CHAOS APPLICATION STATUS CHECK
-	// KAFKA CLUSTER HEALTH CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the Kafka cluster is healthy (pre-chaos)")
-		if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil {
-			log.Errorf("Cluster health check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// PRE-CHAOS KAFKA APPLICATION LIVENESS CHECK
-	switch strings.ToLower(experimentsDetails.KafkaLivenessStream) {
-	case "enable":
-		livenessTopicLeader, err := kafka.LivenessStream(&experimentsDetails, clients)
-		if err != nil {
-			log.Errorf("Liveness check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("The liveness pod has been established")
-		log.Infof("[Info]: Kafka partition leader is %v", livenessTopicLeader)
-
-		if experimentsDetails.KafkaBroker == "" {
-			experimentsDetails.KafkaBroker = livenessTopicLeader
-		}
-	}
-
-	kafka.DisplayKafkaBroker(&experimentsDetails)
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := kafkaPodDelete.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ChaoslibDetail.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	// POST-CHAOS KAFKA CLUSTER HEALTH CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the Kafka cluster is healthy (post-chaos)")
-		if err := kafka.ClusterHealthCheck(&experimentsDetails, clients); err != nil {
-			log.Errorf("Cluster health check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "")
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful")
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Successful")
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	// Liveness Status Check (post-chaos) and cleanup
-	switch strings.ToLower(experimentsDetails.KafkaLivenessStream) {
-	case "enable":
-		log.Info("[Status]: Verify that the Kafka liveness pod is running (post-chaos)")
-		if err := status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=kafka-liveness-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-			log.Errorf("Application liveness status check failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-
-		log.Info("[CleanUp]: Deleting the kafka liveness pod(post-chaos)")
-		if err := kafka.LivenessCleanup(&experimentsDetails, clients); err != nil {
-			log.Errorf("Liveness cleanup failed, err: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Info("[The End]: Updating the chaos result of kafka pod delete experiment (EOT)")
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ChaoslibDetail.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.ChaoslibDetail.EngineName != "" {
-		msg := experimentsDetails.ChaoslibDetail.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/kafka/kafka-broker-pod-failure/rbac.yaml b/experiments/kafka/kafka-broker-pod-failure/rbac.yaml
deleted file mode 100644
index 8608b41..0000000
--- a/experiments/kafka/kafka-broker-pod-failure/rbac.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: kafka-broker-pod-failure-sa
-  namespace: default
-  labels:
-    name: kafka-broker-pod-failure-sa
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: kafka-broker-pod-failure-sa
-  labels:
-    name: kafka-broker-pod-failure-sa
-rules:
-- apiGroups: ["","litmuschaos.io","batch","apps"]
-  resources: ["pods","deployments","pods/log","events","jobs","pods/exec","statefulsets","configmaps","chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","delete"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kafka-broker-pod-failure-sa
-  labels:
-    name: kafka-broker-pod-failure-sa
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: kafka-broker-pod-failure-sa
-subjects:
-- kind: ServiceAccount
-  name: kafka-broker-pod-failure-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/kafka/kafka-broker-pod-failure/test/test.yml b/experiments/kafka/kafka-broker-pod-failure/test/test.yml
deleted file mode 100644
index d754553..0000000
--- a/experiments/kafka/kafka-broker-pod-failure/test/test.yml
+++ /dev/null
@@ -1,101 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: %CHAOS_SERVICE_ACCOUNT%
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-        - sleep 
-        - "3600"
-        env:
-          - name: KAFKA_KIND
-            value: 'statefulset'
-
-          - name: KAFKA_LIVENESS_STREAM
-            value: 'enable'
-
-          - name: KAFKA_LIVENESS_IMAGE
-            value: 'litmuschaos/kafka-client:latest'
-
-            # set to 'enabled' if you have auth set up
-          - name: KAFKA_SASL_AUTH
-            value: 'disabled'
-
-            # Recommended timeout for EKS platform: 60000 ms
-          - name: KAFKA_CONSUMER_TIMEOUT
-            value: '60000'  # in milliseconds
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '15'
-
-          - name: CHAOS_INTERVAL
-            value: '5'
-
-          - name: FORCE
-            value: 'true'
-
-          - name: KAFKA_INSTANCE_NAME
-            value: ''
-
-          - name: KAFKA_NAMESPACE
-            value: ''
-            
-          - name: KAFKA_LABEL
-            value: ''
-
-          - name: KAFKA_BROKER
-            value: ''
-
-          - name: KAFKA_REPLICATION_FACTOR
-            value: ''
-
-          - name: KAFKA_SERVICE
-            value: ''
-
-          - name: KAFKA_PORT
-            value: ''
-
-          - name: ZOOKEEPER_NAMESPACE
-            value: ''
-
-          - name: ZOOKEEPER_LABEL
-            value: ''
-
-          - name: ZOOKEEPER_SERVICE
-            value: ''
-
-          - name: ZOOKEEPER_PORT
-            value: ''
-
-            ## env var that describes the library used to execute the chaos
-            ## default: litmus. Supported values: litmus
-          - name: LIB
-            value: ''
-            
-            # Provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
diff --git a/experiments/kube-aws/ebs-loss-by-id/README.md b/experiments/kube-aws/ebs-loss-by-id/README.md
deleted file mode 100644
index 36c5da2..0000000
--- a/experiments/kube-aws/ebs-loss-by-id/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> EBS Loss By ID </td>
- <td> This experiment causes the detachment of an EBS volume from an instance for a certain chaos duration and reattaches it as part of recovery (post-chaos). The experiment is specific to the volume and the instance to which it is attached.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws/ebs-loss-by-id/"> Here </a> </td>
- </tr>
- </table>
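The detach/reattach cycle described above rests on two EC2 API calls. A minimal aws-sdk-go sketch of the detach half, with the helper name as an illustrative assumption; recovery would call AttachVolume with the remembered instance ID and device name (the EC2_INSTANCE_ID and DEVICE_NAME variables in the test manifest below):

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // detachVolume detaches the given EBS volume in the given region.
    func detachVolume(region, volumeID string) error {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(region)}))
        svc := ec2.New(sess)
        _, err := svc.DetachVolume(&ec2.DetachVolumeInput{VolumeId: aws.String(volumeID)})
        return err
    }
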
diff --git a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go
deleted file mode 100644
index 5f64c7e..0000000
--- a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// EBSLossByID injects the EBS volume loss chaos
-func EBSLossByID(clients clients.ClientSets) {
-
-	var err error
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE VOLUME INFORMATION
-	log.InfoWithValues("The volume information is as follows", logrus.Fields{
-		"Volume IDs":     experimentsDetails.EBSVolumeID,
-		"Region":         experimentsDetails.Region,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Sequence":       experimentsDetails.Sequence,
-	})
-
-	//Verify that the EBS volume is attached to an EC2 instance (pre-chaos)
-	if chaosDetails.DefaultHealthCheck {
-		if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
-			log.Errorf("Volume status check failed pre-chaos: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareEBSLossByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	if chaosDetails.DefaultHealthCheck {
-		//Verify that the EBS volume is attached to an EC2 instance (post-chaos)
-		if err = aws.EBSStateCheckByID(experimentsDetails.EBSVolumeID, experimentsDetails.Region); err != nil {
-			log.Errorf("Volume status check failed post-chaos: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-
-}
diff --git a/experiments/kube-aws/ebs-loss-by-id/rbac.yaml b/experiments/kube-aws/ebs-loss-by-id/rbac.yaml
deleted file mode 100644
index bfe1a03..0000000
--- a/experiments/kube-aws/ebs-loss-by-id/rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: ebs-loss-by-id-sa
-  namespace: default
-  labels:
-    name: ebs-loss-by-id-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: ebs-loss-by-id-sa
-  labels:
-    name: ebs-loss-by-id-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: ebs-loss-by-id-sa
-  labels:
-    name: ebs-loss-by-id-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: ebs-loss-by-id-sa
-subjects:
-- kind: ServiceAccount
-  name: ebs-loss-by-id-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/kube-aws/ebs-loss-by-id/test/test.yml b/experiments/kube-aws/ebs-loss-by-id/test/test.yml
deleted file mode 100644
index bcf4b9e..0000000
--- a/experiments/kube-aws/ebs-loss-by-id/test/test.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: ebs-loss-by-id-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '60' 
-
-          - name: EC2_INSTANCE_ID
-            value: ''
-
-          - name: EBS_VOL_ID
-            value: ''
-
-          - name: DEVICE_NAME
-            value: '/dev/sdf'
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-        secrets:
-          - name: cloud-secret
-            mountPath: /tmp/
\ No newline at end of file
diff --git a/experiments/kube-aws/ebs-loss-by-tag/README.md b/experiments/kube-aws/ebs-loss-by-tag/README.md
deleted file mode 100644
index 64d1e8a..0000000
--- a/experiments/kube-aws/ebs-loss-by-tag/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> EBS Loss By Tag </td>
- <td> This experiment causes the detachment of an EBS volume from an EC2 instance for a certain chaos duration and reattaches it as part of recovery (post-chaos). The target volumes are selected on the basis of a volume tag. The experiment itself derives the device name and the instance ID to which the volume is attached.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws/ebs-loss-by-tag/"> Here </a> </td>
- </tr>
- </table>
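Selecting targets by tag, as described above, comes down to a DescribeVolumes call with a tag filter; restricting to in-use volumes matches the pre-chaos expectation below that targets are found in the attached state. A minimal aws-sdk-go sketch, with the helper name as an illustrative assumption:

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // targetVolumesByTag returns the IDs of in-use EBS volumes carrying the
    // given tag key/value pair.
    func targetVolumesByTag(svc *ec2.EC2, key, value string) ([]string, error) {
        out, err := svc.DescribeVolumes(&ec2.DescribeVolumesInput{
            Filters: []*ec2.Filter{
                {Name: aws.String("tag:" + key), Values: []*string{aws.String(value)}},
                {Name: aws.String("status"), Values: []*string{aws.String("in-use")}},
            },
        })
        if err != nil {
            return nil, err
        }
        var ids []string
        for _, v := range out.Volumes {
            ids = append(ids, aws.StringValue(v.VolumeId))
        }
        return ids, nil
    }
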
diff --git a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
deleted file mode 100644
index 96a7323..0000000
--- a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// EBSLossByTag injects the EBS volume loss chaos
-func EBSLossByTag(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	//DISPLAY THE VOLUME INFORMATION
-	log.InfoWithValues("The volume information is as follows", logrus.Fields{
-		"Volume Tag":     experimentsDetails.VolumeTag,
-		"Region":         experimentsDetails.Region,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-		"Sequence":       experimentsDetails.Sequence,
-	})
-
-	//selecting the target volumes (pre chaos)
-	//if no volumes found in attached state then this check will fail
-	if err := aws.SetTargetVolumeIDs(&experimentsDetails); err != nil {
-		log.Errorf("Failed to set the volumes under chaos: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareEBSLossByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	if chaosDetails.DefaultHealthCheck {
-		//Verify the aws ec2 instance is attached to ebs volume
-		if err := aws.PostChaosVolumeStatusCheck(&experimentsDetails); err != nil {
-			log.Errorf("Failed to verify that the EBS volume is attached to an instance: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-
-}
diff --git a/experiments/kube-aws/ebs-loss-by-tag/rbac.yaml b/experiments/kube-aws/ebs-loss-by-tag/rbac.yaml
deleted file mode 100644
index cc96710..0000000
--- a/experiments/kube-aws/ebs-loss-by-tag/rbac.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: ebs-loss-by-tag-sa
-  namespace: default
-  labels:
-    name: ebs-loss-by-tag-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: ebs-loss-by-tag-sa
-  labels:
-    name: ebs-loss-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: ebs-loss-by-tag-sa
-  labels:
-    name: ebs-loss-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: ebs-loss-by-tag-sa
-subjects:
-- kind: ServiceAccount
-  name: ebs-loss-by-tag-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/kube-aws/ebs-loss-by-tag/test/test.yml b/experiments/kube-aws/ebs-loss-by-tag/test/test.yml
deleted file mode 100644
index ab2b7f2..0000000
--- a/experiments/kube-aws/ebs-loss-by-tag/test/test.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: ebs-loss-by-tag-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: APP_NAMESPACE
-            value: 'default'
-
-          - name: APP_LABEL
-            value: 'run=nginx'
-
-          - name: APP_KIND
-            value: 'deployment'
-
-          - name: TOTAL_CHAOS_DURATION
-            value: '30'
-
-          - name: CHAOS_INTERVAL
-            value: '30'             
-
-          - name: EBS_VOLUME_TAG
-            value: ''
-            
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-          secrets:
-            - name: cloud-secret
-              mountPath: /tmp/
\ No newline at end of file
diff --git a/experiments/kube-aws/ec2-terminate-by-id/README.md b/experiments/kube-aws/ec2-terminate-by-id/README.md
deleted file mode 100644
index 00a46fa..0000000
--- a/experiments/kube-aws/ec2-terminate-by-id/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> EC2 Terminate </td>
- <td> This experiment terminates an EC2 instance and brings it back to the running state after the specified chaos duration. One or more target instances are provided as a comma-separated list in the `EC2_INSTANCE_ID` env (e.g. instance1,instance2); a parsing sketch follows the table.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws/ec2-terminate-by-id/"> Here </a> </td>
- </tr>
- </table>
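
For reference, here is a minimal sketch of how a comma-separated `EC2_INSTANCE_ID` value can be turned into a target list, mirroring the `strings.Split` call in the experiment code below; the helper name `parseInstanceIDs` is illustrative and not part of litmus-go:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseInstanceIDs splits a comma-separated EC2_INSTANCE_ID value into
// individual instance IDs, trimming whitespace and dropping empty entries.
func parseInstanceIDs(raw string) []string {
	var ids []string
	for _, id := range strings.Split(raw, ",") {
		if id = strings.TrimSpace(id); id != "" {
			ids = append(ids, id)
		}
	}
	return ids
}

func main() {
	// e.g. EC2_INSTANCE_ID="i-0abc123,i-0def456"
	fmt.Println(parseInstanceIDs(os.Getenv("EC2_INSTANCE_ID")))
}
```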
diff --git a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
deleted file mode 100644
index 5db97c1..0000000
--- a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package experiment
-
-import (
-	"os"
-	"strings"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ec2-terminate-by-id/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-id/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-id/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// EC2TerminateByID injects the EC2 instance termination chaos
-func EC2TerminateByID(clients clients.ClientSets) {
-
-	var (
-		err                  error
-		activeNodeCount      int
-		autoScalingGroupName string
-	)
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The instance information is as follows", logrus.Fields{
-		"Chaos Duration":  experimentsDetails.ChaosDuration,
-		"Chaos Namespace": experimentsDetails.ChaosNamespace,
-		"Instance ID":     experimentsDetails.Ec2InstanceID,
-		"Sequence":        experimentsDetails.Sequence,
-	})
-
-	// Start the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	//Verify the aws ec2 instance is running (pre chaos)
-	if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" {
-		log.Info("[Status]: Verify that the aws ec2 instances are in running state (pre-chaos)")
-		if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
-			log.Errorf("EC2 instance status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state")
-	}
-
-	//PRE-CHAOS NODE STATUS CHECK
-	if experimentsDetails.ManagedNodegroup == "enable" {
-		log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
-		activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(strings.Split(experimentsDetails.Ec2InstanceID, ","), experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Pre chaos node status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareEC2TerminateByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//Verify the aws ec2 instance is running (post chaos)
-	if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" {
-		log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
-		if err = aws.InstanceStatusCheckByID(experimentsDetails.Ec2InstanceID, experimentsDetails.Region); err != nil {
-			log.Errorf("EC2 instance status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state (post chaos)")
-	}
-
-	// POST-CHAOS ACTIVE NODE COUNT TEST
-	if experimentsDetails.ManagedNodegroup == "enable" {
-		log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
-		if err := aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
-			log.Errorf("Post chaos active node count check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult:  %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-
-}
diff --git a/experiments/kube-aws/ec2-terminate-by-id/rbac.yaml b/experiments/kube-aws/ec2-terminate-by-id/rbac.yaml
deleted file mode 100644
index ad2b68a..0000000
--- a/experiments/kube-aws/ec2-terminate-by-id/rbac.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: ec2-terminate-by-id-sa
-  namespace: default
-  labels:
-    name: ec2-terminate-by-id-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: ec2-terminate-by-id-sa
-  labels:
-    name: ec2-terminate-by-id-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["patch","get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: ec2-terminate-by-id-sa
-  labels:
-    name: ec2-terminate-by-id-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: ec2-terminate-by-id-sa
-subjects:
-- kind: ServiceAccount
-  name: ec2-terminate-by-id-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/kube-aws/ec2-terminate-by-id/test/test.yml b/experiments/kube-aws/ec2-terminate-by-id/test/test.yml
deleted file mode 100644
index bfbd7e3..0000000
--- a/experiments/kube-aws/ec2-terminate-by-id/test/test.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: ec2-terminate-by-id-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          - name: EC2_INSTANCE_ID
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          secrets:
-            - name: cloud-secret
-              mountPath: /tmp/                 
diff --git a/experiments/kube-aws/ec2-terminate-by-tag/README.md b/experiments/kube-aws/ec2-terminate-by-tag/README.md
deleted file mode 100644
index 834d28c..0000000
--- a/experiments/kube-aws/ec2-terminate-by-tag/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> EC2 Terminate By Tag </td>
- <td> This experiment terminates EC2 instances selected by instance tag and brings them back to the running state after the specified chaos duration. The number of target instances can be controlled with the instance-affected percentage; a sketch of that calculation follows the table.</td>
- <td>  <a href="https://litmuschaos.github.io/litmus/experiments/categories/aws/ec2-terminate-by-tag/"> Here </a> </td>
- </tr>
- </table>
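
A minimal sketch of how an instance-affected percentage could map to a target count; the actual selection logic lives in `litmusLIB.SetTargetInstance`, and the rounding and default rules used here (a percentage of 0 falls back to one instance) are assumptions, not the library's confirmed behaviour:

```go
package main

import "fmt"

// targetCount derives how many tagged instances to terminate from an
// affected percentage. Assumed behaviour: 0 falls back to a single
// instance, and any non-zero percentage selects at least one.
func targetCount(taggedInstances, affectedPerc int) int {
	if taggedInstances == 0 {
		return 0
	}
	if affectedPerc <= 0 {
		return 1
	}
	n := taggedInstances * affectedPerc / 100
	if n < 1 {
		n = 1
	}
	return n
}

func main() {
	fmt.Println(targetCount(4, 50)) // terminates 2 of 4 tagged instances
}
```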
diff --git a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
deleted file mode 100644
index 378bb15..0000000
--- a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ec2-terminate-by-tag/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-tag/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-tag/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// EC2TerminateByTag injects the EC2 instance termination chaos
-func EC2TerminateByTag(clients clients.ClientSets) {
-
-	var (
-		err                  error
-		activeNodeCount      int
-		autoScalingGroupName string
-	)
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err = types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	//DISPLAY THE INSTANCE INFORMATION
-	log.InfoWithValues("The instance information is as follows", logrus.Fields{
-		"Chaos Duration":               experimentsDetails.ChaosDuration,
-		"Chaos Namespace":              experimentsDetails.ChaosNamespace,
-		"Instance Tag":                 experimentsDetails.InstanceTag,
-		"Instance Affected Percentage": experimentsDetails.InstanceAffectedPerc,
-		"Sequence":                     experimentsDetails.Sequence,
-	})
-
-	// Start the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	//selecting the target instance (pre chaos)
-	if err = litmusLIB.SetTargetInstance(&experimentsDetails); err != nil {
-		log.Errorf("Failed to get the target ec2 instance: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS NODE STATUS CHECK
-	if experimentsDetails.ManagedNodegroup == "enable" {
-		log.Info("[Status]: Counting number of active nodes in the node group (pre-chaos)")
-		activeNodeCount, autoScalingGroupName, err = aws.PreChaosNodeCountCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region)
-		if err != nil {
-			log.Errorf("Pre chaos node status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.PrepareEC2TerminateByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	//Verify the aws ec2 instance is running (post chaos)
-	if chaosDetails.DefaultHealthCheck && experimentsDetails.ManagedNodegroup != "enable" {
-		log.Info("[Status]: Verify that the aws ec2 instances are in running state (post-chaos)")
-		if err = aws.InstanceStatusCheck(experimentsDetails.TargetInstanceIDList, experimentsDetails.Region); err != nil {
-			log.Errorf("Failed to get the ec2 instance status as running post chaos: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Status]: EC2 instance is in running state (post chaos)")
-	}
-
-	// POST-CHAOS ACTIVE NODE COUNT TEST
-	if experimentsDetails.ManagedNodegroup == "enable" {
-		log.Info("[Status]: Counting and verifying number of active nodes in the node group (post-chaos)")
-		if err = aws.PostChaosNodeCountCheck(activeNodeCount, autoScalingGroupName, experimentsDetails.Region); err != nil {
-			log.Errorf("Post chaos active node count check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult:  %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-
-}
diff --git a/experiments/kube-aws/ec2-terminate-by-tag/rbac.yaml b/experiments/kube-aws/ec2-terminate-by-tag/rbac.yaml
deleted file mode 100644
index 4b12839..0000000
--- a/experiments/kube-aws/ec2-terminate-by-tag/rbac.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: ec2-terminate-by-tag-sa
-  namespace: default
-  labels:
-    name: ec2-terminate-by-tag-sa
-    app.kubernetes.io/part-of: litmus
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: ec2-terminate-by-tag-sa
-  labels:
-    name: ec2-terminate-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-rules:
-- apiGroups: [""]
-  resources: ["pods","events","secrets"]
-  verbs: ["create","list","get","patch","update","delete","deletecollection"]
-- apiGroups: [""]
-  resources: ["pods/exec","pods/log"]
-  verbs: ["create","list","get"]
-- apiGroups: ["batch"]
-  resources: ["jobs"]
-  verbs: ["create","list","get","delete","deletecollection"]
-- apiGroups: ["litmuschaos.io"]
-  resources: ["chaosengines","chaosexperiments","chaosresults"]
-  verbs: ["create","list","get","patch","update"]
-- apiGroups: [""]
-  resources: ["nodes"]
-  verbs: ["patch","get","list"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: ec2-terminate-by-tag-sa
-  labels:
-    name: ec2-terminate-by-tag-sa
-    app.kubernetes.io/part-of: litmus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: ec2-terminate-by-tag-sa
-subjects:
-- kind: ServiceAccount
-  name: ec2-terminate-by-tag-sa
-  namespace: default
\ No newline at end of file
diff --git a/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml b/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml
deleted file mode 100644
index a646b99..0000000
--- a/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector: 
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels:
-        app: litmus-experiment
-    spec:
-      serviceAccountName: ec2-terminate-by-tag-sa
-      containers:
-      - name: gotest
-        image: busybox
-        command:
-          - sleep 
-          - "3600"
-        env:
-          # value format: key:value, e.g. team:devops
-          - name: INSTANCE_TAG
-            value: ''
-
-          - name: CHAOS_NAMESPACE
-            value: 'default'
-
-          - name: REGION
-            value: ''
-
-          - name: RAMP_TIME
-            value: ''
-
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          secrets:
-            - name: cloud-secret
-              mountPath: /tmp/                 
diff --git a/experiments/kubernetes/node-restart/experiment/node-restart.go b/experiments/kubernetes/node-restart/experiment/node-restart.go
index d8854ae..70d5666 100644
--- a/experiments/kubernetes/node-restart/experiment/node-restart.go
+++ b/experiments/kubernetes/node-restart/experiment/node-restart.go
@@ -44,13 +44,13 @@ func NodeRestart(clients clients.ClientSets) {
 		}
 	}
 
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
+	// //Updating the chaos result in the beginning of experiment
+	// log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
+	// if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
+	// 	log.Errorf("Unable to Create the Chaos Result, err: %v", err)
+	// 	result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
+	// 	return
+	// }
 
 	// Set the chaos result uid
 	result.SetResultUID(&resultDetails, clients, &chaosDetails)
diff --git a/experiments/spring-boot/spring-boot-faults/README.md b/experiments/spring-boot/spring-boot-faults/README.md
deleted file mode 100644
index 5d18fd6..0000000
--- a/experiments/spring-boot/spring-boot-faults/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> Spring Boot App Kill </td>
- <td> This experiment allows injecting Chaos Monkey app-kill assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject app-kill. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior.</td>
- <td>  TODO </td>
- </tr>
-<tr>
- <td> Spring Boot CPU Stress </td>
- <td> This experiment allows injecting Chaos Monkey cpu-stress assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject cpu-stress. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior.</td>
- <td>  TODO </td>
- </tr>
-<tr>
- <td> Spring Boot Memory Stress </td>
- <td> This experiment allows injecting Chaos Monkey memory-stress assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject memory-stress. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior.</td>
- <td>  TODO </td>
- </tr>
-<tr>
- <td> Spring Boot Latency </td>
- <td> This experiment allows injecting Chaos Monkey latency assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject network latency. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior.</td>
- <td>  TODO </td>
- </tr>
-<tr>
- <td> Spring Boot Exceptions </td>
- <td> This experiment allows injecting Chaos Monkey exceptions assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject exceptions. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior.</td>
- <td>  TODO </td>
- </tr>
-<tr>
- <td> Spring Boot Faults </td>
- <td> This experiment allows injecting Chaos Monkey faults assaults on Spring Boot applications that have <a href="https://codecentric.github.io/chaos-monkey-spring-boot/">Chaos Monkey for Spring Boot</a> on their classpath. It can target random pods running a Spring Boot application and allows configuring the assaults to inject spring-boot faults. It tests the resiliency of the system when some applications exhibit unexpected faulty behavior. A sketch of the Chaos Monkey endpoint check these experiments rely on follows the table.</td>
- <td>  TODO </td>
- </tr>
- </table>
-
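
A minimal sketch of the kind of endpoint check the pre-chaos step performs (`litmusLIB.CheckChaosMonkey` in the deleted experiment below); the pod IP, port, and `/actuator/chaosmonkey` path are illustrative here and depend on the target application's configuration:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// hasChaosMonkey reports whether a pod answers on the Chaos Monkey for
// Spring Boot actuator endpoint. Port and path are configurable on the
// target application; /actuator/chaosmonkey is the usual default.
func hasChaosMonkey(podIP, port, path string) (bool, error) {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(fmt.Sprintf("http://%s:%s%s", podIP, port, path))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK, nil
}

func main() {
	ok, err := hasChaosMonkey("10.0.0.12", "8080", "/actuator/chaosmonkey")
	fmt.Println(ok, err)
}
```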
diff --git a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go b/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go
deleted file mode 100644
index 7e3dc57..0000000
--- a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/spring-boot-chaos/lib"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/spring-boot/spring-boot-chaos/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/spring-boot/spring-boot-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	"github.com/sirupsen/logrus"
-)
-
-// Experiment contains steps to inject chaos
-func Experiment(clients clients.ClientSets, expName string) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails, expName)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes, err: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to Create the Chaos Result, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	_ = result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosResult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	//DISPLAY THE APP INFORMATION
-	log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{
-		"Namespace":      experimentsDetails.AppNS,
-		"Label":          experimentsDetails.AppLabel,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Start the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	// Select targeted pods
-	log.Infof("[PreCheck]: Getting targeted pods list")
-	if err := litmusLIB.SetTargetPodList(&experimentsDetails, clients, &chaosDetails); err != nil {
-		log.Errorf("Failed to get target pod list, err: %v", err)
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "Pods: Not Found", "Warning", &chaosDetails)
-		_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-	podNames := make([]string, 0, 1)
-	for _, pod := range experimentsDetails.TargetPodList.Items {
-		podNames = append(podNames, pod.Name)
-	}
-	log.Infof("[PreCheck]: Target pods list for chaos, %v", podNames)
-
-	// Check if the targeted pods have the chaos monkey endpoint
-	log.Infof("[PreCheck]: Checking for ChaosMonkey endpoint in target pods")
-	if _, err := litmusLIB.CheckChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, experimentsDetails.TargetPodList); err != nil {
-		log.Errorf("Some target pods don't have the chaos monkey endpoint, err: %v", err)
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "ChaosMonkey: Not Found", "Warning", &chaosDetails)
-		_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	//PRE-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
-		log.Errorf("Chaos injection failed, err: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	// POST-CHAOS APPLICATION STATUS CHECK
-	if chaosDetails.DefaultHealthCheck {
-		log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
-		if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil {
-			log.Errorf("Application status check failed, err: %v", err)
-			types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails)
-			_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking AUT as running, as we already checked the status of application under test
-		msg := "AUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed, err: %v", err)
-				msg := "AUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "AUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-
-	//Updating the chaosResult in the end of experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to Update the Chaos Result, err: %v", err)
-		return
-	}
-
-	// generating the event in chaosResult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
-
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		_ = events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
-	}
-}
diff --git a/experiments/vmware/vm-poweroff/README.md b/experiments/vmware/vm-poweroff/README.md
deleted file mode 100644
index 609cff0..0000000
--- a/experiments/vmware/vm-poweroff/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## Experiment Metadata
-
-<table>
-<tr>
-<th> Name </th>
-<th> Description </th>
-<th> Documentation Link </th>
-</tr>
-<tr>
- <td> VM Poweroff </td>
- <td> Stops the VM for the specified chaos duration; a sketch of the underlying vSphere REST call follows the table. </td>
- <td> <a href="https://litmuschaos.github.io/litmus/experiments/categories/vmware/vm-poweroff/"> Here </a> </td>
- </tr>
- </table>
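
A minimal sketch of the vSphere REST call a power-off maps to, authenticating with a session ID such as the one `vmware.GetVcenterSessionID` returns in the experiment below; the endpoint shown is the public vSphere Automation REST route for stopping a VM, the server and MOID values are placeholders, and skipping TLS verification is for brevity only:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// powerOffVM posts the vSphere REST power/stop action for a VM MOID,
// authenticating with the vmware-api-session-id header.
func powerOffVM(server, vmMOID, sessionID string) error {
	url := fmt.Sprintf("https://%s/rest/vcenter/vm/%s/power/stop", server, vmMOID)
	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("vmware-api-session-id", sessionID)
	// InsecureSkipVerify keeps the sketch short; use proper CA trust in practice.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("power off failed: %s", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(powerOffVM("vcenter.example.com", "vm-1234", "<session-id>"))
}
```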
diff --git a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
deleted file mode 100644
index bee4d41..0000000
--- a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package experiment
-
-import (
-	"os"
-
-	"github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1"
-	litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/vm-poweroff/lib"
-	clients "github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/cloud/vmware"
-	"github.com/litmuschaos/litmus-go/pkg/events"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/probe"
-	"github.com/litmuschaos/litmus-go/pkg/result"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/common"
-	experimentEnv "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/environment"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types"
-
-	"github.com/sirupsen/logrus"
-)
-
-var err error
-
-// VMPoweroff contains steps to inject vm-power-off chaos
-func VMPoweroff(clients clients.ClientSets) {
-
-	experimentsDetails := experimentTypes.ExperimentDetails{}
-	resultDetails := types.ResultDetails{}
-	eventsDetails := types.EventDetails{}
-	chaosDetails := types.ChaosDetails{}
-
-	//Fetching all the ENV passed from the runner pod
-	log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
-	experimentEnv.GetENV(&experimentsDetails)
-
-	// Initialize the chaos attributes
-	types.InitialiseChaosVariables(&chaosDetails)
-
-	// Initialize Chaos Result Parameters
-	types.SetResultAttributes(&resultDetails, chaosDetails)
-
-	if experimentsDetails.EngineName != "" {
-		// Get values from chaosengine. Bail out upon error, as we haven't entered exp business logic yet
-		if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil {
-			log.Errorf("Unable to initialize the probes: %v", err)
-			return
-		}
-	}
-
-	//Updating the chaos result in the beginning of experiment
-	log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
-	if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
-		log.Errorf("Unable to create the chaosresult: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	// Set the chaos result uid
-	result.SetResultUID(&resultDetails, clients, &chaosDetails)
-
-	// generating the event in chaosresult to mark the verdict as awaited
-	msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
-	types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", types.AwaitedVerdict)
-	}
-
-	if experimentsDetails.VMTag != "" {
-		// GET VM IDs FROM TAG
-		experimentsDetails.VMIds, err = vmware.GetVMIDFromTag(experimentsDetails.VMTag)
-		if err != nil {
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			log.Errorf("Unable to get the VM ID, err: %v", err)
-			return
-		}
-	}
-
-	//DISPLAY THE VM INFORMATION
-	log.InfoWithValues("[Info]: The Instance information is as follows", logrus.Fields{
-		"VM MOIDS":       experimentsDetails.VMIds,
-		"Ramp Time":      experimentsDetails.RampTime,
-		"Chaos Duration": experimentsDetails.ChaosDuration,
-	})
-
-	// Start the AbortWatcher goroutine; it continuously watches for the abort signal and generates the required events and result
-	go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
-
-	// GET SESSION ID TO LOGIN TO VCENTER
-	cookie, err := vmware.GetVcenterSessionID(experimentsDetails.VcenterServer, experimentsDetails.VcenterUser, experimentsDetails.VcenterPass)
-	if err != nil {
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		log.Errorf("Vcenter Login failed: %v", err)
-		return
-	}
-
-	if chaosDetails.DefaultHealthCheck {
-		// PRE-CHAOS VM STATUS CHECK
-		if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
-			log.Errorf("VM status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Verification]: VMs are in running state (pre-chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking IUT as running, as we already checked the status of instance under test
-		msg := "IUT: Running"
-
-		// run the probes in the pre-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
-				log.Errorf("Probe Failed: %v", err)
-				msg := "IUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "IUT: Running, Probes: Successful"
-		}
-		// generating the events for the pre-chaos check
-		types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PreChaosCheck)
-		}
-	}
-
-	chaosDetails.Phase = types.ChaosInjectPhase
-
-	if err = litmusLIB.InjectVMPowerOffChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails, cookie); err != nil {
-		log.Errorf("Chaos injection failed: %v", err)
-		result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-		return
-	}
-
-	log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
-	resultDetails.Verdict = v1alpha1.ResultVerdictPassed
-
-	chaosDetails.Phase = types.PostChaosPhase
-
-	if chaosDetails.DefaultHealthCheck {
-		//POST-CHAOS VM STATUS CHECK
-		log.Info("[Status]: Verify that the IUT (Instance Under Test) is running (post-chaos)")
-		if err := vmware.VMStatusCheck(experimentsDetails.VcenterServer, experimentsDetails.VMIds, cookie); err != nil {
-			log.Errorf("VM status check failed: %v", err)
-			result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-			return
-		}
-		log.Info("[Verification]: VMs are in running state (post-chaos)")
-	}
-
-	if experimentsDetails.EngineName != "" {
-		// marking IUT as running, as we already checked the status of the instance under test
-		msg := "IUT: Running"
-
-		// run the probes in the post-chaos check
-		if len(resultDetails.ProbeDetails) != 0 {
-			if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
-				log.Errorf("Probes Failed: %v", err)
-				msg := "IUT: Running, Probes: Unsuccessful"
-				types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
-				if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-					log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-				}
-				result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails)
-				return
-			}
-			msg = "IUT: Running, Probes: Successful"
-		}
-
-		// generating post chaos event
-		types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.PostChaosCheck)
-		}
-	}
-
-	// Updating the chaosResult at the end of the experiment
-	log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
-	if err = result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
-		log.Errorf("Unable to update the chaosresult: %v", err)
-		return
-	}
-
-	// generating the event in chaosresult to mark the verdict as pass/fail
-	msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
-	reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict)
-	types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
-	if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); eventErr != nil {
-		log.Errorf("Failed to create %v event inside chaosresult", reason)
-	}
-	if experimentsDetails.EngineName != "" {
-		msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
-		types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
-		if eventErr := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine"); eventErr != nil {
-			log.Errorf("Failed to create %v event inside chaosengine", types.Summary)
-		}
-	}
-}
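
Note: the deleted experiment follows the standard Litmus phase sequence (SOT result creation, pre-chaos checks and probes, injection, post-chaos checks, EOT result update). A minimal sketch of that control flow, using hypothetical stand-in helpers rather than the actual litmus-go packages:

package main

import (
	"fmt"
	"log"
)

// runPhases sketches the SOT -> pre-chaos -> inject -> post-chaos -> EOT
// sequence. The step functions are hypothetical stand-ins for the
// litmus-go probe, status, and chaoslib calls used above.
func runPhases(precheck, inject, postcheck func() error) error {
	steps := []struct {
		name string
		fn   func() error
	}{
		{"pre-chaos check", precheck},
		{"chaos injection", inject},
		{"post-chaos check", postcheck},
	}
	for _, step := range steps {
		if err := step.fn(); err != nil {
			return fmt.Errorf("%s failed: %w", step.name, err)
		}
	}
	return nil
}

func main() {
	noop := func() error { return nil }
	if err := runPhases(noop, noop, noop); err != nil {
		log.Fatal(err)
	}
	log.Println("verdict: Pass")
}
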
diff --git a/experiments/vmware/vm-poweroff/test/test.yml b/experiments/vmware/vm-poweroff/test/test.yml
deleted file mode 100644
index a80fa4c..0000000
--- a/experiments/vmware/vm-poweroff/test/test.yml
+++ /dev/null
@@ -1,87 +0,0 @@
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: litmus-experiment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: litmus-experiment
-  template:
-    metadata:
-      labels: 
-        app: litmus-experiment
-    spec:
-      serviceAccountName: vm-poweroff-sa
-      containers:
-      - name: gotest
-        image: busybox 
-        command: 
-          - sleep
-          - "3600"
-        env:
-          # provide application namespace
-          - name: APP_NAMESPACE
-            value: ''
-
-          # provide application labels
-          - name: APP_LABEL
-            value: ''
- 
-          # provide application kind
-          - name: APP_KIND
-            value: '' 
-
-          - name: TOTAL_CHAOS_DURATION
-            value: ''
-
-          # provide auxiliary application details - namespace and labels of the applications
-          # sample input is - "ns1:app=percona,ns2:name=nginx"
-          - name: AUXILIARY_APPINFO
-            value: ''
-          
-          ## Period to wait before injection of chaos in sec
-          - name: RAMP_TIME
-            value: ''
-
-          ## env var that describes the library used to execute the chaos
-          ## default: litmus. Supported values: litmus, powerfulseal, chaoskube
-          - name: LIB
-            value: ''
-
-          # provide the chaos namespace
-          - name: CHAOS_NAMESPACE
-            value: ''
-        
-          - name: POD_NAME
-            valueFrom:
-              fieldRef:
-                fieldPath: metadata.name
-
-          - name: CHAOS_SERVICE_ACCOUNT
-            valueFrom:
-              fieldRef:
-                fieldPath: spec.serviceAccountName
-
-            # provide the VM MOID
-          - name: APP_VM_MOID
-            value: ''              
-
-          - name: VCENTERSERVER
-            valueFrom:
-              secretKeyRef:
-                name: vcenter-secret
-                key: VCENTERSERVER
-
-          - name: VCENTERUSER
-            valueFrom:
-              secretKeyRef:
-                name: vcenter-secret
-                key: VCENTERUSER
-
-          - name: VCENTERPASS
-            valueFrom:
-              secretKeyRef:
-                name: vcenter-secret
-                key: VCENTERPASS                
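
The manifest above injects the vCenter credentials into the experiment pod via secretKeyRef, so the referenced vcenter-secret must exist in the chaos namespace. A sketch (hypothetical helper, not part of litmus-go) of how an experiment binary could fail fast when any of those variables is missing:

package main

import (
	"fmt"
	"os"
)

// requireEnv returns the value of each secret-injected variable from the
// manifest above, or an error naming the first one that is unset or empty.
func requireEnv(keys ...string) (map[string]string, error) {
	vals := make(map[string]string, len(keys))
	for _, k := range keys {
		v := os.Getenv(k)
		if v == "" {
			return nil, fmt.Errorf("required env var %s is not set", k)
		}
		vals[k] = v
	}
	return vals, nil
}

func main() {
	creds, err := requireEnv("VCENTERSERVER", "VCENTERUSER", "VCENTERPASS")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("vCenter endpoint:", creds["VCENTERSERVER"])
}
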
diff --git a/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go b/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go
deleted file mode 100644
index bf5cfed..0000000
--- a/pkg/aws-ssm/aws-ssm-chaos/environment/environment.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "60"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.DocumentName = types.Getenv("DOCUMENT_NAME", "LitmusChaos-AWS-SSM-Doc")
-	experimentDetails.DocumentType = types.Getenv("DOCUMENT_TYPE", "Command")
-	experimentDetails.DocumentFormat = types.Getenv("DOCUMENT_FORMAT", "YAML")
-	experimentDetails.DocumentPath = types.Getenv("DOCUMENT_PATH", "LitmusChaos-AWS-SSM-Docs.yml")
-	experimentDetails.Region = types.Getenv("REGION", "")
-	experimentDetails.Cpu, _ = strconv.Atoi(types.Getenv("CPU_CORE", "0"))
-	experimentDetails.NumberOfWorkers, _ = strconv.Atoi(types.Getenv("NUMBER_OF_WORKERS", "1"))
-	experimentDetails.MemoryPercentage, _ = strconv.Atoi(types.Getenv("MEMORY_PERCENTAGE", "80"))
-	experimentDetails.InstallDependencies = types.Getenv("INSTALL_DEPENDENCIES", "True")
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-	switch expName {
-	case "aws-ssm-chaos-by-tag":
-		experimentDetails.EC2InstanceTag = types.Getenv("EC2_INSTANCE_TAG", "")
-		experimentDetails.InstanceAffectedPerc, _ = strconv.Atoi(types.Getenv("INSTANCE_AFFECTED_PERC", "0"))
-	case "aws-ssm-chaos-by-id":
-		experimentDetails.EC2InstanceID = types.Getenv("EC2_INSTANCE_ID", "")
-	}
-}
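
GetENV above parses numeric variables with strconv.Atoi and discards the parse error, so a malformed value silently becomes 0 rather than the documented default. A sketch of a stricter variant, assuming types.Getenv simply wraps os.Getenv with a fallback:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// atoiEnv reads an integer env var and falls back to the default both when
// the variable is unset and when it fails to parse, instead of silently
// using 0 on a parse error.
func atoiEnv(key string, fallback int) int {
	raw, ok := os.LookupEnv(key)
	if !ok || raw == "" {
		return fallback
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		return fallback
	}
	return n
}

func main() {
	fmt.Println(atoiEnv("TOTAL_CHAOS_DURATION", 60))
}
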
diff --git a/pkg/aws-ssm/aws-ssm-chaos/types/types.go b/pkg/aws-ssm/aws-ssm-chaos/types/types.go
deleted file mode 100644
index 3e0a23e..0000000
--- a/pkg/aws-ssm/aws-ssm-chaos/types/types.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName       string
-	EngineName           string
-	RampTime             int
-	ChaosDuration        int
-	ChaosInterval        int
-	ChaosUID             clientTypes.UID
-	InstanceID           string
-	ChaosNamespace       string
-	ChaosPodName         string
-	Timeout              int
-	Delay                int
-	EC2InstanceID        string
-	EC2InstanceTag       string
-	Region               string
-	InstanceAffectedPerc int
-	Sequence             string
-	Cpu                  int
-	NumberOfWorkers      int
-	MemoryPercentage     int
-	InstallDependencies  string
-	DocumentName         string
-	DocumentType         string
-	DocumentFormat       string
-	DocumentPath         string
-	IsDocsUploaded       bool
-	CommandIDs           []string
-	TargetInstanceIDList []string
-}
diff --git a/pkg/azure/disk-loss/environment/environment.go b/pkg/azure/disk-loss/environment/environment.go
deleted file mode 100644
index ee48cfa..0000000
--- a/pkg/azure/disk-loss/environment/environment.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package environment
-
-import (
-	"strconv"
-	"strings"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "azure-disk-loss")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.ScaleSet = types.Getenv("SCALE_SET", "disable")
-	experimentDetails.ResourceGroup = types.Getenv("RESOURCE_GROUP", "")
-	experimentDetails.VirtualDiskNames = strings.TrimSpace(types.Getenv("VIRTUAL_DISK_NAMES", ""))
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-}
diff --git a/pkg/azure/disk-loss/types/types.go b/pkg/azure/disk-loss/types/types.go
deleted file mode 100644
index fab9c6b..0000000
--- a/pkg/azure/disk-loss/types/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName   string
-	EngineName       string
-	ChaosDuration    int
-	ChaosInterval    int
-	RampTime         int
-	ChaosUID         clientTypes.UID
-	InstanceID       string
-	ChaosNamespace   string
-	ChaosPodName     string
-	Timeout          int
-	Delay            int
-	ScaleSet         string
-	ResourceGroup    string
-	SubscriptionID   string
-	VirtualDiskNames string
-	Sequence         string
-}
diff --git a/pkg/azure/instance-stop/environment/environment.go b/pkg/azure/instance-stop/environment/environment.go
deleted file mode 100644
index 8eb13b5..0000000
--- a/pkg/azure/instance-stop/environment/environment.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package environment
-
-import (
-	"strconv"
-	"strings"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "azure-instance-stop")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.AzureInstanceNames = strings.TrimSpace(types.Getenv("AZURE_INSTANCE_NAMES", ""))
-	experimentDetails.ResourceGroup = types.Getenv("RESOURCE_GROUP", "")
-	experimentDetails.ScaleSet = types.Getenv("SCALE_SET", "disable")
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-}
diff --git a/pkg/azure/instance-stop/types/types.go b/pkg/azure/instance-stop/types/types.go
deleted file mode 100644
index 9217981..0000000
--- a/pkg/azure/instance-stop/types/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName     string
-	EngineName         string
-	RampTime           int
-	ChaosDuration      int
-	ChaosInterval      int
-	ChaosUID           clientTypes.UID
-	InstanceID         string
-	ChaosNamespace     string
-	ChaosPodName       string
-	Timeout            int
-	Delay              int
-	AzureInstanceNames string
-	ResourceGroup      string
-	SubscriptionID     string
-	ScaleSet           string
-	Sequence           string
-}
diff --git a/pkg/baremetal/redfish-node-restart/environment/environment.go b/pkg/baremetal/redfish-node-restart/environment/environment.go
deleted file mode 100644
index bafe960..0000000
--- a/pkg/baremetal/redfish-node-restart/environment/environment.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.AuxiliaryAppInfo = types.Getenv("AUXILIARY_APPINFO", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.IPMIIP = types.Getenv("IPMI_IP", "")
-	experimentDetails.User = types.Getenv("USER", "")
-	experimentDetails.Password = types.Getenv("PASSWORD", "")
-}
diff --git a/pkg/baremetal/redfish-node-restart/types/types.go b/pkg/baremetal/redfish-node-restart/types/types.go
deleted file mode 100644
index e7575b3..0000000
--- a/pkg/baremetal/redfish-node-restart/types/types.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName   string
-	EngineName       string
-	ChaosDuration    int
-	RampTime         int
-	TargetContainer  string
-	ChaosUID         clientTypes.UID
-	InstanceID       string
-	ChaosNamespace   string
-	ChaosPodName     string
-	AuxiliaryAppInfo string
-	Timeout          int
-	Delay            int
-	IPMIIP           string
-	User             string
-	Password         string
-}
diff --git a/pkg/baremetal/redfish/redfish.go b/pkg/baremetal/redfish/redfish.go
deleted file mode 100644
index 8bf5271..0000000
--- a/pkg/baremetal/redfish/redfish.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package redfish
-
-import (
-	"bytes"
-	"crypto/tls"
-	"encoding/base64"
-	"encoding/json"
-
-	"fmt"
-	"net/http"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-)
-
-// State helps get the power state of the node
-type State struct {
-	PowerState string
-}
-
-// GetNodeStatus will check and return the status of the node.
-func GetNodeStatus(IP, user, password string) (string, error) {
-	URL := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/", IP)
-	auth := user + ":" + password
-	encodedAuth := base64.StdEncoding.EncodeToString([]byte(auth))
-	data := map[string]string{}
-	jsonData, _ := json.Marshal(data)
-	req, err := http.NewRequest("GET", URL, bytes.NewBuffer(jsonData))
-	if err != nil {
-		log.Errorf("Error creating HTTP GET request, err: %v", err)
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the node status, err: %v", err)}
-	}
-	req.Header.Add("Authorization", "Basic "+encodedAuth)
-	req.Header.Add("Content-Type", "application/json")
-	req.Header.Add("Accept", "*/*")
-	tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
-	client := &http.Client{Transport: tr}
-	resp, err := client.Do(req)
-	if err != nil {
-		log.Errorf("Error sending the HTTP GET request: %v", err)
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to send the GET request, err: %v", err)}
-	}
-	defer resp.Body.Close()
-	log.Infof("Node status check response: %v", resp.Status)
-	if resp.StatusCode != 200 {
-		log.Error("Unable to get current state of the node")
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the node status. Request failed with status: %v", resp.StatusCode)}
-	}
-	power := new(State)
-	if err := json.NewDecoder(resp.Body).Decode(power); err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to decode the node status response, err: %v", err)}
-	}
-	return power.PowerState, nil
-}
-
-// RebootNode triggers hard reset on the target baremetal node
-func RebootNode(URL, user, password string) error {
-	data := map[string]string{"ResetType": "ForceRestart"}
-	jsonData, err := json.Marshal(data)
-	if err != nil {
-		log.Error(err.Error())
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("unable to marshal the reset request body, err: %v", err)}
-	}
-	auth := user + ":" + password
-	encodedAuth := base64.StdEncoding.EncodeToString([]byte(auth))
-	req, err := http.NewRequest("POST", URL, bytes.NewBuffer(jsonData))
-	if err != nil {
-		log.Errorf("Error creating HTTP post request, err: %v", err)
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("error creating http post request, err: %v", err)}
-	}
-	req.Header.Add("Authorization", "Basic "+encodedAuth)
-	req.Header.Add("Content-Type", "application/json")
-	req.Header.Add("Accept", "*/*")
-	tr := &http.Transport{
-		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
-	}
-	client := &http.Client{Transport: tr}
-	resp, err := client.Do(req)
-	if err != nil {
-		log.Errorf("Error sending HTTP POST request, err: %v", err)
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("error sending http post request, err: %v", err)}
-	}
-	defer resp.Body.Close()
-	log.Infof("Node restart response: %v", resp.Status)
-	if resp.StatusCode >= 400 || resp.StatusCode < 200 {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Reason: fmt.Sprintf("failed to trigger node restart, received http status code %v", resp.StatusCode)}
-	}
-	return nil
-}
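
For reference, a sketch of how these helpers were driven: check the power state, then POST the ForceRestart reset action. The action URL shown is the Dell iDRAC-style path matching the System path used in GetNodeStatus; other BMCs may expose a different path. The import assumes the redfish package as it existed before this change:

package main

import (
	"fmt"
	"log"

	"github.com/litmuschaos/litmus-go/pkg/baremetal/redfish"
)

func main() {
	ip, user, pass := "10.0.0.10", "root", "secret" // placeholder credentials
	state, err := redfish.GetNodeStatus(ip, user, pass)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("power state before restart:", state)
	// Reset action endpoint assumed to follow the System path used above.
	url := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset", ip)
	if err := redfish.RebootNode(url, user, pass); err != nil {
		log.Fatal(err)
	}
}
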
diff --git a/pkg/cassandra/liveness.go b/pkg/cassandra/liveness.go
deleted file mode 100644
index 21027fa..0000000
--- a/pkg/cassandra/liveness.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package cassandra
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types"
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	appsv1 "k8s.io/api/apps/v1"
-	apiv1 "k8s.io/api/core/v1"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// LivenessCheck will create an external liveness pod which continuously checks the liveness of the cassandra statefulset
-func LivenessCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-
-	// Generate the run_id for the liveness pod
-	experimentsDetails.RunID = stringutils.GetRunID()
-
-	// Creating liveness deployment
-	if err := CreateLivenessPod(experimentsDetails, clients); err != nil {
-		return "", err
-	}
-
-	// Creating liveness service
-	if err := CreateLivenessService(experimentsDetails, clients); err != nil {
-		return "", err
-	}
-
-	// Checking the status of liveness deployment pod
-	log.Info("[Status]: Checking the status of the cassandra liveness pod")
-	if err := status.CheckApplicationStatusesByLabels(experimentsDetails.ChaoslibDetail.AppNS, "name=cassandra-liveness-deploy-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("liveness pod is not in running state, %s", err.Error())}
-	}
-
-	// Record cassandra liveness pod resource version
-	ResourceVersionBefore, err := GetLivenessPodResourceVersion(experimentsDetails, clients)
-	if err != nil {
-		return ResourceVersionBefore, cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("failed to get the pod resource version, %s", err.Error())}
-	}
-
-	return ResourceVersionBefore, nil
-}
-
-// LivenessCleanup waits until the liveness pod cycle reaches the complete state,
-// then removes the liveness deployment and service
-func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, ResourceVersionBefore string) error {
-
-	// Getting ClusterIP
-	log.Info("[CleanUP]: Getting ClusterIP of liveness service")
-	ClusterIP, err := GetServiceClusterIP(experimentsDetails, clients)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the ClusterIP of liveness service, %s", err.Error())}
-	}
-
-	// Record cassandra liveness pod resource version after chaos
-	ResourceVersionAfter, err := GetLivenessPodResourceVersion(experimentsDetails, clients)
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the pod resource version, %s", err.Error())}
-	}
-
-	if err = ResourceVersionCheck(ResourceVersionBefore, ResourceVersionAfter); err != nil {
-		return err
-	}
-
-	if err = WaitTillCycleComplete(experimentsDetails, ClusterIP); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("cycle complete test failed, %s", err.Error())}
-	}
-
-	log.Info("[Cleanup]: Deleting cassandra liveness deployment & service")
-	if err = DeleteLivenessDeployment(experimentsDetails, clients); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("liveness deployment deletion failed, %s", err.Error())}
-	}
-	if err = DeleteLivenessService(experimentsDetails, clients); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("liveness service deletion failed, %s", err.Error())}
-	}
-
-	log.Info("[Cleanup]: Cassandra liveness service has been deleted successfully")
-
-	return nil
-}
-
-// GetLivenessPodResourceVersion will return the resource version of the liveness pod
-func GetLivenessPodResourceVersion(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-
-	livenessPods, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-deploy-" + experimentsDetails.RunID})
-	if err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the liveness pod, %s", err.Error())}
-	} else if len(livenessPods.Items) == 0 {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "no liveness pod found with matching labels"}
-	}
-	ResourceVersion := livenessPods.Items[0].ResourceVersion
-
-	return ResourceVersion, nil
-}
-
-// GetServiceClusterIP will return the cluster IP of the liveness service
-func GetServiceClusterIP(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-
-	service, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Get(context.Background(), "cassandra-liveness-service-"+experimentsDetails.RunID, metav1.GetOptions{})
-	if err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to fetch the liveness service, %s", err.Error())}
-	}
-
-	return service.Spec.ClusterIP, nil
-}
-
-// WaitTillCycleComplete polls the status of the liveness pod cycle
-// and waits until the cycle reaches the complete state
-func WaitTillCycleComplete(experimentsDetails *experimentTypes.ExperimentDetails, ClusterIP string) error {
-
-	port := strconv.Itoa(experimentsDetails.LivenessServicePort)
-	URL := "http://" + ClusterIP + ":" + port
-	log.Infof("The URL to check the status of liveness pod cycle, url: %v", URL)
-
-	return retry.
-		Times(uint(experimentsDetails.ChaoslibDetail.Timeout / experimentsDetails.ChaoslibDetail.Delay)).
-		Wait(time.Duration(experimentsDetails.ChaoslibDetail.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			response, err := http.Get(URL)
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("the HTTP request failed with error %s", err)}
-			}
-			data, _ := io.ReadAll(response.Body)
-			response.Body.Close()
-			if !strings.Contains(string(data), "CycleComplete") {
-				log.Info("[Verification]: Waiting for the liveness pod to reach the CycleComplete state")
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: "liveness pod is not in the completed state"}
-			}
-			log.Info("Liveness pod has reached the CycleComplete state")
-			return nil
-		})
-}
-
-// ResourceVersionCheck compares the resource version of the target pods before and after chaos
-func ResourceVersionCheck(ResourceVersionBefore, ResourceVersionAfter string) error {
-
-	if ResourceVersionBefore != ResourceVersionAfter {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "liveness pod failed as target pod is unhealthy"}
-	}
-	log.Info("The cassandra cluster is active")
-
-	return nil
-}
-
-// DeleteLivenessDeployment deletes the liveness deployment and waits for its termination
-func DeleteLivenessDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	deletePolicy := metav1.DeletePropagationForeground
-	if err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaoslibDetail.AppNS).Delete(context.Background(), "cassandra-liveness-deploy-"+experimentsDetails.RunID, metav1.DeleteOptions{
-		PropagationPolicy: &deletePolicy,
-	}); err != nil {
-		return err
-	}
-	return retry.
-		Times(uint(experimentsDetails.ChaoslibDetail.Timeout / experimentsDetails.ChaoslibDetail.Delay)).
-		Wait(time.Duration(experimentsDetails.ChaoslibDetail.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			podSpec, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-deploy-" + experimentsDetails.RunID})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness deployment is not deleted yet, %s", err.Error())}
-			} else if len(podSpec.Items) != 0 {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness pod is not deleted yet"}
-			}
-			return nil
-		})
-}
-
-// DeleteLivenessService deletes the liveness service and waits for its termination
-func DeleteLivenessService(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	deletePolicy := metav1.DeletePropagationForeground
-	if err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Delete(context.Background(), "cassandra-liveness-service-"+experimentsDetails.RunID, metav1.DeleteOptions{
-		PropagationPolicy: &deletePolicy,
-	}); err != nil {
-		return errors.Errorf("failed to delete liveness service, %s", err.Error())
-	}
-	return retry.
-		Times(uint(experimentsDetails.ChaoslibDetail.Timeout / experimentsDetails.ChaoslibDetail.Delay)).
-		Wait(time.Duration(experimentsDetails.ChaoslibDetail.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			svc, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=cassandra-liveness-service-" + experimentsDetails.RunID})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness service is not deleted yet, %s", err.Error())}
-			} else if len(svc.Items) != 0 {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness service is not deleted yet"}
-			}
-			return nil
-		})
-}
-
-// CreateLivenessPod will create a cassandra liveness deployment
-func CreateLivenessPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	// Create liveness deploy
-	liveness := &appsv1.Deployment{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Deployment",
-			APIVersion: "apps/v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "cassandra-liveness-deploy-" + experimentsDetails.RunID,
-			Labels: map[string]string{
-				"name": "cassandra-liveness-deploy-" + experimentsDetails.RunID,
-			},
-		},
-		Spec: appsv1.DeploymentSpec{
-			Replicas: func(i int32) *int32 { return &i }(1),
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{
-					"name": "cassandra-liveness-deploy-" + experimentsDetails.RunID,
-				},
-			},
-			Template: apiv1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"name": "cassandra-liveness-deploy-" + experimentsDetails.RunID,
-					},
-				},
-				Spec: apiv1.PodSpec{
-					Volumes: []apiv1.Volume{
-						{
-							Name: "status-volume",
-							VolumeSource: apiv1.VolumeSource{
-								EmptyDir: &apiv1.EmptyDirVolumeSource{},
-							},
-						},
-					},
-					Containers: []apiv1.Container{
-						{
-							Name:  "liveness-business-logic",
-							Image: experimentsDetails.CassandraLivenessImage,
-							Command: []string{
-								"/bin/bash",
-							},
-							Args: []string{
-								"-c",
-								"bash cassandra-liveness-check.sh",
-							},
-							Env: []apiv1.EnvVar{
-								{
-									Name:  "LIVENESS_PERIOD_SECONDS",
-									Value: "10",
-								},
-								{
-									Name:  "LIVENESS_TIMEOUT_SECONDS",
-									Value: "10",
-								},
-								{
-									Name:  "LIVENESS_RETRY_COUNT",
-									Value: "10",
-								},
-								{
-									Name:  "CASSANDRA_SVC_NAME",
-									Value: experimentsDetails.CassandraServiceName,
-								},
-								{
-									Name:  "REPLICATION_FACTOR",
-									Value: experimentsDetails.KeySpaceReplicaFactor,
-								},
-								{
-									Name:  "CASSANDRA_PORT",
-									Value: strconv.Itoa(experimentsDetails.CassandraPort),
-								},
-							},
-							Resources: apiv1.ResourceRequirements{},
-							VolumeMounts: []apiv1.VolumeMount{
-								{
-									Name:      "status-volume",
-									MountPath: "/var/tmp",
-								},
-							},
-							ImagePullPolicy: apiv1.PullPolicy("Always"),
-						},
-						{
-							Name:  "webserver",
-							Image: experimentsDetails.CassandraLivenessImage,
-							Command: []string{
-								"/bin/bash",
-							},
-							Args: []string{
-								"-c",
-								"bash webserver.sh",
-							},
-							Ports: []apiv1.ContainerPort{
-								{
-									HostPort:      0,
-									ContainerPort: int32(experimentsDetails.LivenessServicePort),
-								},
-							},
-							Env: []apiv1.EnvVar{
-								{
-									Name:  "INIT_WAIT_SECONDS",
-									Value: "10",
-								},
-								{
-									Name:  "LIVENESS_SVC_PORT",
-									Value: strconv.Itoa(experimentsDetails.LivenessServicePort),
-								},
-							},
-							Resources: apiv1.ResourceRequirements{},
-							VolumeMounts: []apiv1.VolumeMount{
-								{
-									Name:      "status-volume",
-									MountPath: "/var/tmp",
-								},
-							},
-							ImagePullPolicy: apiv1.PullPolicy("Always"),
-						},
-					},
-				},
-			},
-			Strategy:        appsv1.DeploymentStrategy{},
-			MinReadySeconds: 0,
-		},
-	}
-
-	// Creating liveness deployment
-	_, err := clients.KubeClient.AppsV1().Deployments(experimentsDetails.ChaoslibDetail.AppNS).Create(context.Background(), liveness, metav1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{deploymentName: %s, namespace: %s}", liveness.Name, liveness.Namespace), Reason: fmt.Sprintf("unable to create liveness deployment, %s", err.Error())}
-	}
-	log.Info("Liveness Deployment Created successfully!")
-	return nil
-}
-
-// CreateLivenessService will create Cassandra liveness service
-func CreateLivenessService(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-	// Create resource object
-	livenessSvc := &apiv1.Service{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Service",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "cassandra-liveness-service-" + experimentsDetails.RunID,
-			Labels: map[string]string{
-				"name": "cassandra-liveness-service-" + experimentsDetails.RunID,
-			},
-		},
-		Spec: apiv1.ServiceSpec{
-			Ports: []apiv1.ServicePort{
-				{
-					Name:     "liveness",
-					Protocol: apiv1.Protocol("TCP"),
-					Port:     int32(experimentsDetails.LivenessServicePort),
-				},
-			},
-			Selector: map[string]string{
-				"name": "cassandra-liveness-deploy-" + experimentsDetails.RunID,
-			},
-			HealthCheckNodePort: 0,
-		},
-	}
-
-	// Creating liveness service
-	_, err := clients.KubeClient.CoreV1().Services(experimentsDetails.ChaoslibDetail.AppNS).Create(context.Background(), livenessSvc, metav1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Target: fmt.Sprintf("{serviceName: %s, namespace: %s}", livenessSvc.Name, livenessSvc.Namespace), Reason: fmt.Sprintf("unable to create liveness service, %s", err.Error())}
-	}
-	log.Info("Liveness service created successfully!")
-
-	return nil
-}
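
The deletion helpers above poll with the litmus-go retry builder (retry.Times(...).Wait(...).Try(...)). A self-contained sketch of that polling pattern, not the actual retry package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil polls fn up to attempts times, sleeping interval between
// tries, and returns the last error if fn never succeeds.
func retryUntil(attempts uint, interval time.Duration, fn func(attempt uint) error) error {
	var err error
	for i := uint(0); i < attempts; i++ {
		if err = fn(i); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}

func main() {
	calls := 0
	err := retryUntil(5, 10*time.Millisecond, func(attempt uint) error {
		calls++
		if calls < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println("err:", err, "after", calls, "calls")
}
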
diff --git a/pkg/cassandra/node-tools.go b/pkg/cassandra/node-tools.go
deleted file mode 100644
index bbd6252..0000000
--- a/pkg/cassandra/node-tools.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package cassandra
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/pkg/errors"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// NodeToolStatusCheck checks for the distribution of the load on the ring
-func NodeToolStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	// Getting application pod list
-	targetPodName, err := GetApplicationPodName(experimentsDetails, clients)
-	if err != nil {
-		return err
-	}
-	log.Infof("[NodeToolStatus]: Selecting %v pod for running `nodetool status` command", targetPodName)
-
-	replicaCount, err := GetApplicationReplicaCount(experimentsDetails, clients)
-	if err != nil {
-		return errors.Errorf("unable to get app replica count, err: %v", err)
-	}
-	log.Info("[Check]: Checking for the distribution of load on the ring")
-
-	// Get the load percentage on the application pod
-	loadPercentage, err := GetLoadDistribution(experimentsDetails, clients, targetPodName)
-	if err != nil {
-		return errors.Errorf("failed to get load percentage, err: %v", err)
-	}
-
-	// Check the load percentage
-	if err = CheckLoadPercentage(loadPercentage, replicaCount); err != nil {
-		return errors.Errorf("load percentage check failed, err: %v", err)
-	}
-
-	return nil
-}
-
-// GetApplicationPodName will return the name of first application pod
-func GetApplicationPodName(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-	podList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.ChaoslibDetail.AppLabel})
-	if err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err)}
-	} else if len(podList.Items) == 0 {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS)}
-	}
-
-	return podList.Items[0].Name, nil
-}
-
-// GetApplicationReplicaCount will return the replica count of the sts application
-func GetApplicationReplicaCount(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (int, error) {
-	podList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.ChaoslibDetail.AppLabel})
-	if err != nil {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace, err: %v", experimentsDetails.ChaoslibDetail.AppNS, err)}
-	} else if len(podList.Items) == 0 {
-		return 0, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("failed to get the application pod in %v namespace", experimentsDetails.ChaoslibDetail.AppNS)}
-	}
-	return len(podList.Items), nil
-}
-
-// CheckLoadPercentage checks the load percentage on every replica
-func CheckLoadPercentage(loadPercentage []string, replicaCount int) error {
-
-	// It makes sure that every replica carries some load;
-	// it fails if any replica reports 0% load
-	if len(loadPercentage) != replicaCount {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "failed to get the load on all of the replicas"}
-	}
-
-	for count := 0; count < len(loadPercentage); count++ {
-
-		if loadPercentage[count] == "0%" || loadPercentage[count] == "" {
-			return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("the load distribution percentage failed, as its value is: '%v'", loadPercentage[count])}
-		}
-	}
-	log.Info("[Check]: Load is distributed over all the replica of cassandra")
-
-	return nil
-}
-
-// GetLoadDistribution returns the load distribution across all the replicas of the application in an array format
-func GetLoadDistribution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, targetPod string) ([]string, error) {
-
-	// It contains all the pod & container details required for the exec command
-	execCommandDetails := litmusexec.PodDetails{}
-
-	command := append([]string{"/bin/sh", "-c"}, "nodetool status  | awk '{print $6}' | tail -n +6 | head -n -1")
-	litmusexec.SetExecCommandAttributes(&execCommandDetails, targetPod, "cassandra", experimentsDetails.ChaoslibDetail.AppNS)
-	response, _, err := litmusexec.Exec(&execCommandDetails, clients, command)
-	if err != nil {
-		return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to get nodetool status details, err: %v", err)}
-	}
-	split := strings.Split(response, "\n")
-	loadPercentage := split[:len(split)-1]
-
-	return loadPercentage, nil
-}
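
GetLoadDistribution leans on a shell pipeline (awk '{print $6}' | tail -n +6 | head -n -1) to pull the ownership column out of `nodetool status`. A sketch of the same extraction in pure Go; the header line count and field position are assumptions that depend on the nodetool output format:

package main

import (
	"fmt"
	"strings"
)

// ownershipColumn skips the five header lines of `nodetool status` and
// keeps the sixth whitespace-separated field of each node row (the
// effective ownership percentage), mirroring the shell pipeline above.
func ownershipColumn(nodetoolStatus string) []string {
	lines := strings.Split(strings.TrimRight(nodetoolStatus, "\n"), "\n")
	var out []string
	for i, line := range lines {
		if i < 5 { // header lines
			continue
		}
		fields := strings.Fields(line)
		if len(fields) >= 6 {
			out = append(out, fields[5])
		}
	}
	return out
}

func main() {
	sample := `Datacenter: datacenter1
=======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
--  Address    Load       Tokens  Owns (effective)  Host ID  Rack
UN  10.0.0.1   103.4 KiB  256     100.0%            abc      rack1
`
	fmt.Println(ownershipColumn(sample)) // [100.0%]
}
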
diff --git a/pkg/cassandra/pod-delete/environment/environment.go b/pkg/cassandra/pod-delete/environment/environment.go
deleted file mode 100644
index b081cc7..0000000
--- a/pkg/cassandra/pod-delete/environment/environment.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	cassandraTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types"
-	exp "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(cassandraDetails *cassandraTypes.ExperimentDetails) {
-
-	var ChaoslibDetail exp.ExperimentDetails
-
-	ChaoslibDetail.ExperimentName = types.Getenv("EXPERIMENT_NAME", "cassandra-pod-delete")
-	ChaoslibDetail.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	ChaoslibDetail.EngineName = types.Getenv("CHAOSENGINE", "")
-	ChaoslibDetail.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	ChaoslibDetail.ChaosInterval = types.Getenv("CHAOS_INTERVAL", "10")
-	ChaoslibDetail.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	ChaoslibDetail.ChaosServiceAccount = types.Getenv("CHAOS_SERVICE_ACCOUNT", "")
-	ChaoslibDetail.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	ChaoslibDetail.InstanceID = types.Getenv("INSTANCE_ID", "")
-	ChaoslibDetail.ChaosPodName = types.Getenv("POD_NAME", "")
-	ChaoslibDetail.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	ChaoslibDetail.Force, _ = strconv.ParseBool(types.Getenv("FORCE", "false"))
-	ChaoslibDetail.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	ChaoslibDetail.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	ChaoslibDetail.PodsAffectedPerc = types.Getenv("PODS_AFFECTED_PERC", "0")
-	ChaoslibDetail.Sequence = types.Getenv("SEQUENCE", "parallel")
-	cassandraDetails.ChaoslibDetail = &ChaoslibDetail
-	cassandraDetails.CassandraServiceName = types.Getenv("CASSANDRA_SVC_NAME", "")
-	cassandraDetails.KeySpaceReplicaFactor = types.Getenv("KEYSPACE_REPLICATION_FACTOR", "")
-	cassandraDetails.CassandraPort, _ = strconv.Atoi(types.Getenv("CASSANDRA_PORT", "9042"))
-	cassandraDetails.LivenessServicePort, _ = strconv.Atoi(types.Getenv("LIVENESS_SVC_PORT", "8088"))
-	cassandraDetails.CassandraLivenessImage = types.Getenv("CASSANDRA_LIVENESS_IMAGE", "litmuschaos/cassandra-client:latest")
-	cassandraDetails.CassandraLivenessCheck = types.Getenv("CASSANDRA_LIVENESS_CHECK", "")
-	cassandraDetails.RunID = types.Getenv("RunID", "")
-
-	ChaoslibDetail.AppNS, ChaoslibDetail.AppKind, ChaoslibDetail.AppLabel = getAppDetails()
-}
-
-func getAppDetails() (string, string, string) {
-	targets := types.Getenv("TARGETS", "")
-	app := types.GetTargets(targets)
-	if len(app) != 0 {
-		return app[0].Namespace, app[0].Kind, app[0].Labels[0]
-	}
-	return "", "", ""
-}
diff --git a/pkg/cassandra/pod-delete/types/types.go b/pkg/cassandra/pod-delete/types/types.go
deleted file mode 100644
index e59e02a..0000000
--- a/pkg/cassandra/pod-delete/types/types.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package types
-
-import (
-	exp "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ChaoslibDetail         *exp.ExperimentDetails
-	CassandraServiceName   string
-	KeySpaceReplicaFactor  string
-	CassandraPort          int
-	LivenessServicePort    int
-	CassandraLivenessImage string
-	CassandraLivenessCheck string
-	RunID                  string
-	Sequence               string
-}
diff --git a/pkg/gcp/gcp-vm-disk-loss/environment/environment.go b/pkg/gcp/gcp-vm-disk-loss/environment/environment.go
deleted file mode 100644
index 610dc2d..0000000
--- a/pkg/gcp/gcp-vm-disk-loss/environment/environment.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	experimentDetails.GCPProjectID = types.Getenv("GCP_PROJECT_ID", "")
-	experimentDetails.DiskVolumeNames = types.Getenv("DISK_VOLUME_NAMES", "")
-	experimentDetails.DiskVolumeLabel = types.Getenv("DISK_VOLUME_LABEL", "")
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-	experimentDetails.Zones = types.Getenv("ZONES", "")
-	experimentDetails.DiskAffectedPerc, _ = strconv.Atoi(types.Getenv("DISK_AFFECTED_PERC", "0"))
-}
diff --git a/pkg/gcp/gcp-vm-disk-loss/types/types.go b/pkg/gcp/gcp-vm-disk-loss/types/types.go
deleted file mode 100644
index a41c172..0000000
--- a/pkg/gcp/gcp-vm-disk-loss/types/types.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName              string
-	EngineName                  string
-	ChaosDuration               int
-	ChaosInterval               int
-	RampTime                    int
-	ChaosUID                    clientTypes.UID
-	InstanceID                  string
-	ChaosNamespace              string
-	ChaosPodName                string
-	Timeout                     int
-	Delay                       int
-	Sequence                    string
-	TargetContainer             string
-	GCPProjectID                string
-	DiskVolumeNames             string
-	Zones                       string
-	DiskVolumeLabel             string
-	TargetDiskVolumeNamesList   []string
-	TargetDiskInstanceNamesList []string
-	DiskAffectedPerc            int
-	DeviceNamesList             []string
-}
diff --git a/pkg/gcp/gcp-vm-instance-stop/environment/environment.go b/pkg/gcp/gcp-vm-instance-stop/environment/environment.go
deleted file mode 100644
index e8b49a1..0000000
--- a/pkg/gcp/gcp-vm-instance-stop/environment/environment.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	experimentDetails.VMInstanceName = types.Getenv("VM_INSTANCE_NAMES", "")
-	experimentDetails.GCPProjectID = types.Getenv("GCP_PROJECT_ID", "")
-	experimentDetails.Zones = types.Getenv("ZONES", "")
-	experimentDetails.ManagedInstanceGroup = types.Getenv("MANAGED_INSTANCE_GROUP", "disable")
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-	experimentDetails.InstanceLabel = types.Getenv("INSTANCE_LABEL", "")
-	experimentDetails.InstanceAffectedPerc, _ = strconv.Atoi(types.Getenv("INSTANCE_AFFECTED_PERC", "0"))
-}
diff --git a/pkg/gcp/gcp-vm-instance-stop/types/types.go b/pkg/gcp/gcp-vm-instance-stop/types/types.go
deleted file mode 100644
index d66da07..0000000
--- a/pkg/gcp/gcp-vm-instance-stop/types/types.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName           string
-	EngineName               string
-	ChaosDuration            int
-	ChaosInterval            int
-	RampTime                 int
-	ChaosUID                 clientTypes.UID
-	InstanceID               string
-	ChaosNamespace           string
-	ChaosPodName             string
-	Timeout                  int
-	Delay                    int
-	VMInstanceName           string
-	GCPProjectID             string
-	Zones                    string
-	ManagedInstanceGroup     string
-	Sequence                 string
-	TargetContainer          string
-	InstanceLabel            string
-	InstanceAffectedPerc     int
-	TargetVMInstanceNameList []string
-}
diff --git a/pkg/kafka/environment/environment.go b/pkg/kafka/environment/environment.go
deleted file mode 100644
index c7c88ab..0000000
--- a/pkg/kafka/environment/environment.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package environment
-
-import (
-	"strconv"
-
-	exp "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-	kafkaTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(kafkaDetails *kafkaTypes.ExperimentDetails) {
-
-	var ChaoslibDetail exp.ExperimentDetails
-
-	ChaoslibDetail.ExperimentName = types.Getenv("EXPERIMENT_NAME", "kafka-broker-pod-failure")
-	ChaoslibDetail.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	ChaoslibDetail.EngineName = types.Getenv("CHAOSENGINE", "")
-	ChaoslibDetail.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "60"))
-	ChaoslibDetail.ChaosInterval = types.Getenv("CHAOS_INTERVAL", "10")
-	ChaoslibDetail.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	ChaoslibDetail.ChaosServiceAccount = types.Getenv("CHAOS_SERVICE_ACCOUNT", "")
-	ChaoslibDetail.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	ChaoslibDetail.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	ChaoslibDetail.InstanceID = types.Getenv("INSTANCE_ID", "")
-	ChaoslibDetail.ChaosPodName = types.Getenv("POD_NAME", "")
-	ChaoslibDetail.Sequence = types.Getenv("SEQUENCE", "parallel")
-	ChaoslibDetail.PodsAffectedPerc = types.Getenv("PODS_AFFECTED_PERC", "0")
-	ChaoslibDetail.Force, _ = strconv.ParseBool(types.Getenv("FORCE", "true"))
-	ChaoslibDetail.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	ChaoslibDetail.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-
-	ChaoslibDetail.AppNS, ChaoslibDetail.AppKind, ChaoslibDetail.AppLabel = getAppDetails()
-
-	kafkaDetails.ChaoslibDetail = &ChaoslibDetail
-	kafkaDetails.KafkaKind = types.Getenv("KAFKA_KIND", "statefulset")
-	kafkaDetails.KafkaLivenessStream = types.Getenv("KAFKA_LIVENESS_STREAM", "enable")
-	kafkaDetails.KafkaLivenessImage = types.Getenv("KAFKA_LIVENESS_IMAGE", "litmuschaos/kafka-client:latest")
-	kafkaDetails.KafkaConsumerTimeout, _ = strconv.Atoi(types.Getenv("KAFKA_CONSUMER_TIMEOUT", "60000"))
-	kafkaDetails.KafkaInstanceName = types.Getenv("KAFKA_INSTANCE_NAME", "")
-	kafkaDetails.KafkaNamespace = types.Getenv("KAFKA_NAMESPACE", "default")
-	kafkaDetails.KafkaLabel = types.Getenv("KAFKA_LABEL", "")
-	kafkaDetails.KafkaBroker = types.Getenv("KAFKA_BROKER", "")
-	kafkaDetails.KafkaRepliationFactor = types.Getenv("KAFKA_REPLICATION_FACTOR", "")
-	kafkaDetails.KafkaService = types.Getenv("KAFKA_SERVICE", "")
-	kafkaDetails.KafkaPort = types.Getenv("KAFKA_PORT", "9092")
-	kafkaDetails.ZookeeperNamespace = types.Getenv("ZOOKEEPER_NAMESPACE", "")
-	kafkaDetails.ZookeeperLabel = types.Getenv("ZOOKEEPER_LABEL", "")
-	kafkaDetails.ZookeeperService = types.Getenv("ZOOKEEPER_SERVICE", "")
-	kafkaDetails.ZookeeperPort = types.Getenv("ZOOKEEPER_PORT", "")
-	kafkaDetails.RunID = types.Getenv("RunID", "")
-
-}
-
-func getAppDetails() (string, string, string) {
-	targets := types.Getenv("TARGETS", "")
-	app := types.GetTargets(targets)
-	if len(app) != 0 {
-		return app[0].Namespace, app[0].Kind, app[0].Labels[0]
-	}
-	return "", "", ""
-}
diff --git a/pkg/kafka/kafka-cluster-health.go b/pkg/kafka/kafka-cluster-health.go
deleted file mode 100644
index bc3e004..0000000
--- a/pkg/kafka/kafka-cluster-health.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package kafka
-
-import (
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-)
-
-// ClusterHealthCheck checks the health of the kafka cluster
-func ClusterHealthCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	// Checking Kafka pods status
-	log.Info("[Status]: Verify that all the kafka pods are running")
-	if err := status.CheckApplicationStatusesByLabels(experimentsDetails.KafkaNamespace, experimentsDetails.KafkaLabel, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-		return err
-	}
-
-	// Checking zookeeper pods status
-	log.Info("[Status]: Verify that all the zookeeper pods are running")
-	return status.CheckApplicationStatusesByLabels(experimentsDetails.ZookeeperNamespace, experimentsDetails.ZookeeperLabel, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients)
-}
-
-// DisplayKafkaBroker displays the kafka broker info
-func DisplayKafkaBroker(experimentsDetails *experimentTypes.ExperimentDetails) {
-
-	if experimentsDetails.KafkaBroker != "" {
-		log.Infof("[Info]: Kafka broker pod for deletion is %v", experimentsDetails.KafkaBroker)
-	} else {
-		log.Info("[Info]: kafka broker will be selected randomly across the cluster")
-	}
-}
diff --git a/pkg/kafka/kafka-liveness-cleanup.go b/pkg/kafka/kafka-liveness-cleanup.go
deleted file mode 100644
index 79a71e6..0000000
--- a/pkg/kafka/kafka-liveness-cleanup.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package kafka
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/utils/retry"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// LivenessCleanup deletes the kafka liveness pod
-func LivenessCleanup(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error {
-
-	if err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).Delete(context.Background(), "kafka-liveness-"+experimentsDetails.RunID, metav1.DeleteOptions{}); err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("fail to delete liveness deployment, %s", err.Error())}
-	}
-
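-	// poll until the liveness pod is gone, retrying every Delay seconds for
-	// up to Timeout seconds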
-	return retry.
-		Times(uint(experimentsDetails.ChaoslibDetail.Timeout / experimentsDetails.ChaoslibDetail.Delay)).
-		Wait(time.Duration(experimentsDetails.ChaoslibDetail.Delay) * time.Second).
-		Try(func(attempt uint) error {
-			podSpec, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaoslibDetail.AppNS).List(context.Background(), metav1.ListOptions{LabelSelector: "name=kafka-liveness-" + experimentsDetails.RunID})
-			if err != nil {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: fmt.Sprintf("liveness pod is not deleted yet, %s", err.Error())}
-			} else if len(podSpec.Items) != 0 {
-				return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosRevert, Reason: "liveness pod is not deleted yet"}
-			}
-			return nil
-		})
-}
diff --git a/pkg/kafka/kafka-liveness-stream.go b/pkg/kafka/kafka-liveness-stream.go
deleted file mode 100644
index 80ef70b..0000000
--- a/pkg/kafka/kafka-liveness-stream.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package kafka
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/litmuschaos/litmus-go/pkg/cerrors"
-	"github.com/litmuschaos/litmus-go/pkg/clients"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types"
-	"github.com/litmuschaos/litmus-go/pkg/log"
-	"github.com/litmuschaos/litmus-go/pkg/status"
-	litmusexec "github.com/litmuschaos/litmus-go/pkg/utils/exec"
-	"github.com/litmuschaos/litmus-go/pkg/utils/stringutils"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// LivenessStream generates the kafka liveness pod, which continuously validates
-// the liveness of the kafka brokers, and derives the kafka topic leader (the
-// candidate for deletion)
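-//
-// A typical (hypothetical) call site:
-//
-//	leaderBroker, err := LivenessStream(&experimentsDetails, clients)
-//	if err == nil {
-//		// leaderBroker is the broker pod picked for deletion
-//	}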
-func LivenessStream(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) (string, error) {
-	var ordinality string
-	var err error
-
-	// Generate a random string as suffix to topic name
-	log.Info("[Liveness]: Set the kafka topic name")
-	experimentsDetails.RunID = stringutils.GetRunID()
-	KafkaTopicName := "topic-" + experimentsDetails.RunID
-
-	log.Info("[Liveness]: Creating the kafka liveness pod")
-	if err := CreateLivenessPod(experimentsDetails, KafkaTopicName, clients); err != nil {
-		return "", err
-	}
-
-	log.Info("[Liveness]: Confirm that the kafka liveness pod is running")
-	if err := status.CheckApplicationStatusesByLabels(experimentsDetails.KafkaNamespace, "name=kafka-liveness-"+experimentsDetails.RunID, experimentsDetails.ChaoslibDetail.Timeout, experimentsDetails.ChaoslibDetail.Delay, clients); err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("liveness pod status check failed, err: %v", err)}
-	}
-
-	log.Info("[Liveness]: Obtain the leader broker ordinality for the topic (partition) created by kafka-liveness")
-	if experimentsDetails.KafkaInstanceName == "" {
-
-		execCommandDetails := litmusexec.PodDetails{}
-		command := append([]string{"/bin/sh", "-c"}, "kafka-topics --topic topic-"+experimentsDetails.RunID+" --describe --zookeeper "+experimentsDetails.ZookeeperService+":"+experimentsDetails.ZookeeperPort+" | grep -o 'Leader: [^[:space:]]*' | awk '{print $2}'")
-		litmusexec.SetExecCommandAttributes(&execCommandDetails, "kafka-liveness-"+experimentsDetails.RunID, "kafka-consumer", experimentsDetails.KafkaNamespace)
-		ordinality, _, err = litmusexec.Exec(&execCommandDetails, clients, command)
-		if err != nil {
-			return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to get ordinality details, err: %v", err)}
-		}
-	} else {
-		// It will contains all the pod & container details required for exec command
-		execCommandDetails := litmusexec.PodDetails{}
-
-		command := append([]string{"/bin/sh", "-c"}, "kafka-topics --topic topic-"+experimentsDetails.RunID+" --describe --zookeeper "+experimentsDetails.ZookeeperService+":"+experimentsDetails.ZookeeperPort+"/"+experimentsDetails.KafkaInstanceName+" | grep -o 'Leader: [^[:space:]]*' | awk '{print $2}'")
-		litmusexec.SetExecCommandAttributes(&execCommandDetails, "kafka-liveness-"+experimentsDetails.RunID, "kafka-consumer", experimentsDetails.KafkaNamespace)
-		ordinality, _, err = litmusexec.Exec(&execCommandDetails, clients, command)
-		if err != nil {
-			return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to get ordinality details, err: %v", err)}
-		}
-	}
-
-	log.Info("[Liveness]: Determine the leader broker pod name")
-	podList, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.KafkaNamespace).List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.KafkaLabel})
-	if err != nil {
-		return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to find the pods with matching labels, err: %v", err)}
-	}
-
-	for _, pod := range podList.Items {
-		// the leader pod name carries the broker ordinality as its suffix
-		if strings.Contains(pod.Name, ordinality) {
-			return pod.Name, nil
-		}
-	}
-
-	return "", cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("no kafka pod found with %v ordinality", ordinality)}
-}
-
-// CreateLivenessPod creates the kafka liveness pod
-func CreateLivenessPod(experimentsDetails *experimentTypes.ExperimentDetails, KafkaTopicName string, clients clients.ClientSets) error {
-
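-	// the liveness pod chains three containers: an init container that creates
-	// the test topic, a producer that writes to it, and a consumer that reads
-	// it back from the brokers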
-	LivenessPod := &corev1.Pod{
-		TypeMeta: metav1.TypeMeta{
-			Kind:       "Pod",
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "kafka-liveness-" + experimentsDetails.RunID,
-			Labels: map[string]string{
-				"app":                       "kafka-liveness",
-				"name":                      "kafka-liveness-" + experimentsDetails.RunID,
-				"app.kubernetes.io/part-of": "litmus",
-			},
-		},
-		Spec: corev1.PodSpec{
-			InitContainers: []corev1.Container{
-				{
-					Name:  "kafka-topic-creator",
-					Image: experimentsDetails.KafkaLivenessImage,
-					Command: []string{
-						"sh",
-						"-c",
-						"./topic.sh",
-					},
-					Env: []corev1.EnvVar{
-						{
-							Name:  "TOPIC_NAME",
-							Value: KafkaTopicName,
-						},
-						{
-							Name:  "KAFKA_INSTANCE_NAME",
-							Value: experimentsDetails.KafkaInstanceName,
-						},
-						{
-							Name:  "ZOOKEEPER_SERVICE",
-							Value: experimentsDetails.ZookeeperService,
-						},
-						{
-							Name:  "ZOOKEEPER_PORT",
-							Value: experimentsDetails.ZookeeperPort,
-						},
-						{
-							Name:  "REPLICATION_FACTOR",
-							Value: experimentsDetails.KafkaRepliationFactor,
-						},
-					},
-					ImagePullPolicy: corev1.PullPolicy("Always"),
-				},
-			},
-			Containers: []corev1.Container{
-				{
-					Name:  "kafka-producer",
-					Image: experimentsDetails.KafkaLivenessImage,
-					Command: []string{
-						"sh",
-						"-c",
-						"stdbuf -oL ./producer.sh",
-					},
-					Env: []corev1.EnvVar{
-						{
-							Name:  "TOPIC_NAME",
-							Value: KafkaTopicName,
-						},
-						{
-							Name:  "KAFKA_SERVICE",
-							Value: experimentsDetails.KafkaService,
-						},
-						{
-							Name:  "KAFKA_PORT",
-							Value: experimentsDetails.KafkaPort,
-						},
-					},
-					ImagePullPolicy: corev1.PullPolicy("Always"),
-				},
-				{
-					Name:  "kafka-consumer",
-					Image: experimentsDetails.KafkaLivenessImage,
-					Command: []string{
-						"sh",
-						"-c",
-						"stdbuf -oL ./consumer.sh",
-					},
-					Env: []corev1.EnvVar{
-						{
-							Name:  "KAFKA_CONSUMER_TIMEOUT",
-							Value: strconv.Itoa(experimentsDetails.KafkaConsumerTimeout),
-						},
-						{
-							Name:  "TOPIC_NAME",
-							Value: KafkaTopicName,
-						},
-						{
-							Name:  "KAFKA_SERVICE",
-							Value: experimentsDetails.KafkaService,
-						},
-						{
-							Name:  "KAFKA_PORT",
-							Value: experimentsDetails.KafkaPort,
-						},
-					},
-					ImagePullPolicy: corev1.PullPolicy("Always"),
-				},
-			},
-			RestartPolicy: corev1.RestartPolicy("Never"),
-		},
-	}
-
-	_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.KafkaNamespace).Create(context.Background(), LivenessPod, metav1.CreateOptions{})
-	if err != nil {
-		return cerrors.Error{ErrorCode: cerrors.ErrorTypeStatusChecks, Reason: fmt.Sprintf("unable to create liveness pod, err: %v", err)}
-	}
-	return nil
-}
diff --git a/pkg/kafka/types/types.go b/pkg/kafka/types/types.go
deleted file mode 100644
index a543070..0000000
--- a/pkg/kafka/types/types.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package types
-
-import (
-	exp "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ChaoslibDetail        *exp.ExperimentDetails
-	ExperimentName        string
-	KafkaKind             string
-	KafkaLivenessStream   string
-	KafkaLivenessImage    string
-	KafkaConsumerTimeout  int
-	KafkaInstanceName     string
-	KafkaNamespace        string
-	KafkaLabel            string
-	KafkaBroker           string
-	KafkaRepliationFactor string
-	KafkaService          string
-	KafkaPort             string
-	ZookeeperNamespace    string
-	ZookeeperLabel        string
-	ZookeeperService      string
-	ZookeeperPort         string
-	RunID                 string
-}
diff --git a/pkg/spring-boot/spring-boot-chaos/environment/environment.go b/pkg/spring-boot/spring-boot-chaos/environment/environment.go
deleted file mode 100644
index 5ba3e0f..0000000
--- a/pkg/spring-boot/spring-boot-chaos/environment/environment.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package environment
-
-import (
-	"encoding/json"
-	"strconv"
-	"strings"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/spring-boot/spring-boot-chaos/types"
-	"github.com/litmuschaos/litmus-go/pkg/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails, expName string) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", expName)
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0"))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.TargetContainer = types.Getenv("TARGET_CONTAINER", "")
-	experimentDetails.TargetPods = types.Getenv("TARGET_PODS", "")
-	experimentDetails.PodsAffectedPerc, _ = strconv.Atoi(types.Getenv("PODS_AFFECTED_PERC", "0"))
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "serial")
-
-	// Chaos monkey assault parameters
-	experimentDetails.ChaosMonkeyPath = types.Getenv("CM_PATH", "/actuator/chaosmonkey")
-	experimentDetails.ChaosMonkeyPort = types.Getenv("CM_PORT", "8080")
-
-	level, _ := strconv.Atoi(types.Getenv("CM_LEVEL", "1"))
-	// guard against a spurious empty entry when CM_WATCHED_CUSTOM_SERVICES is unset
-	watchedCustomServices := make([]string, 0)
-	if services := types.Getenv("CM_WATCHED_CUSTOM_SERVICES", ""); services != "" {
-		watchedCustomServices = strings.Split(services, ",")
-	}
-	commonAssaults := experimentTypes.CommonAssault{
-		Level:                 level,
-		Deterministic:         true,
-		WatchedCustomServices: watchedCustomServices,
-	}
-
-	latency, _ := strconv.Atoi(types.Getenv("LATENCY", "2000"))
-	memoryFillFraction, _ := strconv.ParseFloat(types.Getenv("MEMORY_FILL_FRACTION", "0.7"), 64)
-	cpuLoadTargetFraction, _ := strconv.ParseFloat(types.Getenv("CPU_LOAD_FRACTION", "0.9"), 64)
-	exceptionAssault := getExceptionAssault()
-
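-	// Build the experiment-specific assault payload; the marshalled JSON is
-	// later sent to the target's chaos monkey endpoint (CM_PATH).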
-	switch expName {
-	case "spring-boot-faults":
-		// inject all spring boot faults
-
-		assault := experimentTypes.AllAssault{
-			CommonAssault: commonAssaults,
-		}
-		assault.KillApplicationActive, _ = strconv.ParseBool(types.Getenv("CM_KILL_APPLICATION_ACTIVE", "false"))
-		assault.KillApplicationCron = "*/1 * * * * ?"
-
-		assault.LatencyActive, _ = strconv.ParseBool(types.Getenv("CM_LATENCY_ACTIVE", "false"))
-		assault.LatencyRangeStart = latency
-		assault.LatencyRangeEnd = latency
-
-		assault.MemoryActive, _ = strconv.ParseBool(types.Getenv("CM_MEMORY_ACTIVE", "false"))
-		assault.MemoryMillisecondsHoldFilledMemory = experimentDetails.ChaosDuration * 1000
-		assault.MemoryMillisecondsWaitNextIncrease = 1000
-		assault.MemoryFillIncrementFraction = 1.0
-		assault.MemoryCron = "*/1 * * * * ?"
-		assault.MemoryFillTargetFraction = memoryFillFraction
-
-		assault.CPUActive, _ = strconv.ParseBool(types.Getenv("CM_CPU_ACTIVE", "false"))
-		assault.CPUMillisecondsHoldLoad = experimentDetails.ChaosDuration * 1000
-		assault.CPULoadTargetFraction = cpuLoadTargetFraction
-		assault.CPUCron = "*/1 * * * * ?"
-
-		assault.ExceptionsActive, _ = strconv.ParseBool(types.Getenv("CM_EXCEPTIONS_ACTIVE", "false"))
-		assault.Exception = exceptionAssault
-
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	case "spring-boot-app-kill":
-		// kill application assault
-		assault := experimentTypes.AppKillAssault{
-			CommonAssault:         commonAssaults,
-			KillApplicationActive: true,
-			KillApplicationCron:   "*/1 * * * * ?",
-		}
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	case "spring-boot-latency":
-		// Latency assault
-		assault := experimentTypes.LatencyAssault{
-			CommonAssault:     commonAssaults,
-			LatencyActive:     true,
-			LatencyRangeStart: latency,
-			LatencyRangeEnd:   latency,
-		}
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	case "spring-boot-memory-stress":
-		// Memory assault
-		assault := experimentTypes.MemoryStressAssault{
-			CommonAssault:                      commonAssaults,
-			MemoryActive:                       true,
-			MemoryMillisecondsHoldFilledMemory: experimentDetails.ChaosDuration * 1000,
-			MemoryMillisecondsWaitNextIncrease: 1000,
-			MemoryFillIncrementFraction:        1.0,
-			MemoryCron:                         "*/1 * * * * ?",
-			MemoryFillTargetFraction:           memoryFillFraction,
-		}
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	case "spring-boot-cpu-stress":
-		// CPU assault
-		assault := experimentTypes.CPUStressAssault{
-			CommonAssault:           commonAssaults,
-			CPUActive:               true,
-			CPUMillisecondsHoldLoad: experimentDetails.ChaosDuration * 1000,
-			CPULoadTargetFraction:   cpuLoadTargetFraction,
-			CPUCron:                 "*/1 * * * * ?",
-		}
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	case "spring-boot-exceptions":
-		// Exception assault
-		assault := experimentTypes.ExceptionAssault{
-			CommonAssault:    commonAssaults,
-			ExceptionsActive: true,
-			Exception:        exceptionAssault,
-		}
-		experimentDetails.ChaosMonkeyAssault, _ = json.Marshal(assault)
-	}
-
-	// Building watchers
-	watchers := experimentTypes.ChaosMonkeyWatchers{
-		Controller:     false,
-		RestController: false,
-		Service:        false,
-		Repository:     false,
-		Component:      false,
-		RestTemplate:   false,
-		WebClient:      false,
-	}
-
-	envWatchers := strings.Split(types.Getenv("CM_WATCHERS", ""), ",")
-	for _, watcher := range envWatchers {
-		switch watcher {
-		case "controller":
-			watchers.Controller = true
-		case "restController":
-			watchers.RestController = true
-		case "service":
-			watchers.Service = true
-		case "repository":
-			watchers.Repository = true
-		case "component":
-			watchers.Component = true
-		case "webClient":
-			watchers.WebClient = true
-		default:
-		}
-	}
-	experimentDetails.ChaosMonkeyWatchers = watchers
-}
-
-func getExceptionAssault() experimentTypes.AssaultException {
-	// The exception structure looks like: {type: "", arguments: [{className: "", value: ""}]}
-	assaultException := experimentTypes.AssaultException{}
-	assaultExceptionArguments := make([]experimentTypes.AssaultExceptionArgument, 0)
-
-	assaultException.Type = types.Getenv("CM_EXCEPTIONS_TYPE", "")
-
-	envAssaultExceptionArguments := strings.Split(types.Getenv("CM_EXCEPTIONS_ARGUMENTS", ""), ",")
-
-	for _, argument := range envAssaultExceptionArguments {
-		// skip the empty entry produced by splitting an unset env var
-		if argument == "" {
-			continue
-		}
-		splitArgument := strings.Split(argument, ":")
-		assaultExceptionArgument := experimentTypes.AssaultExceptionArgument{
-			ClassName: splitArgument[0],
-			Value:     "",
-		}
-		if len(splitArgument) > 1 {
-			assaultExceptionArgument.Value = splitArgument[1]
-		}
-		assaultExceptionArguments = append(assaultExceptionArguments, assaultExceptionArgument)
-	}
-	assaultException.Arguments = assaultExceptionArguments
-	return assaultException
-}
diff --git a/pkg/spring-boot/spring-boot-chaos/types/types.go b/pkg/spring-boot/spring-boot-chaos/types/types.go
deleted file mode 100644
index 92a98cc..0000000
--- a/pkg/spring-boot/spring-boot-chaos/types/types.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package types
-
-import (
-	"k8s.io/api/core/v1"
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName     string
-	EngineName         string
-	ChaosDuration      int
-	ChaosInterval      int
-	RampTime           int
-	AppNS              string
-	AppLabel           string
-	AppKind            string
-	ChaosUID           clientTypes.UID
-	InstanceID         string
-	ChaosNamespace     string
-	ChaosPodName       string
-	Timeout            int
-	Delay              int
-	TargetContainer    string
-	PodsAffectedPerc   int
-	TargetPods         string
-	LIBImagePullPolicy string
-	Sequence           string
-	TargetPodList      v1.PodList
-
-	// Chaos monkey parameters
-	ChaosMonkeyAssault  []byte
-	ChaosMonkeyWatchers ChaosMonkeyWatchers
-	ChaosMonkeyPath     string
-	ChaosMonkeyPort     string
-}
-
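-// ChaosMonkeyAssaultRevert carries the flags used to disable every assault
-// again when the chaos is reverted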
-type ChaosMonkeyAssaultRevert struct {
-	LatencyActive         bool `json:"latencyActive"`
-	KillApplicationActive bool `json:"killApplicationActive"`
-	MemoryActive          bool `json:"memoryActive"`
-	CPUActive             bool `json:"cpuActive"`
-	ExceptionsActive      bool `json:"exceptionsActive"`
-}
-
-type AllAssault struct {
-	CommonAssault
-	CPUStressAssault
-	MemoryStressAssault
-	LatencyAssault
-	AppKillAssault
-	ExceptionAssault
-}
-
-type CommonAssault struct {
-	Level                 int      `json:"level"`
-	Deterministic         bool     `json:"deterministic"`
-	WatchedCustomServices []string `json:"watchedCustomServices"`
-}
-
-type CPUStressAssault struct {
-	CommonAssault
-	CPUActive               bool    `json:"cpuActive"`
-	CPUMillisecondsHoldLoad int     `json:"cpuMillisecondsHoldLoad"`
-	CPULoadTargetFraction   float64 `json:"cpuLoadTargetFraction"`
-	CPUCron                 string  `json:"cpuCronExpression"`
-}
-
-type MemoryStressAssault struct {
-	CommonAssault
-	MemoryActive                       bool    `json:"memoryActive"`
-	MemoryMillisecondsHoldFilledMemory int     `json:"memoryMillisecondsHoldFilledMemory"`
-	MemoryMillisecondsWaitNextIncrease int     `json:"memoryMillisecondsWaitNextIncrease"`
-	MemoryFillIncrementFraction        float64 `json:"memoryFillIncrementFraction"`
-	MemoryFillTargetFraction           float64 `json:"memoryFillTargetFraction"`
-	MemoryCron                         string  `json:"memoryCronExpression"`
-}
-
-type LatencyAssault struct {
-	CommonAssault
-	LatencyRangeStart int  `json:"latencyRangeStart"`
-	LatencyRangeEnd   int  `json:"latencyRangeEnd"`
-	LatencyActive     bool `json:"latencyActive"`
-}
-
-type AppKillAssault struct {
-	CommonAssault
-	KillApplicationActive bool   `json:"killApplicationActive"`
-	KillApplicationCron   string `json:"killApplicationCronExpression"`
-}
-
-type ExceptionAssault struct {
-	CommonAssault
-	ExceptionsActive bool             `json:"exceptionsActive"`
-	Exception        AssaultException `json:"exceptions"`
-}
-
-type ChaosMonkeyWatchers struct {
-	Controller     bool `json:"controller"`
-	RestController bool `json:"restController"`
-	Service        bool `json:"service"`
-	Repository     bool `json:"repository"`
-	Component      bool `json:"component"`
-	RestTemplate   bool `json:"restTemplate"`
-	WebClient      bool `json:"webClient"`
-}
-
-type AssaultException struct {
-	Type      string                     `json:"type"`
-	Arguments []AssaultExceptionArgument `json:"arguments"`
-}
-
-type AssaultExceptionArgument struct {
-	ClassName string `json:"className"`
-	Value     string `json:"value"`
-}
diff --git a/pkg/vmware/vm-poweroff/environment/environment.go b/pkg/vmware/vm-poweroff/environment/environment.go
deleted file mode 100644
index c34e306..0000000
--- a/pkg/vmware/vm-poweroff/environment/environment.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package environment
-
-import (
-	"strconv"
-	"strings"
-
-	clientTypes "k8s.io/apimachinery/pkg/types"
-
-	"github.com/litmuschaos/litmus-go/pkg/types"
-	experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types"
-)
-
-// GetENV fetches all the env variables from the runner pod
-func GetENV(experimentDetails *experimentTypes.ExperimentDetails) {
-	experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "vm-poweroff")
-	experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus")
-	experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "")
-	experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30"))
-	experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "30"))
-	experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", ""))
-	experimentDetails.ChaosUID = clientTypes.UID(types.Getenv("CHAOS_UID", ""))
-	experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "")
-	experimentDetails.ChaosPodName = types.Getenv("POD_NAME", "")
-	experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2"))
-	experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180"))
-	experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel")
-	experimentDetails.VMIds = strings.TrimSpace(types.Getenv("APP_VM_MOIDS", ""))
-	experimentDetails.VMTag = strings.TrimSpace(types.Getenv("APP_VM_TAG", ""))
-	experimentDetails.VcenterServer = types.Getenv("VCENTERSERVER", "")
-	experimentDetails.VcenterUser = types.Getenv("VCENTERUSER", "")
-	experimentDetails.VcenterPass = types.Getenv("VCENTERPASS", "")
-}
diff --git a/pkg/vmware/vm-poweroff/types/types.go b/pkg/vmware/vm-poweroff/types/types.go
deleted file mode 100644
index 3e92e65..0000000
--- a/pkg/vmware/vm-poweroff/types/types.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package types
-
-import (
-	clientTypes "k8s.io/apimachinery/pkg/types"
-)
-
-// Add the attributes of your choice here.
-// A few mandatory attributes are added by default.
-
-// ExperimentDetails is for collecting all the experiment-related details
-type ExperimentDetails struct {
-	ExperimentName string
-	EngineName     string
-	ChaosDuration  int
-	ChaosInterval  int
-	RampTime       int
-	ChaosUID       clientTypes.UID
-	InstanceID     string
-	ChaosNamespace string
-	ChaosPodName   string
-	Timeout        int
-	Delay          int
-	Sequence       string
-	VMIds          string
-	VMTag          string
-	VcenterServer  string
-	VcenterUser    string
-	VcenterPass    string
-}