From 38a15258ea620e328e4463b9c22b8817559b714c Mon Sep 17 00:00:00 2001
From: Trekkie Coder <trekkie@netlox.io>
Date: Fri, 9 Aug 2024 00:08:30 +0900
Subject: [PATCH] gh-87: CI/CD added for loxi-ingress

---
 .github/workflows/k3s-loxi-ingress.yml        |  25 ++
 cicd/k3s-flannel-loxilb-ingress/Vagrantfile   |  39 +++
 cicd/k3s-flannel-loxilb-ingress/config.sh     |   3 +
 .../ingress/loxilb-ingress-deploy.yml         | 266 ++++++++++++++++++
 .../ingress/loxilb-ingress-svc.yml            |  23 ++
 .../ingress/loxilb-ingress.yml                |  55 ++++
 .../ingress/loxilb-secret.yml                 |  10 +
 .../kube-loxilb.yml                           | 133 +++++++++
 cicd/k3s-flannel-loxilb-ingress/loxilb.sh     |  13 +
 cicd/k3s-flannel-loxilb-ingress/master.sh     |  15 +
 cicd/k3s-flannel-loxilb-ingress/rmconfig.sh   |   3 +
 cicd/k3s-flannel-loxilb-ingress/validation.sh |  40 +++
 cicd/k3s-flannel-loxilb-ingress/wait_ready.sh |   8 +
 13 files changed, 633 insertions(+)
 create mode 100644 .github/workflows/k3s-loxi-ingress.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/Vagrantfile
 create mode 100755 cicd/k3s-flannel-loxilb-ingress/config.sh
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml
 create mode 100644 cicd/k3s-flannel-loxilb-ingress/loxilb.sh
 create mode 100755 cicd/k3s-flannel-loxilb-ingress/master.sh
 create mode 100755 cicd/k3s-flannel-loxilb-ingress/rmconfig.sh
 create mode 100755 cicd/k3s-flannel-loxilb-ingress/validation.sh
 create mode 100755 cicd/k3s-flannel-loxilb-ingress/wait_ready.sh

diff --git a/.github/workflows/k3s-loxi-ingress.yml b/.github/workflows/k3s-loxi-ingress.yml
new file mode 100644
index 000000000..7b2fa2fcf
--- /dev/null
+++ b/.github/workflows/k3s-loxi-ingress.yml
@@ -0,0 +1,25 @@
+name: K3s-Loxi-Ingress-Sanity-CI
+on:
+ # schedule:
+ # Runs "At 11:00 UTC every day-of-week"
+ # - cron: '0 11 * * *'
+ workflow_dispatch:
+    inputs:
+      testName:
+        description: 'Test Run-Name'     
+        required: true
+        default: 'k3s-loxi-ingress'
+jobs:
+  build:
+    name: k3s-loxi-ingress-sanity
+    runs-on: [self-hosted, large]
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: recursive
+      - run: |
+         cd cicd/k3s-flannel-loxilb-ingress
+         ./config.sh
+         ./validation.sh
+         ./rmconfig.sh
+         cd -
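For reference, the workflow_dispatch trigger above can also be driven from a terminal; a minimal sketch, assuming the GitHub CLI is installed and authenticated for this repository:

   # kick off the sanity run manually with the declared input
   gh workflow run k3s-loxi-ingress.yml -f testName=k3s-loxi-ingress
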
diff --git a/cicd/k3s-flannel-loxilb-ingress/Vagrantfile b/cicd/k3s-flannel-loxilb-ingress/Vagrantfile
new file mode 100644
index 000000000..69cb4c0b4
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/Vagrantfile
@@ -0,0 +1,39 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+workers = (ENV['WORKERS'] || "1").to_i
+#box_name = (ENV['VAGRANT_BOX'] || "ubuntu/focal64")
+box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
+box_version = "0.7.1"
+Vagrant.configure("2") do |config|
+  config.vm.box = "#{box_name}"
+  config.vm.box_version = "#{box_version}"
+
+  if Vagrant.has_plugin?("vagrant-vbguest")
+    config.vbguest.auto_update = false
+  end
+
+  config.vm.define "loxilb" do |loxilb|
+    loxilb.vm.hostname = 'llb1'
+    #loxilb.vm.network "forwarded_port", guest: 55002, host: 5502, protocol: "tcp"
+    loxilb.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
+    loxilb.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
+    loxilb.vm.provision :shell, :path => "loxilb.sh"
+    loxilb.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 6000]
+        vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+
+  config.vm.define "master" do |master|
+    master.vm.hostname = 'master'
+    master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master.sh"
+    master.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 8192]
+        vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+end
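config.sh below simply rebuilds everything, but while debugging the two VMs can also be managed individually with the standard vagrant CLI; a small sketch:

   # bring up and inspect only the loxilb VM defined above
   vagrant up loxilb
   vagrant ssh loxilb -c 'sudo docker ps'
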
diff --git a/cicd/k3s-flannel-loxilb-ingress/config.sh b/cicd/k3s-flannel-loxilb-ingress/config.sh
new file mode 100755
index 000000000..6b8ee48ef
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/config.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+vagrant global-status  | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
+vagrant up
diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml
new file mode 100644
index 000000000..457cf9cdc
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-deploy.yml
@@ -0,0 +1,266 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  name: loxilb-ingress
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  name: loxilb-ingress
+  namespace: kube-system
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: loxilb-ingress
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+      - services
+      - services/status
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+      - update
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - get
+      - watch
+      - list
+  - apiGroups:
+      - authentication.k8s.io
+    resources:
+      - tokenreviews
+    verbs:
+      - create
+  - apiGroups:
+      - authorization.k8s.io
+    resources:
+      - subjectaccessreviews
+    verbs:
+      - create
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - ingressclasses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - coordination.k8s.io
+    resources:
+      - leases
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - list
+      - watch
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  name: loxilb-ingress
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - endpoints
+  - nodes
+  - pods
+  - secrets
+  - namespaces
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  - extensions
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - discovery.k8s.io
+  resources:
+  - endpointslices
+  verbs:
+  - list
+  - watch
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  name: loxilb-ingress
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: loxilb-ingress
+subjects:
+- kind: ServiceAccount
+  name: loxilb-ingress
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  name: loxilb-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: loxilb-ingress
+subjects:
+- kind: ServiceAccount
+  name: loxilb-ingress
+  namespace: kube-system
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: loxilb-ingress
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: loxilb-ingress
+      app.kubernetes.io/instance: loxilb-ingress
+      app.kubernetes.io/name: loxilb-ingress
+  template:
+    metadata:
+      name: loxilb-ingress
+      labels:
+        app: loxilb-ingress
+        app.kubernetes.io/instance: loxilb-ingress
+        app.kubernetes.io/name: loxilb-ingress
+    spec:
+      #hostNetwork: true
+      #dnsPolicy: ClusterFirstWithHostNet
+      serviceAccountName: loxilb-ingress
+      containers:
+      - name: loxilb-ingress
+        volumeMounts:
+          - mountPath: "/opt/loxilb/cert/"
+            name: loxilb-ssl
+        image: "ghcr.io/loxilb-io/loxilb-ingress:latest"
+        imagePullPolicy: Always
+        command: [ "/bin/loxilb-ingress" ]
+        ports:
+        - containerPort: 11111
+      volumes:
+        - name: loxilb-ssl
+          secret:
+            secretName: loxilb-ssl
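After the manifest above is applied (master.sh does this during provisioning), the DaemonSet and its mounted secret can be checked as follows; a sketch, assuming kubectl is run on the master node:

   sudo kubectl -n kube-system rollout status daemonset/loxilb-ingress --timeout=120s
   sudo kubectl -n kube-system get secret loxilb-ssl
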
diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml
new file mode 100644
index 000000000..698bd2aad
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress-svc.yml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: loxilb-ingress-manager
+  namespace: kube-system
+  annotations:
+    loxilb.io/lbmode: "onearm"
+spec:
+  externalTrafficPolicy: Local
+  loadBalancerClass: loxilb.io/loxilb
+  selector:
+    app.kubernetes.io/instance: loxilb-ingress
+    app.kubernetes.io/name: loxilb-ingress
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: 80
+    - name: https
+      port: 443
+      protocol: TCP
+      targetPort: 443
+  type: LoadBalancer
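kube-loxilb is expected to allocate the external address for this Service because of the loxilb.io/loxilb loadBalancerClass; a quick check of the assignment, sketched with the standard Service status fields:

   sudo kubectl -n kube-system get svc loxilb-ingress-manager \
     -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
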
diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml
new file mode 100644
index 000000000..1808b48d7
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-ingress.yml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: site
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: site-handler
+  template:
+    metadata:
+      labels:
+        name: site-handler
+    spec:
+      containers:
+        - name: blog
+          image: ghcr.io/loxilb-io/nginx:stable
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 80
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: site-handler-service
+spec:
+  ports:
+    - name: http
+      port: 80
+      protocol: TCP
+      targetPort: 80
+  selector:
+    name: site-handler
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: site-loxilb-ingress
+spec:
+  ingressClassName: loxilb
+  tls:
+  - hosts:
+    - loxilb.io
+    secretName: loxilb-ssl
+  rules:
+  - host: loxilb.io
+    http:
+      paths:
+        - path: /
+          pathType: Prefix
+          backend:
+            service:
+              name: site-handler-service
+              port:
+                number: 80
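The rule above matches only the host loxilb.io, so a manual probe has to pin that name to the external address; besides the Host-header override used in validation.sh, curl's --resolve option is an alternative sketch (192.168.80.9 being the externalCIDR address handed out by kube-loxilb in this setup):

   curl -sk --resolve loxilb.io:443:192.168.80.9 https://loxilb.io/
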
diff --git a/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml
new file mode 100644
index 000000000..73d691823
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/ingress/loxilb-secret.yml
@@ -0,0 +1,10 @@
+apiVersion: v1
+data:
+  server.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZDVENDQXZHZ0F3SUJBZ0lVSENPekxWNlRFeVg2cjIxNllycFlOWWZOY2Zzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0ZERVNNQkFHQTFVRUF3d0piRzk0YVd4aUxtbHZNQjRYRFRJME1EZ3dPREEyTXpNeE5Gb1hEVEkxTURndwpPREEyTXpNeE5Gb3dGREVTTUJBR0ExVUVBd3dKYkc5NGFXeGlMbWx2TUlJQ0lqQU5CZ2txaGtpRzl3MEJBUUVGCkFBT0NBZzhBTUlJQ0NnS0NBZ0VBb2hMNWgxSlFFVVlpRExvR0hzdDNmM3ZrWkcyMWU1LzM2Rml1WDhsa1pOTkwKZUlUZmUzR1E0ci96K253N0oxSXdlc2VHdkkyZW5FNWtLYVdsZHhpekNEd2JxS21GRk1EMk1zQklEUlRJb2d4NgpOak1YUFlqQ1VxUlhVODJwNzhUa1Bvd1FqdllhcExiZ3REcWdiWWxMZC95VUg5aWdmcHo5VFY2d2grQlMvUDJwCnc1MUMrckRIUHdSM0JNL2hGNUtpeVZway9GNmJNQjZRSFE2bGk5SmR4ZEVNSGtDRXhPWUo1R01kVkEvRmUzODMKbTNwK2JucVd2OXdLTXF0d29LVVVEOFJ0TmdkUXJxSmp0elV3YXRmT0VkY3ptTG1uVXg2VjgyMk9weFhQeG4vSApiSmxUcy8vblRrV0FCWmFEVGFqQ2FnZUpCQnZ4Rk1Eci9mVUdQWlRPRUdEZkxxaE9HM0g4UDJmMGp1OHFpMUJ0Cnp0ODBmT2N2eElLME8veWJnemRINjB6YXJFcEZFRjFEcGF3a0hGWmZHYmVTdnpUeTZSVm0zWWxRRjc2NFZHTDQKSCtMMFFEcVI2Zm0veHoxaEhLbER6dFA2VUV1MjExUUc4RDUvQ1ZUVzdQQUIrMkRWbk1vN0JqRzYrVG55Z0ZqNApOUXZEaW9VQ1NwZzdRT2g2RWw0UjgwVHR1Vmo5bEsvbnVIR08yQ0hwclhnTzUyeWgrZzNzOUJVeXQ5eXVQQVFxCkhIeWsyT3hIc1dURnFTQ3VaNHo0NVI0NWFTcmNDd2t6N09xbWlVWTUrenduT1F5WlN3Y2JzUVhzbVNEUm9KUVcKR2lwSUp0VVZjcnZSbWIzWkFnRDVNdlVQRXpEYjVTME5La1lqczNvWnVBZXVjSGJSS080RkFMRlRDYi9JR2E4QwpBd0VBQWFOVE1GRXdIUVlEVlIwT0JCWUVGTDRJZFpKNE9Obk53cVVmdFhCR25STndwN0V0TUI4R0ExVWRJd1FZCk1CYUFGTDRJZFpKNE9Obk53cVVmdFhCR25STndwN0V0TUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dJQkFFWHg0Q3pUMnNTOCtMVjRHc0I2bDFtRWM5UFJiT205Q1pQK3M2MzQ5QVd5MXh3cwppUmlaYVJVcEJ2Skk1MHZYMjhaeWJRVXpPdVg3ZGE5bFIzWEhLM01QdHlVUk5NYkcySzlmbzVZQ1RqV3lTZEg1CnpJQm1RZVV0cXE1OVMwdytSQytUcC9xd3lveUlUajk2dGU0YmdheGhUV2RtazZUNFZjWkgwY1NQU0hEMG9DeFkKcHJDOVdnQ21kQ3JOWWQ0T2pxaUhwOERhWHFybGhmbWZXdThHaFJlcVNmL1pEOTBrSUw3aEx0OHBXYXRpQnZ3UAowRmtGMjNWcFBwZ0s0MElKM1NBcllSWXlIUllKaDNLK1QzZ2RQY0pOdUloaENrRE1YNUtKdlI1QXdUdWpEL1lKCjNTTVRzL1F0SnZScDd0Q0kxM1lwZXFiaHFoQnBtdzdVWFpSUnh4WURiNHU2L25oZUZkMS9NNjdsYTJtUmpvZlIKUDQxc2pRa1lQSkhsY2hVMHRkQnRjN203bVkrdFo1U2h4bklKZnFBS1pqTEpEZUJyYlhrS2hjNms4NFpBM09vQwpCajl1U3V1RERlUUJ0VDlYUHppOVZaV2pVWis2Zk42QlB0RHVRa0x4V2xacHN0TXJIUEhia1gvVXhmU2NuZEpiCkw0ZXBhTVVqUDJDWnd2NGFraUxjZmQzVXEwaENQZzVZNTNOL1cyWlJ2Y204aGlpcXptaDIyeUxMYUZEQXBOaGEKZitXdUNxNU1HQ2Rib3U1Wnk4TXRoaXZwRnhEUXAzWkh4RktXTGw3VGZpR0hRYXZVK0ZnUVdQUFUrOVVmYksyZQpQYmRSSmxoaVE1Z09VbHBWT1V6bGVxR3lhVldDRHpuQ3JlVWFVcnNzbkNZejdzQmNORTViYUl4dlptUmkKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+  server.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRZ0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1N3d2dna29BZ0VBQW9JQ0FRQ2lFdm1IVWxBUlJpSU0KdWdZZXkzZC9lK1JrYmJWN24vZm9XSzVmeVdSazAwdDRoTjk3Y1pEaXYvUDZmRHNuVWpCNng0YThqWjZjVG1RcApwYVYzR0xNSVBCdW9xWVVVd1BZeXdFZ05GTWlpREhvMk14YzlpTUpTcEZkVHphbnZ4T1ErakJDTzlocWt0dUMwCk9xQnRpVXQzL0pRZjJLQituUDFOWHJDSDRGTDgvYW5EblVMNnNNYy9CSGNFeitFWGtxTEpXbVQ4WHBzd0hwQWQKRHFXTDBsM0YwUXdlUUlURTVnbmtZeDFVRDhWN2Z6ZWJlbjV1ZXBhLzNBb3lxM0NncFJRUHhHMDJCMUN1b21PMwpOVEJxMTg0UjF6T1l1YWRUSHBYemJZNm5GYy9HZjhkc21WT3ovK2RPUllBRmxvTk5xTUpxQjRrRUcvRVV3T3Y5CjlRWTlsTTRRWU44dXFFNGJjZncvWi9TTzd5cUxVRzNPM3pSODV5L0VnclE3L0p1RE4wZnJUTnFzU2tVUVhVT2wKckNRY1ZsOFp0NUsvTlBMcEZXYmRpVkFYdnJoVVl2Z2Y0dlJBT3BIcCtiL0hQV0VjcVVQTzAvcFFTN2JYVkFidwpQbjhKVk5iczhBSDdZTldjeWpzR01icjVPZktBV1BnMUM4T0toUUpLbUR0QTZIb1NYaEh6Uk8yNVdQMlVyK2U0CmNZN1lJZW10ZUE3bmJLSDZEZXowRlRLMzNLNDhCQ29jZktUWTdFZXhaTVdwSUs1bmpQamxIamxwS3R3TENUUHMKNnFhSlJqbjdQQ2M1REpsTEJ4dXhCZXlaSU5HZ2xCWWFLa2dtMVJWeXU5R1p2ZGtDQVBreTlROFRNTnZsTFEwcQpSaU96ZWhtNEI2NXdkdEVvN2dVQXNWTUp2OGdacndJREFRQUJBb0lDQUFKVCt3SE5iUnA3c28zenpnYm1zWlNOCldpSFkyYWxwSHFmWHBOSmdWTkR4aTZUQ3Q0ZFRsaHNPbC9jZXR6RE8wc09tS3cvcDhDT1RQWklCR05KRE9tSXAKS0hqelp6Zjl3aVBsNHBKdERRK3dtRkFYQ0l0ZUhQM25RNzRaN0xnZUNSWFc2c2FJWHc2dkFFbFYwcytETHZvZApiUHZUdVNYUlp4MHRRWEVpaC93VUlVU2pSeE16OE5GaFg3MENmeTF5VTI2NU1rTFYyVXY2Z3M4N2o4UmJEZjlBCnBhWnFKNWp6NUJTYTVsaHl5cFpZQ3pVam9NMm5meTF5OE9BOVZIaDl5SGMxYjFMMmtzYlJBQTJQalBoRjF4bHUKeVE5OUs3Qk9nUEg4VGROVDZVSms1UXNQcE5mR1V0U2hEZkR2a1RNNjZkZlcwTFc2cVJtWlBJdlpUVERkM0J2SwpCN1NnOUs3bXZCbVlsNEpMM05pZXBJVjkvNEVPRzZsNi9QaGxUR0JVRUdrTmdNQ2dTaWxyc05qU2E4ZW9SZHdzCm40VmN5enNWeWZYaGFSTVBFTklLVWZJTmFuenpYRkY2ZFRyd3Azelo0RDNhVzNLdWltYUxJR0hCNXZZaGRFTGoKUE9PQVVXRkVXUjMxMVFMZ3hUUm53aStnRUNlMmhPVmNyUkZVcjhDdlVrWWRUT0FGQzdjUnBOUW5rSVdQNU1QbQpXZkRvM2dZRnkyZU45NGR1MHBzTzVabzlHMm5UMlpiTjl1d0FCMk1ibk91R0xiWVRFbWpHdVRIUVc5Uzd6TFBzCnJMUmtVdndvYWE4cVBmaGc4SWw2ak1mcG5heUdETnU1S2tVV05KcGFZaGp5Y2l6SkVGUjVicTF6UU1QZjE2Qk0KdVloWVMySEhuS1VhdFRmZ3hyNmxBb0lCQVFEa0E3UlB2MURtNmxKTXYzaUU0K1FRYzUyTDdueUJHc3FueEVzdwo1Ly9PN2gvd2ZMMkd4VzFzRXcrSUNDd1VmVjNwZjBqWEl2dktjM3V0YXByWjc2Tk5uWnJJU0syYnFCamZpc2VrCk9vMWRlRlQvNlVaOGxhOWVDMFF3UlVWMWdmZ1I2K0VCayttUDJjSWpJRDVKbkx1b1lLSFNwenRmZG15UEUrODUKVUtXRU5rR1BsN200aStqTzRwWUhYQXdhZVFkU2NSbjVPdVUyY3FBTkZVbmpRbmtQNnp6U2tJVkNUaFNGVkpUYgplZEpIOFNwbW9YL0IzL3VVSm9vVGFvWXQwb1V4NnJsR0xqTDhadUx0TUlVU1J0WUJNd2JuMDVQQkZ0cStuaitlClVtWEJqUEgxbURRN0tlWG1yZHlldHYzQWRUQjdjWUhMZW1GL0lNL1g5N3Brb2twVEFvSUJBUUMxOTJheDhReTkKWGkrNmlVNDIzTEx1R3p3a3EwNlVHeVdSMlBHM3pCeDlHRU5KNS9rV0FrSUdPWjlSMG1xOGRuTnJOeitROTFlawp4QlFJS0xGZzEvMVFWZ2VOMEw3TVJySWxSelBsa2NZYUkrNDFhbktKYUpsd3BMaDEza0RmdzRMUXpOLzhDcElrCk9KajBZWFpNNkpSbHlmTE5jQ1pURFBKVjY0R24vaFNDZ3RISndaRk9MdWZsalIzSVpQbEphcHBTNlNRUDBkVDYKeExmUEsyUWZGR251UTBETitsQjdPVTJMT3d4enF0UldsYWFjTnpOU3B0cUJ2NzMvVFFoQ1l5eVc0RnBReER4Vgo3MzJWZ0tvWVQ3dElpZFF5Z2NrWnp0NnFhRUNVQ0o3UmtuUVNyZDEvUHBITDBrdXkxNDIxc0VLdkUyWVpON21WCkNGYVlzRGdqb0orMUFvSUJBUURFckpGT0xEYUxSMWkvekhRWEpEN25GZUt4L1g2MUdiZnQ1N24zc0ZXTlZ3YUQKdUFUK2g3S3ZndTFscDY3bDZaRWt3OTYvZXZSMmx3LzU4RGNyZnJGOFZGSmJLWjFkNHl1NVBncWdGTVUwOTROUgp6aFEzaUNGZzNCVHdFZ0Fzc0hPYWVieDJVUEFvWFd0QVF5S1crak0vdEVKQTRuQ3JFZ25uakFsUGhjbU85Z0dzCjZ2R09SbGdFZzV0bk03Vlk3RVl0alZNYkQvci84UFV1ODhyczFMeDV4NjJKN3BDVE5hZ3JyVjVNeFpKazdaZG0KT1MxcXZGbFRXNzdEcXFHY1NyY0s3R3p0SlJKamRoZU5BY24yanRVdTZhV3VOMmgrSjhsOG5DRkIzYzdabVVwbgpUZWJYbFhjeGQ0d1I5c04veTFXTFZNZmhER21tYjFYMzhqMTdhaVR6QW9JQkFGN0VLTXlWSURCakkvSSszZWYrCmlvVXMwK2E0L0tSdmV1SjVISEcvTURzSjdzbEhzVlovK0lpcmE4aStEKzUwdGVscGpZWmUrbHNKN3ZETTJJdjYKRUtmTkZnUUthY09UTWVYdUxoM3FERVRDMzZVYitlaUwvQlZKQS9RR3VyeU9Zc3VCVjBrNDdDRkhCSW1KVklYNwpQb1hBWmQ0T0FUZVJiNGZGcmZHaWhtWHQ0WG4wZ0VzNmJIVUZTRFI4T2NPOWEvK3dBYUxuZ2NiVHVuSi9RNVpZCkdFOEk0WEFrWTlPNDVTU1VyUWgwT0QrYmtuaWEydlM1aHVTNXlpWnlwTkdHT3N1Y3JneVFGbWdlNE1XQ2k1TTcKdXVxdE5VRFVqTG9QSGJHYnQ3NGd1eTJqMnlWN1BQYXV6RmxjL1NWMzB3cURjRWNqa0RHajd0ZXB6d2VZQnJTdgpTMTBDZ2dFQUNqSHBrZE5VZkdzOGZnN0x4WDlFcURRZjY4SkhIblBscTQ1azFwN0U4UExmRytFVWNIMnUzekRzCjhmZDByTHAwb2Z3VEhONUNPMmhJVWhCVC9UU0poQ1VaOTZ2ZlRyeXVVWVJjdjZ2NkEvaW1OdFQ3MEQ0ZkZ0cXoKWnB3Si9GNzFwdVlJTmtwSEpteWIvSHdIZmxEdURyVkRjMFF3VmY2WkFPcE9QbnhzM0VHUElqNGdVcDFYMEdVcAp1TERCdVJtR0RFNEVKYUhYUjFHemM0YkduZVlUV0FnY1IxQ2MrbThuWnR3a2dxQWVia0ZtaXFmZDBPOGNkUDlUCkZKWjNSbkRJZHBCYmw0b0hJS0NiTWY0M2pkdlZnM2RTb0hwUTlXUkZ1MEhVQURqenJUS2Z5c0JLMFM3WnlZRmEKc1RoOGJ6QU01YXpBWlZSeHNTbXJiSm95VFRZM3pBPT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
+kind: Secret
+metadata:
+  creationTimestamp: null
+  name: loxilb-ssl
+  namespace: kube-system
+type: Opaque
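The server.crt/server.key above are a pre-generated self-signed pair for loxilb.io; if they ever need to be rotated, an equivalent secret could be rebuilt roughly as follows (a sketch, not the exact commands used to create the data above):

   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
     -subj "/CN=loxilb.io" -keyout server.key -out server.crt
   kubectl -n kube-system create secret generic loxilb-ssl \
     --from-file=server.crt --from-file=server.key
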
diff --git a/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml
new file mode 100644
index 000000000..93a52b373
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/kube-loxilb.yml
@@ -0,0 +1,133 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+      - services
+      - services/status
+    verbs:
+      - get
+      - watch
+      - list
+      - patch
+      - update
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - get
+      - watch
+      - list
+  - apiGroups:
+      - authentication.k8s.io
+    resources:
+      - tokenreviews
+    verbs:
+      - create
+  - apiGroups:
+      - authorization.k8s.io
+    resources:
+      - subjectaccessreviews
+    verbs:
+      - create
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kube-loxilb
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kube-loxilb
+subjects:
+  - kind: ServiceAccount
+    name: kube-loxilb
+    namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: kube-loxilb
+  namespace: kube-system
+  labels:
+    app: kube-loxilb-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kube-loxilb-app
+  template:
+    metadata:
+      labels:
+        app: kube-loxilb-app
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      tolerations:
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+      priorityClassName: system-node-critical
+      serviceAccountName: kube-loxilb
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: kube-loxilb
+        image: ghcr.io/loxilb-io/kube-loxilb:latest
+        imagePullPolicy: Always
+        command:
+        - /bin/kube-loxilb
+        args:
+        - --loxiURL=http://192.168.80.9:11111
+        - --externalCIDR=192.168.80.9/32
+        #- --zone=aws
+        #- --setBGP=64512
+        #- --setRoles=0.0.0.0
+        #- --extBGPPeers=192.168.90.9:64511
+        #- --monitor
+        #- --setLBMode=1
+        #- --config=/opt/loxilb/agent/kube-loxilb.conf
+        resources:
+          requests:
+            cpu: "100m"
+            memory: "50Mi"
+          limits:
+            cpu: "100m"
+            memory: "50Mi"
+        securityContext:
+          privileged: true
+          capabilities:
+            add: ["NET_ADMIN", "NET_RAW"]
diff --git a/cicd/k3s-flannel-loxilb-ingress/loxilb.sh b/cicd/k3s-flannel-loxilb-ingress/loxilb.sh
new file mode 100644
index 000000000..74e66ae9b
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/loxilb.sh
@@ -0,0 +1,13 @@
+export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+apt-get update
+apt-get install -y software-properties-common
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu  $(lsb_release -cs)  stable"
+apt-get update
+apt-get install -y docker-ce
+docker run -u root --cap-add SYS_ADMIN   --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest
+echo alias loxicmd=\"sudo docker exec -it loxilb loxicmd\" >> ~/.bashrc
+echo alias loxilb=\"sudo docker exec -it loxilb \" >> ~/.bashrc
+
+echo $LOXILB_IP > /vagrant/loxilb-ip
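Once this provisioning script finishes, loxilb is just a privileged Docker container on the llb1 VM; its state can be queried through loxicmd, for example (the .bashrc alias is not available to non-interactive shells, hence the explicit docker exec):

   sudo docker ps --filter name=loxilb
   sudo docker exec -it loxilb loxicmd get lb -o wide
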
diff --git a/cicd/k3s-flannel-loxilb-ingress/master.sh b/cicd/k3s-flannel-loxilb-ingress/master.sh
new file mode 100755
index 000000000..e78fce62e
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/master.sh
@@ -0,0 +1,15 @@
+export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik --disable servicelb --node-ip=${MASTER_IP}"  sh -
+
+echo $MASTER_IP > /vagrant/master-ip
+sudo cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token
+sudo sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /etc/rancher/k3s/k3s.yaml
+sudo cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml
+sudo kubectl apply -f /vagrant/kube-loxilb.yml
+sudo kubectl apply -f /vagrant/ingress/loxilb-secret.yml
+sudo kubectl apply -f /vagrant/ingress/loxilb-ingress-deploy.yml
+sudo kubectl apply -f /vagrant/ingress/loxilb-ingress-svc.yml
+sudo kubectl apply -f /vagrant/ingress/loxilb-ingress.yml
+sleep 30
+/vagrant/wait_ready.sh
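master.sh leaves the k3s node token and kubeconfig under /vagrant, which is enough for adding agents later even though this Vagrantfile provisions no worker VM; a hedged sketch of a worker join using the stock k3s installer:

   MASTER_IP=$(cat /vagrant/master-ip)
   curl -sfL https://get.k3s.io | K3S_URL="https://${MASTER_IP}:6443" \
     K3S_TOKEN=$(cat /vagrant/node-token) sh -
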
diff --git a/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh b/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh
new file mode 100755
index 000000000..f157b24ba
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/rmconfig.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+vagrant destroy -f master
+vagrant destroy -f loxilb
diff --git a/cicd/k3s-flannel-loxilb-ingress/validation.sh b/cicd/k3s-flannel-loxilb-ingress/validation.sh
new file mode 100755
index 000000000..296b29f1f
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/validation.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+source ../common.sh
+echo k3s-loxi-ingress
+
+if [ "$1" ]; then
+  KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+sleep 45
+
+echo "Service Info"
+vagrant ssh master -c 'sudo kubectl get svc -A'
+echo "Ingress Info"
+vagrant ssh master -c 'sudo kubectl get ingress -A'
+echo "LB Info"
+vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get lb -o wide'
+echo "EP Info"
+vagrant ssh loxilb -c 'sudo docker exec -i loxilb loxicmd get ep -o wide'
+
+print_debug_info() {
+  echo "llb1 route-info"
+  vagrant ssh loxilb -c 'ip route'
+  vagrant ssh master -c 'sudo kubectl get pods -A'
+  vagrant ssh master -c 'sudo kubectl get svc'
+  vagrant ssh master -c 'sudo kubectl get nodes'
+}
+
+out=$(curl -s --connect-timeout 30 -H "Accept: application/json" -H "Content-Type: application/json" -H "HOST: loxilb.io" --insecure https://192.168.80.9:443)
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+  echo "k3s-loxi-ingress tcp [OK]"
+else
+  echo "k3s-loxi-ingress tcp [FAILED]"
+  print_debug_info
+  exit 1
+fi
+
+exit
diff --git a/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh b/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh
new file mode 100755
index 000000000..3736a1ba7
--- /dev/null
+++ b/cicd/k3s-flannel-loxilb-ingress/wait_ready.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+function wait_cluster_ready_full {
+  sudo kubectl wait pod --all --for=condition=Ready --namespace=kube-system --timeout=240s
+  sudo kubectl wait pod --all --for=condition=Ready --namespace=default --timeout=60s
+}
+
+wait_cluster_ready_full