Commit

bugfix: add auth flag for sealer scale
kakzhou719 committed Jun 9, 2022
1 parent 61c1a72 commit 4587b00
Showing 10 changed files with 180 additions and 89 deletions.
4 changes: 0 additions & 4 deletions apply/apply.go
@@ -86,10 +86,6 @@ func NewApplierFromFile(path string) (applydriver.Interface, error) {
}, nil
}

func NewApplier(cluster *v2.Cluster) (applydriver.Interface, error) {
return NewDefaultApplier(cluster)
}

func NewDefaultApplier(cluster *v2.Cluster) (applydriver.Interface, error) {
if cluster.Name == "" {
return nil, fmt.Errorf("cluster name cannot be empty")
4 changes: 2 additions & 2 deletions apply/processor/gen.go
@@ -20,7 +20,7 @@ import (
"fmt"
"strconv"

"github.com/sealerio/sealer/utils/yaml"
"github.com/sealerio/sealer/pkg/clusterfile"

"github.com/sealerio/sealer/utils/net"

@@ -74,7 +74,7 @@ func NewGenerateProcessor() (Processor, error) {

func (g *GenerateProcessor) init(cluster *v2.Cluster) error {
fileName := fmt.Sprintf("%s/.sealer/%s/Clusterfile", common.GetHomeDir(), cluster.Name)
if err := yaml.MarshalToFile(fileName, cluster); err != nil {
if err := clusterfile.SaveToDisk(cluster, fileName); err != nil {
return err
}
return nil
30 changes: 19 additions & 11 deletions apply/run.go
@@ -49,29 +49,37 @@ func (c *ClusterArgs) SetClusterArgs() error {
c.cluster.Kind = common.Cluster
c.cluster.Name = c.runArgs.ClusterName
c.cluster.Spec.Image = c.imageName
c.cluster.Spec.SSH.User = c.runArgs.User
c.cluster.Spec.SSH.Pk = c.runArgs.Pk
c.cluster.Spec.SSH.PkPasswd = c.runArgs.PkPassword
c.cluster.Spec.SSH.Port = strconv.Itoa(int(c.runArgs.Port))
c.cluster.Spec.Env = append(c.cluster.Spec.Env, c.runArgs.CustomEnv...)
c.cluster.Spec.CMDArgs = append(c.cluster.Spec.CMDArgs, c.runArgs.CMDArgs...)
if c.runArgs.Password != "" {
c.cluster.Spec.SSH.Passwd = c.runArgs.Password
}

err := PreProcessIPList(c.runArgs)
if err != nil {
return err
}

if net.IsIPList(c.runArgs.Masters) && (net.IsIPList(c.runArgs.Nodes) || c.runArgs.Nodes == "") {
// add common ssh config.
c.cluster.Spec.SSH = v1.SSH{
User: c.runArgs.User,
Passwd: c.runArgs.Password,
Pk: c.runArgs.Pk,
PkPasswd: c.runArgs.PkPassword,
Port: strconv.Itoa(int(c.runArgs.Port)),
}

masters := strings.Split(c.runArgs.Masters, ",")
nodes := strings.Split(c.runArgs.Nodes, ",")
c.hosts = []v2.Host{}

c.setHostWithIpsPort(masters, common.MASTER)
if len(nodes) != 0 {
// If s does not contain sep and sep is not empty, Split returns a
// slice of length 1 whose only element is s.
if len(nodes) > 1 {
c.setHostWithIpsPort(nodes, common.NODE)
}
c.cluster.Spec.Hosts = c.hosts
} else {
// if the user executes sealer run without a password or infra info, choose the local host IP as the master0 IP.
ip, err := net.GetLocalDefaultIP()
if err != nil {
return err
@@ -92,14 +100,14 @@ func (c *ClusterArgs) setHostWithIpsPort(ips []string, role string) {
for i := range ips {
ip, port := net.GetHostIPAndPortOrDefault(ips[i], strconv.Itoa(int(c.runArgs.Port)))
if _, ok := hostMap[port]; !ok {
hostMap[port] = &v2.Host{IPS: []string{ip}, Roles: []string{role}, SSH: v1.SSH{Port: port}}
hostMap[port] = &v2.Host{IPS: []string{ip}, Roles: []string{role}}
continue
}
hostMap[port].IPS = append(hostMap[port].IPS, ip)
}
_, master0Port := net.GetHostIPAndPortOrDefault(ips[0], strconv.Itoa(int(c.runArgs.Port)))
for port, host := range hostMap {
host.IPS = removeIPListDuplicatesAndEmpty(host.IPS)
host.IPS = removeDuplicate(host.IPS)
if port == master0Port && role == common.MASTER {
c.hosts = append([]v2.Host{*host}, c.hosts...)
continue
@@ -117,5 +125,5 @@ func NewApplierFromArgs(imageName string, runArgs *Args) (applydriver.Interface,
if err := c.SetClusterArgs(); err != nil {
return nil, err
}
return NewApplier(c.cluster)
return NewDefaultApplier(c.cluster)
}
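
The run.go change stops copying the SSH port into every generated host entry; instead setHostWithIpsPort groups the supplied addresses by their (possibly per-address) port and keeps the group containing master0 at the front of cluster.Spec.Hosts. Below is a minimal standalone sketch of that grouping idea, using plain maps and slices instead of the v2.Host type and assuming IPv4 addresses with an optional ip:port suffix; it is an illustration of the approach, not the project's code.

package main

import (
    "fmt"
    "strings"
)

// splitHostPort mimics the role of net.GetHostIPAndPortOrDefault in the diff:
// an address may be "ip" or "ip:port"; a missing port falls back to the default.
func splitHostPort(addr, defaultPort string) (ip, port string) {
    if i := strings.LastIndex(addr, ":"); i >= 0 {
        return addr[:i], addr[i+1:]
    }
    return addr, defaultPort
}

// groupByPort collects IPs that share a port into one group, keeping the
// group that contains the first address (master0) at the front.
func groupByPort(addrs []string, defaultPort string) [][]string {
    groups := map[string][]string{}
    order := []string{}
    for _, a := range addrs {
        ip, port := splitHostPort(a, defaultPort)
        if _, ok := groups[port]; !ok {
            order = append(order, port)
        }
        groups[port] = append(groups[port], ip)
    }
    _, first := splitHostPort(addrs[0], defaultPort)
    out := [][]string{groups[first]}
    for _, p := range order {
        if p != first {
            out = append(out, groups[p])
        }
    }
    return out
}

func main() {
    masters := []string{"10.0.0.1", "10.0.0.2:2222", "10.0.0.3"}
    fmt.Println(groupByPort(masters, "22")) // [[10.0.0.1 10.0.0.3] [10.0.0.2]]
}
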
128 changes: 90 additions & 38 deletions apply/scale.go
@@ -16,8 +16,13 @@ package apply

import (
"fmt"
"strconv"
"strings"

"github.com/sealerio/sealer/utils/hash"

v1 "github.com/sealerio/sealer/types/api/v1"

"github.com/sealerio/sealer/utils/yaml"

"github.com/sealerio/sealer/utils/net"
@@ -49,68 +54,109 @@ func NewScaleApplierFromArgs(clusterfile string, scaleArgs *Args, flag string) (
return nil, err
}

/* if err := utils.MarshalYamlToFile(clusterfile, cluster); err != nil {
return nil, err
}*/
applier, err := NewApplier(cluster)
applier, err := NewDefaultApplier(cluster)
if err != nil {
return nil, err
}
return applier, nil
}

func Join(cluster *v2.Cluster, scalingArgs *Args) error {
/* switch cluster.Spec.Provider {
case common.BAREMETAL:
return joinBaremetalNodes(cluster, scalingArgs)
case common.AliCloud:
return joinInfraNodes(cluster, scalingArgs)
case common.CONTAINER:
return joinInfraNodes(cluster, scalingArgs)
default:
return fmt.Errorf(" clusterfile provider type is not found !")
}*/
return joinBaremetalNodes(cluster, scalingArgs)
func Join(cluster *v2.Cluster, scaleArgs *Args) error {
return joinBaremetalNodes(cluster, scaleArgs)
}

func joinBaremetalNodes(cluster *v2.Cluster, scaleArgs *Args) error {
if err := PreProcessIPList(scaleArgs); err != nil {
var err error
// merge custom Env into the existing cluster
cluster.Spec.Env = append(cluster.Spec.Env, scaleArgs.CustomEnv...)

if err = PreProcessIPList(scaleArgs); err != nil {
return err
}

if (!net.IsIPList(scaleArgs.Nodes) && scaleArgs.Nodes != "") || (!net.IsIPList(scaleArgs.Masters) && scaleArgs.Masters != "") {
return fmt.Errorf(" Parameter error: The current mode should submit iplist!")
}

if scaleArgs.Masters != "" && net.IsIPList(scaleArgs.Masters) {
for i := 0; i < len(cluster.Spec.Hosts); i++ {
role := cluster.Spec.Hosts[i].Roles
if !strUtils.NotIn(common.MASTER, role) {
cluster.Spec.Hosts[i].IPS = removeIPListDuplicatesAndEmpty(append(cluster.Spec.Hosts[i].IPS, strings.Split(scaleArgs.Masters, ",")...))
break
}
if i == len(cluster.Spec.Hosts)-1 {
return fmt.Errorf("not found `master` role from file")
}
// if scaleArgs's ssh auth credential differs from the local cluster's, add it to each joined host;
// otherwise the joined hosts use the local cluster's ssh auth credential.
var changedSSH *v1.SSH

passwd := cluster.Spec.SSH.Passwd
if cluster.Spec.SSH.Encrypted {
passwd, err = hash.AesDecrypt([]byte(cluster.Spec.SSH.Passwd))
if err != nil {
return err
}
}
//add join node
if scaleArgs.Nodes != "" && net.IsIPList(scaleArgs.Nodes) {
for i := 0; i < len(cluster.Spec.Hosts); i++ {
role := cluster.Spec.Hosts[i].Roles
if !strUtils.NotIn(common.NODE, role) {
cluster.Spec.Hosts[i].IPS = removeIPListDuplicatesAndEmpty(append(cluster.Spec.Hosts[i].IPS, strings.Split(scaleArgs.Nodes, ",")...))
break

if scaleArgs.Password != "" && scaleArgs.Password != passwd {
// Encrypt the password here so the later merge does not fail.
passwd, err = hash.AesEncrypt([]byte(scaleArgs.Password))
if err != nil {
return err
}
changedSSH = &v1.SSH{
Encrypted: true,
User: scaleArgs.User,
Passwd: passwd,
Pk: scaleArgs.Pk,
PkPasswd: scaleArgs.PkPassword,
Port: strconv.Itoa(int(scaleArgs.Port)),
}
}

// add joined masters
if scaleArgs.Masters != "" {
masterIPs := cluster.GetMasterIPList()
addedMasterIP := removeDuplicate(strings.Split(scaleArgs.Masters, ","))

for _, ip := range addedMasterIP {
// if the ip is already taken by a master, return a duplicated-ip error
if !strUtils.NotIn(ip, masterIPs) {
return fmt.Errorf("failed to scale master for duplicated ip: %s", ip)
}
if i == len(cluster.Spec.Hosts)-1 {
hosts := v2.Host{IPS: removeIPListDuplicatesAndEmpty(strings.Split(scaleArgs.Nodes, ",")), Roles: []string{common.NODE}}
cluster.Spec.Hosts = append(cluster.Spec.Hosts, hosts)
}

host := v2.Host{
IPS: addedMasterIP,
Roles: []string{common.MASTER},
}

if changedSSH != nil {
host.SSH = *changedSSH
}

cluster.Spec.Hosts = append(cluster.Spec.Hosts, host)
}

// add joined nodes
if scaleArgs.Nodes != "" {
nodeIPs := cluster.GetNodeIPList()
addedNodeIP := removeDuplicate(strings.Split(scaleArgs.Nodes, ","))

for _, ip := range addedNodeIP {
// if the ip is already taken by a node, return a duplicated-ip error
if !strUtils.NotIn(ip, nodeIPs) {
return fmt.Errorf("failed to scale node for duplicated ip: %s", ip)
}
}

host := v2.Host{
IPS: addedNodeIP,
Roles: []string{common.NODE},
}

if changedSSH != nil {
host.SSH = *changedSSH
}

cluster.Spec.Hosts = append(cluster.Spec.Hosts, host)
}
return nil
}

func removeIPListDuplicatesAndEmpty(ipList []string) []string {
func removeDuplicate(ipList []string) []string {
return strUtils.RemoveDuplicate(strUtils.NewComparator(ipList, []string{""}).GetSrcSubtraction())
}

@@ -119,16 +165,22 @@ func Delete(cluster *v2.Cluster, scaleArgs *Args) error {
}

func deleteBaremetalNodes(cluster *v2.Cluster, scaleArgs *Args) error {
// add custom Env params for the delete option to support executing users' clean scripts via env.
cluster.Spec.Env = append(cluster.Spec.Env, scaleArgs.CustomEnv...)

if err := PreProcessIPList(scaleArgs); err != nil {
return err
}

if (!net.IsIPList(scaleArgs.Nodes) && scaleArgs.Nodes != "") || (!net.IsIPList(scaleArgs.Masters) && scaleArgs.Masters != "") {
return fmt.Errorf(" Parameter error: The current mode should submit iplist!")
}

// the master0 machine cannot be deleted
if !strUtils.NotIn(cluster.GetMaster0IP(), strings.Split(scaleArgs.Masters, ",")) {
return fmt.Errorf("master0 machine cannot be deleted")
}

if scaleArgs.Masters != "" && net.IsIPList(scaleArgs.Masters) {
for i := range cluster.Spec.Hosts {
if !strUtils.NotIn(common.MASTER, cluster.Spec.Hosts[i].Roles) {
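
The core of this scale bugfix is the credential handling in joinBaremetalNodes: the cluster-wide password is decrypted (when it was stored encrypted), compared with the password passed on the command line, and only when they differ do the newly joined hosts receive their own encrypted v1.SSH block. The following rough standalone sketch shows that decision, with a toy encrypt/decrypt pair standing in for sealer's hash.AesEncrypt/AesDecrypt (which also return errors) and a trimmed local SSH struct; treat it as an illustration under those assumptions.

package main

import "fmt"

// Stand-in for the v1.SSH fields used by this commit (trimmed, hypothetical).
type SSH struct {
    User, Passwd, Pk, PkPasswd, Port string
    Encrypted                        bool
}

// Toy placeholders for hash.AesEncrypt / hash.AesDecrypt; the real functions also return an error.
func encrypt(s string) string { return "enc(" + s + ")" }
func decrypt(s string) string { return s[len("enc(") : len(s)-1] }

// overrideSSH returns a per-host SSH block only when the new password differs
// from the cluster-wide one; nil means "inherit the cluster credentials".
func overrideSSH(clusterSSH SSH, newUser, newPasswd, newPort string) *SSH {
    current := clusterSSH.Passwd
    if clusterSSH.Encrypted {
        current = decrypt(current)
    }
    if newPasswd == "" || newPasswd == current {
        return nil
    }
    return &SSH{
        Encrypted: true,
        User:      newUser,
        Passwd:    encrypt(newPasswd), // stored encrypted, as in the commit
        Port:      newPort,
    }
}

func main() {
    cluster := SSH{User: "root", Passwd: encrypt("old-pass"), Encrypted: true, Port: "22"}
    fmt.Println(overrideSSH(cluster, "root", "old-pass", "22"))   // <nil>: same credential, inherit
    fmt.Println(overrideSSH(cluster, "root", "new-pass", "2222")) // per-host SSH block
}
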
1 change: 1 addition & 0 deletions cmd/sealer/cmd/delete.go
@@ -96,6 +96,7 @@ func init() {
deleteCmd.Flags().StringVarP(&deleteArgs.Nodes, "nodes", "n", "", "reduce Count or IPList to nodes")
deleteCmd.Flags().StringVarP(&deleteClusterFile, "Clusterfile", "f", "", "delete a kubernetes cluster with Clusterfile Annotations")
deleteCmd.Flags().StringVarP(&deleteClusterName, "cluster", "c", "", "delete a kubernetes cluster with cluster name")
deleteCmd.Flags().StringSliceVarP(&deleteArgs.CustomEnv, "env", "e", []string{}, "set custom environment variables")
deleteCmd.Flags().BoolVar(&runtime.ForceDelete, "force", false, "We also can input an --force flag to delete cluster by force")
deleteCmd.Flags().BoolP("all", "a", false, "this flags is for delete nodes, if this is true, empty all node ip")
}
9 changes: 9 additions & 0 deletions cmd/sealer/cmd/join.go
@@ -15,6 +15,7 @@
package cmd

import (
"github.com/sealerio/sealer/pkg/cert"
"github.com/spf13/cobra"

"github.com/sealerio/sealer/apply"
@@ -56,6 +57,14 @@ join default cluster:
func init() {
joinArgs = &apply.Args{}
rootCmd.AddCommand(joinCmd)

joinCmd.Flags().StringVarP(&joinArgs.User, "user", "u", "root", "set baremetal server username")
joinCmd.Flags().StringVarP(&joinArgs.Password, "passwd", "p", "", "set cloud provider or baremetal server password")
joinCmd.Flags().Uint16Var(&joinArgs.Port, "port", 22, "set the sshd service port number for the server (default port: 22)")
joinCmd.Flags().StringVar(&joinArgs.Pk, "pk", cert.GetUserHomeDir()+"/.ssh/id_rsa", "set baremetal server private key")
joinCmd.Flags().StringVar(&joinArgs.PkPassword, "pk-passwd", "", "set baremetal server private key password")
joinCmd.Flags().StringSliceVarP(&joinArgs.CustomEnv, "env", "e", []string{}, "set custom environment variables")

joinCmd.Flags().StringVarP(&joinArgs.Masters, "masters", "m", "", "set Count or IPList to masters")
joinCmd.Flags().StringVarP(&joinArgs.Nodes, "nodes", "n", "", "set Count or IPList to nodes")
joinCmd.Flags().StringVarP(&clusterName, "cluster-name", "c", "", "specify the name of cluster")
2 changes: 1 addition & 1 deletion cmd/sealer/cmd/upgrade.go
@@ -53,7 +53,7 @@ var upgradeCmd = &cobra.Command{
if err != nil {
return err
}
applier, err := apply.NewApplier(desiredCluster)
applier, err := apply.NewDefaultApplier(desiredCluster)
if err != nil {
return err
}
36 changes: 33 additions & 3 deletions pkg/clusterfile/util.go
@@ -21,12 +21,13 @@ import (
"path/filepath"
"strings"

"github.com/sealerio/sealer/utils/hash"

yamlUtils "github.com/sealerio/sealer/utils/yaml"

"github.com/sealerio/sealer/common"
"github.com/sealerio/sealer/pkg/cert"
v2 "github.com/sealerio/sealer/types/api/v2"
k8sRuntime "k8s.io/apimachinery/pkg/runtime"
)

var ErrClusterNotExist = fmt.Errorf("no cluster exist")
@@ -72,13 +73,42 @@ func GetDefaultCluster() (cluster *v2.Cluster, err error) {
return GetClusterFromFile(fmt.Sprintf("%s/.sealer/%s/Clusterfile", userHome, name))
}

func SaveToDisk(cluster k8sRuntime.Object, clusterName string) error {
// SaveToDisk saves the cluster object to a disk file with encrypted ssh credentials
func SaveToDisk(cluster *v2.Cluster, clusterName string) error {
fileName := common.GetClusterWorkClusterfile(clusterName)
err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm)
if err != nil {
return fmt.Errorf("mkdir failed %s %v", fileName, err)
}
cluster = cluster.DeepCopyObject()

// if the user runs the cluster image without a password, skip encryption.
if !cluster.Spec.SSH.Encrypted && cluster.Spec.SSH.Passwd != "" {
passwd, err := hash.AesEncrypt([]byte(cluster.Spec.SSH.Passwd))
if err != nil {
return err
}
cluster.Spec.SSH.Passwd = passwd
cluster.Spec.SSH.Encrypted = true
}

var hosts []v2.Host
for _, host := range cluster.Spec.Hosts {
if len(host.IPS) == 0 {
continue
}
if !host.SSH.Encrypted && host.SSH.Passwd != "" {
passwd, err := hash.AesEncrypt([]byte(host.SSH.Passwd))
if err != nil {
return err
}
host.SSH.Passwd = passwd
host.SSH.Encrypted = true
}
hosts = append(hosts, host)
}

cluster.Spec.Hosts = hosts

err = yamlUtils.MarshalToFile(fileName, cluster)
if err != nil {
return fmt.Errorf("marshal cluster file failed %v", err)
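
SaveToDisk now encrypts the cluster-wide SSH password and every per-host override before the Clusterfile is marshalled, guards on the Encrypted flag so a Clusterfile saved twice is not double-encrypted, and drops hosts with no IPs. Here is a small standalone sketch of that guard, with trimmed stand-in structs and a toy encrypt function in place of hash.AesEncrypt; it is a sketch of the idea under those assumptions, not the project's implementation.

package main

import "fmt"

// Minimal stand-ins for the v1.SSH / v2.Host fields touched here (hypothetical, trimmed).
type SSH struct {
    Passwd    string
    Encrypted bool
}
type Host struct {
    IPS []string
    SSH SSH
}

// encrypt is a placeholder for hash.AesEncrypt, which additionally returns an error.
func encrypt(s string) string { return "enc(" + s + ")" }

// sealSSH encrypts a plaintext password exactly once; empty passwords and
// already-encrypted ones are left untouched.
func sealSSH(s *SSH) {
    if !s.Encrypted && s.Passwd != "" {
        s.Passwd = encrypt(s.Passwd)
        s.Encrypted = true
    }
}

// sealHosts drops hosts with no IPs and seals any per-host credential,
// mirroring the loop added to SaveToDisk.
func sealHosts(hosts []Host) []Host {
    var out []Host
    for _, h := range hosts {
        if len(h.IPS) == 0 {
            continue
        }
        sealSSH(&h.SSH)
        out = append(out, h)
    }
    return out
}

func main() {
    hosts := []Host{
        {IPS: []string{"10.0.0.2"}, SSH: SSH{Passwd: "override"}},
        {IPS: nil, SSH: SSH{Passwd: "ignored"}},
    }
    fmt.Println(sealHosts(hosts)) // [{[10.0.0.2] {enc(override) true}}]
}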