From a3e3834df7521d74c0cc55ee19c3dfafa0df51be Mon Sep 17 00:00:00 2001 From: Lei Liu Date: Sun, 15 May 2022 20:37:59 +0800 Subject: [PATCH] Cleanup: use verb %w with fmt.Errorf() when generate new wrapped error from error --- .../app/controllermanager.go | 2 +- cmd/yurt-tunnel-server/app/options/options.go | 2 +- cmd/yurt-tunnel-server/app/start.go | 2 +- cmd/yurthub/app/options/options.go | 6 ++-- cmd/yurthub/app/start.go | 20 +++++------ .../kubernetes/controller/controller_utils.go | 14 ++++---- .../controller/util/node/controller_utils.go | 2 +- .../node_lifecycle_controller.go | 12 +++---- .../nodelifecycle/scheduler/taint_manager.go | 4 +-- pkg/controller/util/node/controller_utils.go | 14 ++++---- pkg/node-servant/components/yurthub.go | 2 +- pkg/preflight/checks.go | 4 +-- pkg/util/certmanager/certmanager.go | 4 +-- pkg/util/certmanager/pki.go | 4 +-- pkg/util/iptables/iptables.go | 30 +++++++++------- pkg/util/iptables/iptables_linux.go | 6 ++-- pkg/yurtctl/cmd/reset/phases/unmount.go | 1 + pkg/yurtctl/cmd/yurtinit/init.go | 4 +-- .../cmd/yurttest/kindinit/converter.go | 12 +++---- pkg/yurtctl/cmd/yurttest/kindinit/init.go | 2 +- .../cmd/yurttest/kindinit/kindoperator.go | 2 +- .../kubeadm/app/constants/constants_unix.go | 1 + .../app/constants/constants_windows.go | 1 + .../kubeadm/app/phases/kubelet/flags_unix.go | 1 + .../kubeadm/app/preflight/checks.go | 2 +- .../kubeadm/app/preflight/checks_unix.go | 1 + .../kubeadm/app/preflight/checks_windows.go | 1 + .../app/util/initsystem/initsystem_unix.go | 3 +- .../app/util/initsystem/initsystem_windows.go | 31 +++++++++-------- .../app/util/kubeconfig/kubeconfig_test.go | 3 +- .../kubeadm/app/util/runtime/runtime_unix.go | 1 + .../app/util/runtime/runtime_windows.go | 1 + .../apis/config/v1beta1/defaults_linux.go | 1 + .../apis/config/v1beta1/defaults_others.go | 1 + .../config/v1beta1/zz_generated.conversion.go | 1 + .../config/v1beta1/zz_generated.deepcopy.go | 1 + 
.../config/v1beta1/zz_generated.defaults.go | 1 + .../apis/config/zz_generated.deepcopy.go | 1 + .../kubelet/kubeletconfig/util/codec/codec.go | 2 +- pkg/yurtctl/util/edgenode/util.go | 6 ++-- pkg/yurtctl/util/kubernetes/apply_addons.go | 6 ++-- pkg/yurtctl/util/kubernetes/util.go | 34 +++++++++---------- pkg/yurtctl/util/system/util.go | 8 ++--- pkg/yurtctl/util/util.go | 3 +- pkg/yurthub/cachemanager/cache_manager.go | 13 +++---- .../cachemanager/cache_manager_test.go | 9 ++--- .../cachemanager/storage_wrapper_test.go | 5 +-- pkg/yurthub/certificate/hubself/cert_mgr.go | 11 +++--- pkg/yurthub/filter/filter.go | 3 +- pkg/yurthub/healthchecker/node_lease.go | 2 +- pkg/yurthub/proxy/local/local.go | 19 ++++++----- pkg/yurthub/proxy/remote/remote.go | 3 +- pkg/yurthub/proxy/util/util_test.go | 3 +- pkg/yurthub/storage/disk/storage.go | 2 +- pkg/yurthub/storage/disk/storage_test.go | 17 +++++----- pkg/yurthub/util/util_test.go | 7 ++-- pkg/yurttunnel/kubernetes/kubernetes.go | 2 +- pkg/yurttunnel/server/anpserver.go | 10 +++--- pkg/yurttunnel/server/interceptor.go | 6 ++-- pkg/yurttunnel/trafficforward/dns/dns.go | 12 +++---- pkg/yurttunnel/trafficforward/dns/handler.go | 2 +- .../trafficforward/iptables/iptables.go | 2 +- pkg/yurttunnel/util/util.go | 2 +- pkg/yurttunnel/util/util_test.go | 3 +- test/e2e/util/util.go | 4 +-- 65 files changed, 215 insertions(+), 182 deletions(-) diff --git a/cmd/yurt-controller-manager/app/controllermanager.go b/cmd/yurt-controller-manager/app/controllermanager.go index e49378c440c..82ed80815ab 100644 --- a/cmd/yurt-controller-manager/app/controllermanager.go +++ b/cmd/yurt-controller-manager/app/controllermanager.go @@ -319,7 +319,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien // If apiserver is not running we should wait for some time and fail only then. This is particularly // important when we start apiserver and controller manager at the same time. 
if err := genericcontrollermanager.WaitForAPIServer(versionedClient, 10*time.Second); err != nil { - return ControllerContext{}, fmt.Errorf("failed to wait for apiserver being healthy: %v", err) + return ControllerContext{}, fmt.Errorf("failed to wait for apiserver being healthy: %w", err) } ctx := ControllerContext{ diff --git a/cmd/yurt-tunnel-server/app/options/options.go b/cmd/yurt-tunnel-server/app/options/options.go index 8bf6ae23527..1dfb23603ea 100644 --- a/cmd/yurt-tunnel-server/app/options/options.go +++ b/cmd/yurt-tunnel-server/app/options/options.go @@ -142,7 +142,7 @@ func (o *ServerOptions) Config() (*config.Config, error) { cfg.ListenMetaAddr = net.JoinHostPort(o.InsecureBindAddr, o.MetaPort) cfg.RootCert, err = certmanager.GenRootCertPool(o.KubeConfig, constants.YurttunnelCAFile) if err != nil { - return nil, fmt.Errorf("fail to generate the rootCertPool: %s", err) + return nil, fmt.Errorf("fail to generate the rootCertPool: %w", err) } // function 'kubeutil.CreateClientSet' will try to create the clientset diff --git a/cmd/yurt-tunnel-server/app/start.go b/cmd/yurt-tunnel-server/app/start.go index d8a7ee4a38f..bc4fb7b43f2 100644 --- a/cmd/yurt-tunnel-server/app/start.go +++ b/cmd/yurt-tunnel-server/app/start.go @@ -87,7 +87,7 @@ func Run(cfg *config.CompletedConfig, stopCh <-chan struct{}) error { cfg.ListenAddrForMaster, cfg.DNSSyncPeriod) if err != nil { - return fmt.Errorf("fail to create a new dnsController, %v", err) + return fmt.Errorf("fail to create a new dnsController, %w", err) } go dnsController.Run(stopCh) } diff --git a/cmd/yurthub/app/options/options.go b/cmd/yurthub/app/options/options.go index 36ccb761bbc..cee8d1d10a6 100644 --- a/cmd/yurthub/app/options/options.go +++ b/cmd/yurthub/app/options/options.go @@ -126,7 +126,7 @@ func ValidateOptions(options *YurtHubOptions) error { } if err := verifyDummyIP(options.HubAgentDummyIfIP); err != nil { - return fmt.Errorf("dummy ip %s is not invalid, %v", options.HubAgentDummyIfIP, err) + 
return fmt.Errorf("dummy ip %s is not invalid, %w", options.HubAgentDummyIfIP, err) } return nil @@ -178,7 +178,7 @@ func verifyDummyIP(dummyIP string) error { _, dummyIfIPNet, err := net.ParseCIDR(DummyIfCIDR) if err != nil { - return fmt.Errorf("cidr(%s) is invalid, %v", DummyIfCIDR, err) + return fmt.Errorf("cidr(%s) is invalid, %w", DummyIfCIDR, err) } if !dummyIfIPNet.Contains(dip) { @@ -187,7 +187,7 @@ func verifyDummyIP(dummyIP string) error { _, exclusiveIPNet, err := net.ParseCIDR(ExclusiveCIDR) if err != nil { - return fmt.Errorf("cidr(%s) is invalid, %v", ExclusiveCIDR, err) + return fmt.Errorf("cidr(%s) is invalid, %w", ExclusiveCIDR, err) } if exclusiveIPNet.Contains(dip) { diff --git a/cmd/yurthub/app/start.go b/cmd/yurthub/app/start.go index f3dd6a0e5ef..a3a79c5d092 100644 --- a/cmd/yurthub/app/start.go +++ b/cmd/yurthub/app/start.go @@ -90,14 +90,14 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. create cert manager with %s mode", trace, cfg.CertMgrMode) certManager, err := cmr.New(cfg.CertMgrMode, cfg) if err != nil { - return fmt.Errorf("could not create certificate manager, %v", err) + return fmt.Errorf("could not create certificate manager, %w", err) } trace++ klog.Infof("%d. new transport manager", trace) transportManager, err := transport.NewTransportManager(certManager, stopCh) if err != nil { - return fmt.Errorf("could not new transport manager, %v", err) + return fmt.Errorf("could not new transport manager, %w", err) } trace++ @@ -106,7 +106,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. create health checker for remote servers ", trace) healthChecker, err = healthchecker.NewHealthChecker(cfg, transportManager, stopCh) if err != nil { - return fmt.Errorf("could not new health checker, %v", err) + return fmt.Errorf("could not new health checker, %w", err) } } else { klog.Infof("%d. 
disable health checker for node %s because it is a cloud node", trace, cfg.NodeName) @@ -120,14 +120,14 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. new restConfig manager for %s mode", trace, cfg.CertMgrMode) restConfigMgr, err := rest.NewRestConfigManager(cfg, certManager, healthChecker) if err != nil { - return fmt.Errorf("could not new restConfig manager, %v", err) + return fmt.Errorf("could not new restConfig manager, %w", err) } trace++ klog.Infof("%d. create tls config for secure servers ", trace) cfg.TLSConfig, err = server.GenUseCertMgrAndTLSConfig(restConfigMgr, certManager, filepath.Join(cfg.RootDir, "pki"), cfg.YurtHubProxyServerSecureDummyAddr, stopCh) if err != nil { - return fmt.Errorf("could not create tls config, %v", err) + return fmt.Errorf("could not create tls config, %w", err) } trace++ @@ -136,7 +136,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. new cache manager with storage wrapper and serializer manager", trace) cacheMgr, err = cachemanager.NewCacheManager(cfg.StorageWrapper, cfg.SerializerManager, cfg.RESTMapperManager, cfg.SharedFactory) if err != nil { - return fmt.Errorf("could not new cache manager, %v", err) + return fmt.Errorf("could not new cache manager, %w", err) } } else { klog.Infof("%d. disable cache manager for node %s because it is a cloud node", trace, cfg.NodeName) @@ -147,7 +147,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. 
new gc manager for node %s, and gc frequency is a random time between %d min and %d min", trace, cfg.NodeName, cfg.GCFrequency, 3*cfg.GCFrequency) gcMgr, err := gc.NewGCManager(cfg, restConfigMgr, stopCh) if err != nil { - return fmt.Errorf("could not new gc manager, %v", err) + return fmt.Errorf("could not new gc manager, %w", err) } gcMgr.Run() } else { @@ -163,7 +163,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { yurtProxyHandler, err := proxy.NewYurtReverseProxyHandler(cfg, cacheMgr, transportManager, healthChecker, certManager, tenantMgr, stopCh) if err != nil { - return fmt.Errorf("could not create reverse proxy handler, %v", err) + return fmt.Errorf("could not create reverse proxy handler, %w", err) } trace++ @@ -171,7 +171,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. create dummy network interface %s and init iptables manager", trace, cfg.HubAgentDummyIfName) networkMgr, err := network.NewNetworkManager(cfg) if err != nil { - return fmt.Errorf("could not create network manager, %v", err) + return fmt.Errorf("could not create network manager, %w", err) } networkMgr.Run(stopCh) trace++ @@ -185,7 +185,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error { klog.Infof("%d. 
new %s server and begin to serve, proxy server: %s, secure proxy server: %s, hub server: %s", trace, projectinfo.GetHubName(), cfg.YurtHubProxyServerAddr, cfg.YurtHubProxyServerSecureAddr, cfg.YurtHubServerAddr) s, err := server.NewYurtHubServer(cfg, certManager, yurtProxyHandler) if err != nil { - return fmt.Errorf("could not create hub server, %v", err) + return fmt.Errorf("could not create hub server, %w", err) } s.Run() klog.Infof("hub agent exited") diff --git a/pkg/controller/kubernetes/controller/controller_utils.go b/pkg/controller/kubernetes/controller/controller_utils.go index 1d77dcf6c73..04107c70471 100644 --- a/pkg/controller/kubernetes/controller/controller_utils.go +++ b/pkg/controller/kubernetes/controller/controller_utils.go @@ -153,7 +153,7 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { oldData, err := json.Marshal(oldNode) if err != nil { - return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) + return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err) } newTaints := newNode.Spec.Taints @@ -161,12 +161,12 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n newNodeClone.Spec.Taints = newTaints newData, err := json.Marshal(newNodeClone) if err != nil { - return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) + return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) if err != nil { - return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err) } _, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, 
metav1.PatchOptions{}) @@ -201,18 +201,18 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la oldData, err := json.Marshal(node) if err != nil { - return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err) + return fmt.Errorf("failed to marshal the existing node %#v: %w", node, err) } newData, err := json.Marshal(newNode) if err != nil { - return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err) + return fmt.Errorf("failed to marshal the new node %#v: %w", newNode, err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) if err != nil { - return fmt.Errorf("failed to create a two-way merge patch: %v", err) + return fmt.Errorf("failed to create a two-way merge patch: %w", err) } if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return fmt.Errorf("failed to patch the node: %v", err) + return fmt.Errorf("failed to patch the node: %w", err) } return nil }) diff --git a/pkg/controller/kubernetes/controller/util/node/controller_utils.go b/pkg/controller/kubernetes/controller/util/node/controller_utils.go index 04f49e9c72d..429cb85a437 100644 --- a/pkg/controller/kubernetes/controller/util/node/controller_utils.go +++ b/pkg/controller/kubernetes/controller/util/node/controller_utils.go @@ -57,7 +57,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record. 
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil { if apierrors.IsConflict(err) { updateErrList = append(updateErrList, - fmt.Errorf("update status failed for pod %q: %v", Pod(pod), err)) + fmt.Errorf("update status failed for pod %q: %w", Pod(pod), err)) continue } } diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 6d43357981d..ce0dd5fc15c 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -735,7 +735,7 @@ func (nc *Controller) doEvictionPass() { nodeUID, _ := value.UID.(string) pods, err := nc.getPodsAssignedToNode(value.Value) if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err)) + utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %w", value.Value, err)) return false, 0 } remaining, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore) @@ -743,7 +743,7 @@ func (nc *Controller) doEvictionPass() { // We are not setting eviction status here. // New pods will be handled by zonePodEvictor retry // instead of immediate pod eviction. 
- utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) + utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %w", value.Value, err)) return false, 0 } if !nc.nodeEvictionMap.setStatus(value.Value, evicted) { @@ -829,7 +829,7 @@ func (nc *Controller) monitorNodeHealth() error { if currentReadyCondition != nil { pods, err := nc.getPodsAssignedToNode(node.Name) if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to list pods of node %v: %v", node.Name, err)) + utilruntime.HandleError(fmt.Errorf("unable to list pods of node %v: %w", node.Name, err)) if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue { // If error happened during node status transition (Ready -> NotReady) // we need to mark node for retry to force MarkPodsNotReady execution @@ -842,7 +842,7 @@ func (nc *Controller) monitorNodeHealth() error { nc.processTaintBaseEviction(node, &observedReadyCondition) } else { if err := nc.processNoTaintBaseEviction(node, &observedReadyCondition, gracePeriod, pods); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %v; queuing for retry", node.Name, err)) + utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %w; queuing for retry", node.Name, err)) } } @@ -854,7 +854,7 @@ func (nc *Controller) monitorNodeHealth() error { fallthrough case needsRetry && observedReadyCondition.Status != v1.ConditionTrue: if err = nodeutil.MarkPodsNotReady(nc.kubeClient, pods, node.Name, node); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err)) + utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %w; queuing for retry", node.Name, err)) nc.nodesToRetry.Store(node.Name, struct{}{}) continue } @@ -1450,7 +1450,7 @@ func (nc *Controller) evictPods(node *v1.Node, pods []*v1.Pod) (bool, error) { // Handling immediate 
pod deletion. _, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore) if err != nil { - return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err) + return false, fmt.Errorf("unable to delete pods from node %q: %w", node.Name, err) } return false, nil } diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index 34a7e023243..5de3cf6eceb 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -404,7 +404,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) { tc.cancelWorkWithEvent(podNamespacedName) return } - utilruntime.HandleError(fmt.Errorf("could not get pod %s/%s: %v", podUpdate.podName, podUpdate.podNamespace, err)) + utilruntime.HandleError(fmt.Errorf("could not get pod %s/%s: %w", podUpdate.podName, podUpdate.podNamespace, err)) return } @@ -445,7 +445,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) { delete(tc.taintedNodes, nodeUpdate.nodeName) return } - utilruntime.HandleError(fmt.Errorf("cannot get node %s: %v", nodeUpdate.nodeName, err)) + utilruntime.HandleError(fmt.Errorf("cannot get node %s: %w", nodeUpdate.nodeName, err)) return } diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index 45e716ee84e..2e57c8d6997 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -68,7 +68,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record. 
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil { if apierrors.IsConflict(err) { updateErrList = append(updateErrList, - fmt.Errorf("update status failed for pod %q: %v", ctlnode.Pod(pod), err)) + fmt.Errorf("update status failed for pod %q: %w", ctlnode.Pod(pod), err)) continue } } @@ -205,7 +205,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints if err != nil { utilruntime.HandleError( fmt.Errorf( - "unable to taint %+v unresponsive Node %q: %v", + "unable to taint %+v unresponsive Node %q: %w", taintsToAdd, node.Name, err)) @@ -217,7 +217,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints if err != nil { utilruntime.HandleError( fmt.Errorf( - "unable to remove %+v unneeded taint from unresponsive Node %q: %v", + "unable to remove %+v unneeded taint from unresponsive Node %q: %w", taintsToRemove, node.Name, err)) @@ -235,7 +235,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[ if err != nil { utilruntime.HandleError( fmt.Errorf( - "unable to update labels %+v for Node %q: %v", + "unable to update labels %+v for Node %q: %w", labelsToUpdate, node.Name, err)) @@ -250,7 +250,7 @@ func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) { return func(originalObj interface{}) { node := originalObj.(*v1.Node).DeepCopy() if err := f(node); err != nil { - utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %v", err)) + utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %w", err)) } } } @@ -262,7 +262,7 @@ func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldOb prevNode := origOldObj.(*v1.Node).DeepCopy() if err := f(prevNode, node); err != nil { - utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err)) + utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %w", err)) } } } @@ -287,7 +287,7 @@ func 
CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{}) } node := originalNode.DeepCopy() if err := f(node); err != nil { - utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err)) + utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %w", err)) } } } diff --git a/pkg/node-servant/components/yurthub.go b/pkg/node-servant/components/yurthub.go index e4c91f8e0f2..2fae137120a 100644 --- a/pkg/node-servant/components/yurthub.go +++ b/pkg/node-servant/components/yurthub.go @@ -199,7 +199,7 @@ func pingClusterHealthz(client *http.Client, addr string) (bool, error) { b, err := io.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { - return false, fmt.Errorf("failed to read response of cluster healthz, %v", err) + return false, fmt.Errorf("failed to read response of cluster healthz, %w", err) } if resp.StatusCode != http.StatusOK { diff --git a/pkg/preflight/checks.go b/pkg/preflight/checks.go index a3812b0f5c5..145a61ba46e 100644 --- a/pkg/preflight/checks.go +++ b/pkg/preflight/checks.go @@ -181,7 +181,7 @@ func (nc NodeServantJobCheck) Check() (warnings []error, errorList []error) { defer wg.Done() if err := kubeutil.RunJobAndCleanup(nc.cliSet, &entity, nc.waitServantJobTimeout, nc.checkServantJobPeriod); err != nil { - msg := fmt.Errorf("fail to run servant job(%s): %s\n", entity.GetName(), err) + msg := fmt.Errorf("fail to run servant job(%s): %w\n", entity.GetName(), err) res <- msg } else { klog.V(1).Infof("servant job(%s) has succeeded\n", entity.GetName()) @@ -273,7 +273,7 @@ func (dac DirExistingCheck) Check() (warnings, errorList []error) { defer f.Close() _, err = f.Readdirnames(1) - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, []error{errors.Errorf("%s is empty", dac.Path)} } return nil, nil diff --git a/pkg/util/certmanager/certmanager.go b/pkg/util/certmanager/certmanager.go index c5588f93e56..b230da613d0 100644 --- a/pkg/util/certmanager/certmanager.go +++ 
b/pkg/util/certmanager/certmanager.go @@ -191,7 +191,7 @@ func newCertManager( certificateStore, err := store.NewFileStoreWrapper(componentName, certDir, certDir, "", "") if err != nil { - return nil, fmt.Errorf("failed to initialize the server certificate store: %v", err) + return nil, fmt.Errorf("failed to initialize the server certificate store: %w", err) } getTemplate := func() *x509.CertificateRequest { @@ -223,7 +223,7 @@ func newCertManager( CertificateStore: certificateStore, }) if err != nil { - return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err) + return nil, fmt.Errorf("failed to initialize server certificate manager: %w", err) } return certManager, nil diff --git a/pkg/util/certmanager/pki.go b/pkg/util/certmanager/pki.go index c50cc381c1e..ec3366781bb 100644 --- a/pkg/util/certmanager/pki.go +++ b/pkg/util/certmanager/pki.go @@ -74,7 +74,7 @@ func GenRootCertPool(kubeConfig, caFile string) (*x509.CertPool, error) { // load the root ca from the given kubeconfig file config, err := clientcmd.LoadFromFile(kubeConfig) if err != nil || config == nil { - return nil, fmt.Errorf("failed to load the kubeconfig file(%s), %v", + return nil, fmt.Errorf("failed to load the kubeconfig file(%s), %w", kubeConfig, err) } @@ -163,7 +163,7 @@ func GenCertPoolUseCA(caFile string) (*x509.CertPool, error) { if os.IsNotExist(err) { return nil, fmt.Errorf("CA file(%s) doesn't exist", caFile) } - return nil, fmt.Errorf("fail to stat the CA file(%s): %s", caFile, err) + return nil, fmt.Errorf("fail to stat the CA file(%s): %w", caFile, err) } caData, err := os.ReadFile(caFile) diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go index bfc66ade8f4..3e92c561ae2 100644 --- a/pkg/util/iptables/iptables.go +++ b/pkg/util/iptables/iptables.go @@ -19,6 +19,7 @@ package iptables import ( "bytes" "context" + "errors" "fmt" "regexp" "strings" @@ -238,12 +239,13 @@ func (runner *runner) EnsureChain(table Table, chain Chain) (bool, 
error) { out, err := runner.run(opCreateChain, fullArgs) if err != nil { - if ee, ok := err.(utilexec.ExitError); ok { + var ee utilexec.ExitError + if errors.As(err, &ee) { if ee.Exited() && ee.ExitStatus() == 1 { return true, nil } } - return false, fmt.Errorf("error creating chain %q: %v: %s", chain, err, out) + return false, fmt.Errorf("error creating chain %q: %w: %s", chain, err, out) } return false, nil } @@ -257,7 +259,7 @@ func (runner *runner) FlushChain(table Table, chain Chain) error { out, err := runner.run(opFlushChain, fullArgs) if err != nil { - return fmt.Errorf("error flushing chain %q: %v: %s", chain, err, out) + return fmt.Errorf("error flushing chain %q: %w: %s", chain, err, out) } return nil } @@ -272,7 +274,7 @@ func (runner *runner) DeleteChain(table Table, chain Chain) error { // TODO: we could call iptables -S first, ignore the output and check for non-zero return (more like DeleteRule) out, err := runner.run(opDeleteChain, fullArgs) if err != nil { - return fmt.Errorf("error deleting chain %q: %v: %s", chain, err, out) + return fmt.Errorf("error deleting chain %q: %w: %s", chain, err, out) } return nil } @@ -293,7 +295,7 @@ func (runner *runner) EnsureRule(position RulePosition, table Table, chain Chain } out, err := runner.run(operation(position), fullArgs) if err != nil { - return false, fmt.Errorf("error appending rule: %v: %s", err, out) + return false, fmt.Errorf("error appending rule: %w: %s", err, out) } return false, nil } @@ -314,7 +316,7 @@ func (runner *runner) DeleteRule(table Table, chain Chain, args ...string) error } out, err := runner.run(opDeleteRule, fullArgs) if err != nil { - return fmt.Errorf("error deleting rule: %v: %s", err, out) + return fmt.Errorf("error deleting rule: %w: %s", err, out) } return nil } @@ -404,7 +406,7 @@ func (runner *runner) restoreInternal(args []string, data []byte, flush FlushFla cmd.SetStdin(bytes.NewBuffer(data)) b, err := cmd.CombinedOutput() if err != nil { - return fmt.Errorf("%v (%s)", 
err, b) + return fmt.Errorf("%w (%s)", err, b) } return nil } @@ -470,7 +472,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st klog.V(1).Infof("running %s -t %s", iptablesSaveCmd, string(table)) out, err := runner.exec.Command(iptablesSaveCmd, "-t", string(table)).CombinedOutput() if err != nil { - return false, fmt.Errorf("error checking rule: %v", err) + return false, fmt.Errorf("error checking rule: %w", err) } // Sadly, iptables has inconsistent quoting rules for comments. Just remove all quotes. @@ -518,20 +520,21 @@ func (runner *runner) checkRuleUsingCheck(args []string) (bool, error) { defer cancel() out, err := runner.runContext(ctx, opCheckRule, args) - if ctx.Err() == context.DeadlineExceeded { + if errors.Is(ctx.Err(), context.DeadlineExceeded) { return false, fmt.Errorf("timed out while checking rules") } if err == nil { return true, nil } - if ee, ok := err.(utilexec.ExitError); ok { + var ee utilexec.ExitError + if errors.As(err, &ee) { // iptables uses exit(1) to indicate a failure of the operation, // as compared to a malformed commandline, for example. if ee.Exited() && ee.ExitStatus() == 1 { return false, nil } } - return false, fmt.Errorf("error checking rule: %v: %s", err, out) + return false, fmt.Errorf("error checking rule: %w: %s", err, out) } const ( @@ -638,7 +641,7 @@ func getIPTablesVersion(exec utilexec.Interface, protocol Protocol) (*utilversio } version, err := utilversion.ParseGeneric(match[1]) if err != nil { - return nil, fmt.Errorf("iptables version %q is not a valid version string: %v", match[1], err) + return nil, fmt.Errorf("iptables version %q is not a valid version string: %w", match[1], err) } return version, nil @@ -748,7 +751,8 @@ const iptablesStatusResourceProblem = 4 // problem" and was unable to attempt the request. In particular, this will be true if it // times out trying to get the iptables lock. 
func isResourceError(err error) bool { - if ee, isExitError := err.(utilexec.ExitError); isExitError { + var ee utilexec.ExitError + if errors.As(err, &ee) { return ee.ExitStatus() == iptablesStatusResourceProblem } return false diff --git a/pkg/util/iptables/iptables_linux.go b/pkg/util/iptables/iptables_linux.go index 94202e3de6b..237d67acb1f 100644 --- a/pkg/util/iptables/iptables_linux.go +++ b/pkg/util/iptables/iptables_linux.go @@ -69,7 +69,7 @@ func grabIptablesLocks(lockfilePath string) (iptablesLocker, error) { // Roughly duplicate iptables 1.6.x xtables_lock() function. l.lock16, err = os.OpenFile(lockfilePath, os.O_CREATE, 0600) if err != nil { - return nil, fmt.Errorf("failed to open iptables lock %s: %v", lockfilePath, err) + return nil, fmt.Errorf("failed to open iptables lock %s: %w", lockfilePath, err) } if err := wait.PollImmediate(200*time.Millisecond, 2*time.Second, func() (bool, error) { @@ -78,7 +78,7 @@ func grabIptablesLocks(lockfilePath string) (iptablesLocker, error) { } return true, nil }); err != nil { - return nil, fmt.Errorf("failed to acquire new iptables lock: %v", err) + return nil, fmt.Errorf("failed to acquire new iptables lock: %w", err) } // Roughly duplicate iptables 1.4.x xtables_lock() function. 
@@ -89,7 +89,7 @@ func grabIptablesLocks(lockfilePath string) (iptablesLocker, error) { } return true, nil }); err != nil { - return nil, fmt.Errorf("failed to acquire old iptables lock: %v", err) + return nil, fmt.Errorf("failed to acquire old iptables lock: %w", err) } success = true diff --git a/pkg/yurtctl/cmd/reset/phases/unmount.go b/pkg/yurtctl/cmd/reset/phases/unmount.go index ab0f81bb118..1796da898b9 100644 --- a/pkg/yurtctl/cmd/reset/phases/unmount.go +++ b/pkg/yurtctl/cmd/reset/phases/unmount.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/pkg/yurtctl/cmd/yurtinit/init.go b/pkg/yurtctl/cmd/yurtinit/init.go index 05a9e8404f5..4c7494f73b5 100644 --- a/pkg/yurtctl/cmd/yurtinit/init.go +++ b/pkg/yurtctl/cmd/yurtinit/init.go @@ -191,7 +191,7 @@ func CheckAndInstallSealer() error { if b, err := exec.Command("sealer", "version").CombinedOutput(); err == nil { info := make(map[string]string) if err := json.Unmarshal(b, &info); err != nil { - return fmt.Errorf("Can't get the existing sealer version: %v", err) + return fmt.Errorf("Can't get the existing sealer version: %w", err) } sealerVersion := info["gitVersion"] if strutil.IsInStringLst(ValidSealerVersions, sealerVersion) { @@ -209,7 +209,7 @@ func CheckAndInstallSealer() error { savePath := fmt.Sprintf("%s/sealer-%s-linux-%s.tar.gz", TmpDownloadDir, DefaultSealerVersion, runtime.GOARCH) klog.V(1).Infof("Download sealer from: %s", packageUrl) if err := util.DownloadFile(packageUrl, savePath, 3); err != nil { - return fmt.Errorf("Download sealer fail: %v", err) + return fmt.Errorf("Download sealer fail: %w", err) } if err := util.Untar(savePath, TmpDownloadDir); err != nil { return err diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/converter.go b/pkg/yurtctl/cmd/yurttest/kindinit/converter.go index f703f404630..797defb967e 100644 --- a/pkg/yurtctl/cmd/yurttest/kindinit/converter.go +++ b/pkg/yurtctl/cmd/yurttest/kindinit/converter.go @@ -105,13 +105,13 @@ func (c *ClusterConverter) Run() 
error { func (c *ClusterConverter) labelEdgeNodes() error { nodeLst, err := c.ClientSet.CoreV1().Nodes().List(context.Background(), v1.ListOptions{}) if err != nil { - return fmt.Errorf("failed to list nodes, %s", err) + return fmt.Errorf("failed to list nodes, %w", err) } for _, node := range nodeLst.Items { isEdge := strutil.IsInStringLst(c.EdgeNodes, node.Name) if _, err = kubeutil.AddEdgeWorkerLabelAndAutonomyAnnotation( c.ClientSet, &node, strconv.FormatBool(isEdge), "false"); err != nil { - return fmt.Errorf("failed to add label to edge node %s, %s", node.Name, err) + return fmt.Errorf("failed to add label to edge node %s, %w", node.Name, err) } } return nil @@ -206,7 +206,7 @@ func prepareYurthubStart(cliSet *kubernetes.Clientset, kcfg string) (string, err // prepare join-token for yurthub joinToken, err := kubeutil.GetOrCreateJoinTokenString(cliSet) if err != nil || joinToken == "" { - return "", fmt.Errorf("fail to get join token: %v", err) + return "", fmt.Errorf("fail to get join token: %w", err) } return joinToken, nil } @@ -217,13 +217,13 @@ func prepareClusterInfoConfigMap(client *kubernetes.Clientset, file string) erro if err != nil && apierrors.IsNotFound(err) { // Create the cluster-info ConfigMap with the associated RBAC rules if err := kubeadmapi.CreateBootstrapConfigMapIfNotExists(client, file); err != nil { - return fmt.Errorf("error creating bootstrap ConfigMap, %v", err) + return fmt.Errorf("error creating bootstrap ConfigMap, %w", err) } if err := kubeadmapi.CreateClusterInfoRBACRules(client); err != nil { - return fmt.Errorf("error creating clusterinfo RBAC rules, %v", err) + return fmt.Errorf("error creating clusterinfo RBAC rules, %w", err) } } else if err != nil || info == nil { - return fmt.Errorf("fail to get configmap, %v", err) + return fmt.Errorf("fail to get configmap, %w", err) } else { klog.V(4).Infof("%s/%s configmap already exists, skip to prepare it", info.Namespace, info.Name) } diff --git 
a/pkg/yurtctl/cmd/yurttest/kindinit/init.go b/pkg/yurtctl/cmd/yurttest/kindinit/init.go index fb868108dac..b10413b9e2b 100644 --- a/pkg/yurtctl/cmd/yurttest/kindinit/init.go +++ b/pkg/yurtctl/cmd/yurttest/kindinit/init.go @@ -374,7 +374,7 @@ func (i *Initializer) loadImagesToKindNodes(images, nodes []string) error { func getGoBinPath() (string, error) { gopath, err := exec.Command("bash", "-c", "go env GOPATH").CombinedOutput() if err != nil { - return "", fmt.Errorf("failed to get GOPATH, %s", err) + return "", fmt.Errorf("failed to get GOPATH, %w", err) } return filepath.Join(string(gopath), "bin"), nil } diff --git a/pkg/yurtctl/cmd/yurttest/kindinit/kindoperator.go b/pkg/yurtctl/cmd/yurttest/kindinit/kindoperator.go index 774fe9e8bf3..34dbd20220a 100644 --- a/pkg/yurtctl/cmd/yurttest/kindinit/kindoperator.go +++ b/pkg/yurtctl/cmd/yurttest/kindinit/kindoperator.go @@ -103,7 +103,7 @@ func (k *KindOperator) KindInstall() error { } minorVer, err := k.goMinorVersion() if err != nil { - return fmt.Errorf("failed to get go minor version, %s", err) + return fmt.Errorf("failed to get go minor version, %w", err) } installCMD := k.getInstallCmd(minorVer, defaultKindVersion) klog.V(1).Infof("start to install kind, running command: %s", installCMD) diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go index 16ff72d5556..812faf064b5 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go index 6daae0a1fff..1a44a82723d 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/constants/constants_windows.go @@ -1,3 +1,4 @@ +//go:build 
windows // +build windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go index 17b13366605..b2e00179e4a 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/phases/kubelet/flags_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go index 01862fafd32..60ef215d53d 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks.go @@ -235,7 +235,7 @@ func (dac DirAvailableCheck) Check() (warnings, errorList []error) { defer f.Close() _, err = f.Readdirnames(1) - if err != io.EOF { + if !errors.Is(err, io.EOF) { return nil, []error{errors.Errorf("%s is not empty", dac.Path)} } diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go index 2bbbd9ca2d3..5a9da3b7c44 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go index 3076480d2f2..5828fb96ff8 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/preflight/checks_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go index 5cbf9099e75..f1778eff330 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go +++ 
b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* @@ -86,7 +87,7 @@ func (sysd SystemdInitSystem) EnableCommand(service string) string { // reloadSystemd reloads the systemd daemon func (sysd SystemdInitSystem) reloadSystemd() error { if err := exec.Command("systemctl", "daemon-reload").Run(); err != nil { - return fmt.Errorf("failed to reload systemd: %v", err) + return fmt.Errorf("failed to reload systemd: %w", err) } return nil } diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go index 394272ddcb3..fb1dd8a8b99 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/initsystem/initsystem_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -45,14 +46,14 @@ func (sysd WindowsInitSystem) ServiceStart(service string) error { s, err := m.OpenService(service) if err != nil { - return fmt.Errorf("could not access service %s: %v", service, err) + return fmt.Errorf("could not access service %s: %w", service, err) } defer s.Close() // Check if service is already started status, err := s.Query() if err != nil { - return fmt.Errorf("could not query service %s: %v", service, err) + return fmt.Errorf("could not query service %s: %w", service, err) } if status.State != svc.Stopped && status.State != svc.StopPending { @@ -67,20 +68,20 @@ func (sysd WindowsInitSystem) ServiceStart(service string) error { time.Sleep(300 * time.Millisecond) status, err = s.Query() if err != nil { - return fmt.Errorf("could not retrieve %s service status: %v", service, err) + return fmt.Errorf("could not retrieve %s service status: %w", service, err) } } // Start the service err = s.Start("is", "manual-started") if err != nil { - return fmt.Errorf("could not start service %s: %v", service, err) + return 
fmt.Errorf("could not start service %s: %w", service, err) } // Check that the start was successful status, err = s.Query() if err != nil { - return fmt.Errorf("could not query service %s: %v", service, err) + return fmt.Errorf("could not query service %s: %w", service, err) } timeout = time.Now().Add(10 * time.Second) for status.State != svc.Running { @@ -90,7 +91,7 @@ func (sysd WindowsInitSystem) ServiceStart(service string) error { time.Sleep(300 * time.Millisecond) status, err = s.Query() if err != nil { - return fmt.Errorf("could not retrieve %s service status: %v", service, err) + return fmt.Errorf("could not retrieve %s service status: %w", service, err) } } return nil @@ -99,10 +100,10 @@ func (sysd WindowsInitSystem) ServiceStart(service string) error { // ServiceRestart tries to reload the environment and restart the specific service func (sysd WindowsInitSystem) ServiceRestart(service string) error { if err := sysd.ServiceStop(service); err != nil { - return fmt.Errorf("couldn't stop service %s: %v", service, err) + return fmt.Errorf("couldn't stop service %s: %w", service, err) } if err := sysd.ServiceStart(service); err != nil { - return fmt.Errorf("couldn't start service %s: %v", service, err) + return fmt.Errorf("couldn't start service %s: %w", service, err) } return nil @@ -119,14 +120,14 @@ func (sysd WindowsInitSystem) ServiceStop(service string) error { s, err := m.OpenService(service) if err != nil { - return fmt.Errorf("could not access service %s: %v", service, err) + return fmt.Errorf("could not access service %s: %w", service, err) } defer s.Close() // Check if service is already stopped status, err := s.Query() if err != nil { - return fmt.Errorf("could not query service %s: %v", service, err) + return fmt.Errorf("could not query service %s: %w", service, err) } if status.State == svc.Stopped { @@ -143,7 +144,7 @@ func (sysd WindowsInitSystem) ServiceStop(service string) error { time.Sleep(300 * time.Millisecond) status, err = s.Query() if 
err != nil { - return fmt.Errorf("could not retrieve %s service status: %v", service, err) + return fmt.Errorf("could not retrieve %s service status: %w", service, err) } } return nil @@ -152,13 +153,13 @@ func (sysd WindowsInitSystem) ServiceStop(service string) error { // Stop the service status, err = s.Control(svc.Stop) if err != nil { - return fmt.Errorf("could not stop service %s: %v", service, err) + return fmt.Errorf("could not stop service %s: %w", service, err) } // Check that the stop was successful status, err = s.Query() if err != nil { - return fmt.Errorf("could not query service %s: %v", service, err) + return fmt.Errorf("could not query service %s: %w", service, err) } timeout := time.Now().Add(10 * time.Second) for status.State != svc.Stopped { @@ -168,7 +169,7 @@ func (sysd WindowsInitSystem) ServiceStop(service string) error { time.Sleep(300 * time.Millisecond) status, err = s.Query() if err != nil { - return fmt.Errorf("could not retrieve %s service status: %v", service, err) + return fmt.Errorf("could not retrieve %s service status: %w", service, err) } } return nil @@ -238,7 +239,7 @@ func (sysd WindowsInitSystem) ServiceIsActive(service string) bool { func GetInitSystem() (InitSystem, error) { m, err := mgr.Connect() if err != nil { - return nil, fmt.Errorf("no supported init system detected: %v", err) + return nil, fmt.Errorf("no supported init system detected: %w", err) } defer m.Disconnect() return &WindowsInitSystem{}, nil diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go index 66dee617a3c..749fcbe5f92 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/kubeconfig/kubeconfig_test.go @@ -18,6 +18,7 @@ package kubeconfig import ( "bytes" + "errors" "fmt" "os" "testing" @@ -169,7 +170,7 @@ func TestWriteKubeconfigToDisk(t *testing.T) { ) configPath := 
fmt.Sprintf("%s/etc/kubernetes/%s.conf", tmpdir, rt.name) err := WriteToDisk(configPath, c) - if err != rt.expected { + if !errors.Is(err, rt.expected) { t.Errorf( "failed WriteToDisk with an error:\n\texpected: %s\n\t actual: %s", rt.expected, diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go index b15c3037313..11bc059dea7 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows /* diff --git a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go index 0c6a7b496dc..35a84cd3855 100644 --- a/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go +++ b/pkg/yurtctl/kubernetes/kubeadm/app/util/runtime/runtime_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go index 7e1060a03e4..6d3ec77dc7c 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go index 74464a3c840..2b8ac3d4306 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/defaults_others.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go index 
f9d180049f3..84fe2e78902 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.conversion.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go index 016d58c856a..a47bc95e8a7 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go index 7c127d46e2f..eb800358626 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/v1beta1/zz_generated.defaults.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go b/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go index 35acde4a0d6..5584478a392 100644 --- a/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go +++ b/pkg/yurtctl/kubernetes/kubelet/apis/config/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* diff --git a/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go b/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go index 91dec5e7346..9e2bd5f28ba 100644 --- a/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go +++ b/pkg/yurtctl/kubernetes/kubelet/kubeletconfig/util/codec/codec.go @@ -91,7 +91,7 @@ func 
DecodeKubeletConfiguration(kubeletCodecs *serializer.CodecFactory, data []b if lenientErr != nil { // Lenient decoding failed with the current version, return the // original strict error. - return nil, fmt.Errorf("failed lenient decoding: %v", err) + return nil, fmt.Errorf("failed lenient decoding: %w", err) } // Continue with the v1beta1 object that was decoded leniently, but emit a warning. klog.Warningf("using lenient decoding as strict decoding failed: %v", err) diff --git a/pkg/yurtctl/util/edgenode/util.go b/pkg/yurtctl/util/edgenode/util.go index d9cb956158c..e785fb05ab2 100644 --- a/pkg/yurtctl/util/edgenode/util.go +++ b/pkg/yurtctl/util/edgenode/util.go @@ -64,7 +64,7 @@ func GetContentFormFile(filename string, regularExpression string) ([]string, er func GetSingleContentFromFile(filename string, regularExpression string) (string, error) { contents, err := GetContentFormFile(filename, regularExpression) if err != nil { - return "", fmt.Errorf("failed to read file %s, %v", filename, err) + return "", fmt.Errorf("failed to read file %s, %w", filename, err) } if contents == nil { return "", fmt.Errorf("no matching string %s in file %s", regularExpression, filename) @@ -89,11 +89,11 @@ func EnsureDir(dirname string) error { func CopyFile(sourceFile string, destinationFile string, perm os.FileMode) error { content, err := os.ReadFile(sourceFile) if err != nil { - return fmt.Errorf("failed to read source file %s: %v", sourceFile, err) + return fmt.Errorf("failed to read source file %s: %w", sourceFile, err) } err = os.WriteFile(destinationFile, content, perm) if err != nil { - return fmt.Errorf("failed to write destination file %s: %v", destinationFile, err) + return fmt.Errorf("failed to write destination file %s: %w", destinationFile, err) } return nil } diff --git a/pkg/yurtctl/util/kubernetes/apply_addons.go b/pkg/yurtctl/util/kubernetes/apply_addons.go index 5c62bd787f9..46e9f35be38 100644 --- a/pkg/yurtctl/util/kubernetes/apply_addons.go +++ 
b/pkg/yurtctl/util/kubernetes/apply_addons.go @@ -242,7 +242,7 @@ func DeleteYurthubSetting(client *kubernetes.Clientset) error { if err := client.RbacV1().ClusterRoleBindings(). Delete(context.Background(), edgenode.YurthubComponentName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the clusterrolebinding/%s: %s", + return fmt.Errorf("fail to delete the clusterrolebinding/%s: %w", edgenode.YurthubComponentName, err) } @@ -250,7 +250,7 @@ func DeleteYurthubSetting(client *kubernetes.Clientset) error { if err := client.RbacV1().ClusterRoles(). Delete(context.Background(), edgenode.YurthubComponentName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the clusterrole/%s: %s", + return fmt.Errorf("fail to delete the clusterrole/%s: %w", edgenode.YurthubComponentName, err) } @@ -258,7 +258,7 @@ func DeleteYurthubSetting(client *kubernetes.Clientset) error { if err := client.CoreV1().ConfigMaps(edgenode.YurthubNamespace). 
Delete(context.Background(), edgenode.YurthubCmName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("fail to delete the configmap/%s: %s", + return fmt.Errorf("fail to delete the configmap/%s: %w", edgenode.YurthubCmName, err) } diff --git a/pkg/yurtctl/util/kubernetes/util.go b/pkg/yurtctl/util/kubernetes/util.go index 83c15186427..6118c1fc43a 100644 --- a/pkg/yurtctl/util/kubernetes/util.go +++ b/pkg/yurtctl/util/kubernetes/util.go @@ -109,7 +109,7 @@ func processCreateErr(kind string, name string, err error) error { klog.V(4).Infof("[WARNING] %s/%s is already in cluster, skip to prepare it", kind, name) return nil } - return fmt.Errorf("fail to create the %s/%s: %v", kind, name, err) + return fmt.Errorf("fail to create the %s/%s: %w", kind, name, err) } klog.V(4).Infof("%s/%s is created", kind, name) return nil @@ -123,7 +123,7 @@ func CreateServiceAccountFromYaml(cliSet *kubernetes.Clientset, ns, saTmpl strin } sa, ok := obj.(*corev1.ServiceAccount) if !ok { - return fmt.Errorf("fail to assert serviceaccount: %v", err) + return fmt.Errorf("fail to assert serviceaccount: %w", err) } _, err = cliSet.CoreV1().ServiceAccounts(ns).Create(context.Background(), sa, metav1.CreateOptions{}) return processCreateErr("serviceaccount", sa.Name, err) @@ -137,7 +137,7 @@ func CreateClusterRoleFromYaml(cliSet *kubernetes.Clientset, crTmpl string) erro } cr, ok := obj.(*rbacv1.ClusterRole) if !ok { - return fmt.Errorf("fail to assert clusterrole: %v", err) + return fmt.Errorf("fail to assert clusterrole: %w", err) } _, err = cliSet.RbacV1().ClusterRoles().Create(context.Background(), cr, metav1.CreateOptions{}) return processCreateErr("clusterrole", cr.Name, err) @@ -151,7 +151,7 @@ func CreateClusterRoleBindingFromYaml(cliSet *kubernetes.Clientset, crbTmpl stri } crb, ok := obj.(*rbacv1.ClusterRoleBinding) if !ok { - return fmt.Errorf("fail to assert clusterrolebinding: %v", err) + return fmt.Errorf("fail to assert clusterrolebinding: 
%w", err) } _, err = cliSet.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{}) return processCreateErr("clusterrolebinding", crb.Name, err) @@ -165,7 +165,7 @@ func CreateConfigMapFromYaml(cliSet *kubernetes.Clientset, ns, cmTmpl string) er } cm, ok := obj.(*corev1.ConfigMap) if !ok { - return fmt.Errorf("fail to assert configmap: %v", err) + return fmt.Errorf("fail to assert configmap: %w", err) } _, err = cliSet.CoreV1().ConfigMaps(ns).Create(context.Background(), cm, metav1.CreateOptions{}) return processCreateErr("configmap", cm.Name, err) @@ -211,11 +211,11 @@ func CreateDaemonSetFromYaml(cliSet *kubernetes.Clientset, ns, dsTmpl string, ct } ds, ok := obj.(*appsv1.DaemonSet) if !ok { - return fmt.Errorf("fail to assert daemonset: %v", err) + return fmt.Errorf("fail to assert daemonset: %w", err) } _, err = cliSet.AppsV1().DaemonSets(ns).Create(context.Background(), ds, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("fail to create the daemonset/%s: %v", ds.Name, err) + return fmt.Errorf("fail to create the daemonset/%s: %w", ds.Name, err) } klog.V(4).Infof("daemonset/%s is created", ds.Name) return nil @@ -229,7 +229,7 @@ func CreateServiceFromYaml(cliSet *kubernetes.Clientset, ns, svcTmpl string) err } svc, ok := obj.(*corev1.Service) if !ok { - return fmt.Errorf("fail to assert service: %v", err) + return fmt.Errorf("fail to assert service: %w", err) } _, err = cliSet.CoreV1().Services(ns).Create(context.Background(), svc, metav1.CreateOptions{}) return processCreateErr("service", svc.Name, err) @@ -244,7 +244,7 @@ func CreateRoleFromYaml(cliSet *kubernetes.Clientset, ns, crTmpl string) error { } ro, ok := obj.(*rbacv1.Role) if !ok { - return fmt.Errorf("fail to assert role: %v", err) + return fmt.Errorf("fail to assert role: %w", err) } _, err = cliSet.RbacV1().Roles(ns).Create(context.Background(), ro, metav1.CreateOptions{}) return processCreateErr("role", ro.Name, err) @@ -258,7 +258,7 @@ func 
CreateRoleBindingFromYaml(cliSet *kubernetes.Clientset, ns, crbTmpl string) } rb, ok := obj.(*rbacv1.RoleBinding) if !ok { - return fmt.Errorf("fail to assert rolebinding: %v", err) + return fmt.Errorf("fail to assert rolebinding: %w", err) } _, err = cliSet.RbacV1().RoleBindings(ns).Create(context.Background(), rb, metav1.CreateOptions{}) return processCreateErr("rolebinding", rb.Name, err) @@ -272,7 +272,7 @@ func CreateSecretFromYaml(cliSet *kubernetes.Clientset, ns, saTmpl string) error } se, ok := obj.(*corev1.Secret) if !ok { - return fmt.Errorf("fail to assert secret: %v", err) + return fmt.Errorf("fail to assert secret: %w", err) } _, err = cliSet.CoreV1().Secrets(ns).Create(context.Background(), se, metav1.CreateOptions{}) @@ -287,7 +287,7 @@ func CreateMutatingWebhookConfigurationFromYaml(cliSet *kubernetes.Clientset, sv } mw, ok := obj.(*v1beta1.MutatingWebhookConfiguration) if !ok { - return fmt.Errorf("fail to assert mutatingwebhookconfiguration: %v", err) + return fmt.Errorf("fail to assert mutatingwebhookconfiguration: %w", err) } _, err = cliSet.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(context.Background(), mw, metav1.CreateOptions{}) return processCreateErr("mutatingwebhookconfiguration", mw.Name, err) @@ -301,7 +301,7 @@ func CreateValidatingWebhookConfigurationFromYaml(cliSet *kubernetes.Clientset, } vw, ok := obj.(*v1beta1.ValidatingWebhookConfiguration) if !ok { - return fmt.Errorf("fail to assert validatingwebhookconfiguration: %v", err) + return fmt.Errorf("fail to assert validatingwebhookconfiguration: %w", err) } _, err = cliSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(context.Background(), vw, metav1.CreateOptions{}) return processCreateErr("validatingwebhookconfiguration", vw.Name, err) @@ -519,7 +519,7 @@ func RunServantJobs( for _, nodeName := range nodeNames { job, err := getJob(nodeName) if err != nil { - return fmt.Errorf("fail to get job for node %s: %s", nodeName, err) + 
return fmt.Errorf("fail to get job for node %s: %w", nodeName, err) } jobByNodeName[nodeName] = job } @@ -634,7 +634,7 @@ func GetOrCreateJoinTokenString(cliSet *kubernetes.Clientset) (string, error) { klog.V(1).Infoln("[token] retrieving list of bootstrap tokens") secrets, err := cliSet.CoreV1().Secrets(metav1.NamespaceSystem).List(context.Background(), listOptions) if err != nil { - return "", fmt.Errorf("%v%s", err, "failed to list bootstrap tokens") + return "", fmt.Errorf("%w%s", err, "failed to list bootstrap tokens") } for _, secret := range secrets.Items { @@ -655,7 +655,7 @@ func GetOrCreateJoinTokenString(cliSet *kubernetes.Clientset) (string, error) { tokenStr, err := bootstraputil.GenerateBootstrapToken() if err != nil { - return "", fmt.Errorf("couldn't generate random token, %v", err) + return "", fmt.Errorf("couldn't generate random token, %w", err) } token, err := kubeadmapi.NewBootstrapTokenString(tokenStr) if err != nil { @@ -740,7 +740,7 @@ func CheckAndInstallKubelet(kubernetesResourceServer, clusterVersion string) err savePath := fmt.Sprintf("%s/kubernetes-node-linux-%s.tar.gz", constants.TmpDownloadDir, runtime.GOARCH) klog.V(1).Infof("Download kubelet from: %s", packageUrl) if err := util.DownloadFile(packageUrl, savePath, 3); err != nil { - return fmt.Errorf("Download kuelet fail: %v", err) + return fmt.Errorf("Download kubelet fail: %w", err) } if err := util.Untar(savePath, constants.TmpDownloadDir); err != nil { return err diff --git a/pkg/yurtctl/util/system/util.go b/pkg/yurtctl/util/system/util.go index 33527137283..d606bb1e8f6 100644 --- a/pkg/yurtctl/util/system/util.go +++ b/pkg/yurtctl/util/system/util.go @@ -42,7 +42,7 @@ net.bridge.bridge-nf-call-iptables = 1` func SetIpv4Forward() error { klog.Infof("Setting ipv4 forward") if err := os.WriteFile(ip_forward, []byte("1"), 0644); err != nil { - return fmt.Errorf("Write content 1 to file %s fail: %v ", ip_forward, err) + return fmt.Errorf("Write content 1 to file %s fail: %w ", 
ip_forward, err) } return nil } @@ -51,7 +51,7 @@ func SetIpv4Forward() error { func SetBridgeSetting() error { klog.Info("Setting bridge settings for kubernetes.") if err := os.WriteFile(constants.SysctlK8sConfig, []byte(kubernetsBridgeSetting), 0644); err != nil { - return fmt.Errorf("Write file %s fail: %v ", constants.SysctlK8sConfig, err) + return fmt.Errorf("Write file %s fail: %w ", constants.SysctlK8sConfig, err) } if exist, _ := edgenode.FileExists(bridgenf); !exist { @@ -61,10 +61,10 @@ func SetBridgeSetting() error { } } if err := os.WriteFile(bridgenf, []byte("1"), 0644); err != nil { - return fmt.Errorf("Write file %s fail: %v ", bridgenf, err) + return fmt.Errorf("Write file %s fail: %w ", bridgenf, err) } if err := os.WriteFile(bridgenf6, []byte("1"), 0644); err != nil { - return fmt.Errorf("Write file %s fail: %v ", bridgenf, err) + return fmt.Errorf("Write file %s fail: %w ", bridgenf, err) } return nil } diff --git a/pkg/yurtctl/util/util.go b/pkg/yurtctl/util/util.go index 803a0c624df..9916051232b 100644 --- a/pkg/yurtctl/util/util.go +++ b/pkg/yurtctl/util/util.go @@ -19,6 +19,7 @@ package util import ( "archive/tar" "compress/gzip" + "errors" "fmt" "io" "net/http" @@ -105,7 +106,7 @@ func Untar(tarFile, dest string) error { for { hdr, err := tr.Next() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return err diff --git a/pkg/yurthub/cachemanager/cache_manager.go b/pkg/yurthub/cachemanager/cache_manager.go index d9cb8a46389..368d37180e2 100644 --- a/pkg/yurthub/cachemanager/cache_manager.go +++ b/pkg/yurthub/cachemanager/cache_manager.go @@ -19,6 +19,7 @@ package cachemanager import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -171,7 +172,7 @@ func (cm *cacheManager) queryListObject(req *http.Request) (runtime.Object, erro // If the GVR information is recognized, return list or empty list objs, err := cm.storage.List(key) if err != nil { - if err != storage.ErrStorageNotFound { + if !errors.Is(err, 
storage.ErrStorageNotFound) { return nil, err } else if isPodKey(key) { // because at least there will be yurt-hub pod on the node. @@ -326,7 +327,7 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re klog.V(2).Infof("pod(%s) is %s", key, string(watchType)) } - if err == storage.ErrStorageAccessConflict { + if errors.Is(err, storage.ErrStorageAccessConflict) { klog.V(2).Infof("skip to cache watch event because key(%s) is under processing", key) } else if err != nil { klog.Errorf("failed to process watch object %s, %v", key, err) @@ -362,7 +363,7 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req items, err := meta.ExtractList(list) if err != nil { klog.Errorf("unable to understand list result %#v (%v)", list, err) - return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + return fmt.Errorf("unable to understand list result %#v (%w)", list, err) } klog.V(5).Infof("list items for %s is: %d", util.ReqInfoString(info), len(items)) @@ -390,7 +391,7 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req } key, _ := util.KeyFunc(comp, info.Resource, ns, name) err = cm.saveOneObjectWithValidation(key, items[0]) - if err == storage.ErrStorageAccessConflict { + if errors.Is(err, storage.ErrStorageAccessConflict) { klog.V(2).Infof("skip to cache list object because key(%s) is under processing", key) return nil } @@ -465,7 +466,7 @@ func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.Requ } if err := cm.saveOneObjectWithValidation(key, obj); err != nil { - if err != storage.ErrStorageAccessConflict { + if !errors.Is(err, storage.ErrStorageAccessConflict) { return err } klog.V(2).Infof("skip to cache object because key(%s) is under processing", key) @@ -506,7 +507,7 @@ func (cm *cacheManager) saveOneObjectWithValidation(key string, obj runtime.Obje } else if os.IsNotExist(err) || oldObj == nil { return cm.storage.Create(key, obj) } else { - if 
err != storage.ErrStorageAccessConflict { + if !errors.Is(err, storage.ErrStorageAccessConflict) { return cm.storage.Create(key, obj) } return err diff --git a/pkg/yurthub/cachemanager/cache_manager_test.go b/pkg/yurthub/cachemanager/cache_manager_test.go index 81509b99eb3..34194f0fd47 100644 --- a/pkg/yurthub/cachemanager/cache_manager_test.go +++ b/pkg/yurthub/cachemanager/cache_manager_test.go @@ -19,6 +19,7 @@ package cachemanager import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -482,7 +483,7 @@ func TestCacheGetResponse(t *testing.T) { obj, err := sWrapper.Get(tt.key) if err != nil || obj == nil { - if tt.expectResult.err != err { + if !errors.Is(tt.expectResult.err, err) { t.Errorf("expect get error %v, but got %v", tt.expectResult.err, err) } t.Logf("get expected err %v for key %s", tt.expectResult.err, tt.key) @@ -825,7 +826,7 @@ func TestCacheWatchResponse(t *testing.T) { if tt.expectResult.err && err == nil { t.Errorf("expect err, but do not got err") - } else if err != nil && err != io.EOF { + } else if err != nil && !errors.Is(err, io.EOF) { t.Errorf("failed to cache resposne, %v", err) } @@ -1321,7 +1322,7 @@ func TestCacheListResponse(t *testing.T) { objs, err := sWrapper.List(tt.key) if err != nil { // If error is storage.ErrStorageNotFound, it means that no object is cached in the hard disk - if err == storage.ErrStorageNotFound { + if errors.Is(err, storage.ErrStorageNotFound) { if len(tt.expectResult.data) != 0 { t.Errorf("expect %v objects, but get nothing.", len(tt.expectResult.data)) } @@ -2305,7 +2306,7 @@ func TestQueryCacheForList(t *testing.T) { t.Errorf("Got no error, but expect err") } - if tt.expectResult.queryErr != nil && tt.expectResult.queryErr != err { + if tt.expectResult.queryErr != nil && !errors.Is(tt.expectResult.queryErr, err) { t.Errorf("expect err %v, but got %v", tt.expectResult.queryErr, err) } } else { diff --git a/pkg/yurthub/cachemanager/storage_wrapper_test.go 
b/pkg/yurthub/cachemanager/storage_wrapper_test.go index e85984a9109..33f8d2605aa 100644 --- a/pkg/yurthub/cachemanager/storage_wrapper_test.go +++ b/pkg/yurthub/cachemanager/storage_wrapper_test.go @@ -17,6 +17,7 @@ limitations under the License. package cachemanager import ( + "errors" "fmt" "os" "testing" @@ -160,14 +161,14 @@ func TestStorageWrapper(t *testing.T) { t.Errorf("failed to delete obj, %v", err) } _, err = sWrapper.Get("kubelet/pods/default/mypod1") - if err != storage.ErrStorageNotFound { + if !errors.Is(err, storage.ErrStorageNotFound) { t.Errorf("unexpected error, %v", err) } }) t.Run("Test list obj in empty path", func(t *testing.T) { _, err = sWrapper.List("kubelet/pods/default") - if err != storage.ErrStorageNotFound { + if !errors.Is(err, storage.ErrStorageNotFound) { t.Errorf("failed to list obj, %v", err) } }) diff --git a/pkg/yurthub/certificate/hubself/cert_mgr.go b/pkg/yurthub/certificate/hubself/cert_mgr.go index aa3a9304f4b..139c37e881c 100644 --- a/pkg/yurthub/certificate/hubself/cert_mgr.go +++ b/pkg/yurthub/certificate/hubself/cert_mgr.go @@ -22,6 +22,7 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "errors" "fmt" "net/url" "os" @@ -293,7 +294,7 @@ func (ycm *yurtHubCertManager) initCaCert() error { kubeConfig, err := clientcmd.Load([]byte(kubeconfigStr)) if err != nil { - return fmt.Errorf("could not load kube config string, %v", err) + return fmt.Errorf("could not load kube config string, %w", err) } if len(kubeConfig.Clusters) != 1 { @@ -338,7 +339,7 @@ func (ycm *yurtHubCertManager) initBootstrap() error { ycm.bootstrapConfStore = bootstrapConfStore contents, err := ycm.bootstrapConfStore.Get(bootstrapConfigFileName) - if err == storage.ErrStorageNotFound { + if errors.Is(err, storage.ErrStorageNotFound) { klog.Infof("%s bootstrap conf file does not exist, so create it", ycm.hubName) return ycm.createBootstrapConfFile(ycm.joinToken) } else if err != nil { @@ -390,7 +391,7 @@ func (ycm *yurtHubCertManager) 
initClientCertificateManager() error { CertificateStore: s, }) if err != nil { - return fmt.Errorf("failed to initialize client certificate manager: %v", err) + return fmt.Errorf("failed to initialize client certificate manager: %w", err) } ycm.hubClientCertManager = m m.Start() @@ -554,7 +555,7 @@ func createInsecureRestClientConfig(remoteServer *url.URL) (*restclient.Config, restConfig, err := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { - return nil, fmt.Errorf("failed to create insecure rest client configuration, %v", err) + return nil, fmt.Errorf("failed to create insecure rest client configuration, %w", err) } return restConfig, nil } @@ -625,7 +626,7 @@ func (ycm *yurtHubCertManager) updateBootstrapConfFile(joinToken string) error { curKubeConfig, err := util.LoadKubeConfig(ycm.getBootstrapConfFile()) if err != nil || curKubeConfig == nil { klog.Errorf("could not get current bootstrap config for %s, %v", ycm.hubName, err) - return fmt.Errorf("could not load bootstrap conf file(%s), %v", ycm.getBootstrapConfFile(), err) + return fmt.Errorf("could not load bootstrap conf file(%s), %w", ycm.getBootstrapConfFile(), err) } if curKubeConfig.AuthInfos[bootstrapUser] != nil { diff --git a/pkg/yurthub/filter/filter.go b/pkg/yurthub/filter/filter.go index fdb9219b12e..5de6742ad35 100644 --- a/pkg/yurthub/filter/filter.go +++ b/pkg/yurthub/filter/filter.go @@ -19,6 +19,7 @@ package filter import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -152,7 +153,7 @@ func NewFilterReadCloser( if dr.isWatch { go func(req *http.Request, rc io.ReadCloser, ch chan watch.Event) { err := handler.StreamResponseFilter(rc, ch) - if err != nil && err != io.EOF && err != context.Canceled { + if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { klog.Errorf("filter(%s) watch response ended with error, %v", dr.ownerName, err) } }(req, rc, dr.ch) diff --git a/pkg/yurthub/healthchecker/node_lease.go 
b/pkg/yurthub/healthchecker/node_lease.go index 1efc0dc5bf6..8133e97697b 100644 --- a/pkg/yurthub/healthchecker/node_lease.go +++ b/pkg/yurthub/healthchecker/node_lease.go @@ -114,7 +114,7 @@ func (nl *nodeLeaseImpl) backoffEnsureLease() (*coordinationv1.Lease, bool, erro } sleep = sleep * 2 if sleep > maxBackoff { - return nil, false, fmt.Errorf("backoff ensure lease error: %v", err) + return nil, false, fmt.Errorf("backoff ensure lease error: %w", err) } nl.clock.Sleep(sleep) } diff --git a/pkg/yurthub/proxy/local/local.go b/pkg/yurthub/proxy/local/local.go index a7e14736f6b..ebb82be7789 100644 --- a/pkg/yurthub/proxy/local/local.go +++ b/pkg/yurthub/proxy/local/local.go @@ -18,13 +18,14 @@ package local import ( "bytes" + "errors" "fmt" "io" "net/http" "strconv" "time" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -82,7 +83,7 @@ func (lp *LocalProxy) ServeHTTP(w http.ResponseWriter, req *http.Request) { } } else { klog.Errorf("request(%s) is not supported when cluster is unhealthy", util.ReqString(req)) - util.Err(errors.NewBadRequest(fmt.Sprintf("request(%s) is not supported when cluster is unhealthy", util.ReqString(req))), w, req) + util.Err(apierrors.NewBadRequest(fmt.Sprintf("request(%s) is not supported when cluster is unhealthy", util.ReqString(req))), w, req) } } @@ -154,12 +155,12 @@ func (lp *LocalProxy) localWatch(w http.ResponseWriter, req *http.Request) error flusher, ok := w.(http.Flusher) if !ok { err := fmt.Errorf("unable to start watch - can't get http.Flusher: %#v", w) - return errors.NewInternalError(err) + return apierrors.NewInternalError(err) } opts := metainternalversion.ListOptions{} if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, 
&opts); err != nil { - return errors.NewBadRequest(err.Error()) + return apierrors.NewBadRequest(err.Error()) } ctx := req.Context() @@ -201,20 +202,20 @@ func (lp *LocalProxy) localWatch(w http.ResponseWriter, req *http.Request) error func (lp *LocalProxy) localReqCache(w http.ResponseWriter, req *http.Request) error { if !lp.cacheMgr.CanCacheFor(req) { klog.Errorf("can not cache for %s", util.ReqString(req)) - return errors.NewBadRequest(fmt.Sprintf("can not cache for %s", util.ReqString(req))) + return apierrors.NewBadRequest(fmt.Sprintf("can not cache for %s", util.ReqString(req))) } obj, err := lp.cacheMgr.QueryCache(req) - if err == storage.ErrStorageNotFound || err == hubmeta.ErrGVRNotRecognized { + if errors.Is(err, storage.ErrStorageNotFound) || errors.Is(err, hubmeta.ErrGVRNotRecognized) { klog.Errorf("object not found for %s", util.ReqString(req)) reqInfo, _ := apirequest.RequestInfoFrom(req.Context()) - return errors.NewNotFound(schema.GroupResource{Group: reqInfo.APIGroup, Resource: reqInfo.Resource}, reqInfo.Name) + return apierrors.NewNotFound(schema.GroupResource{Group: reqInfo.APIGroup, Resource: reqInfo.Resource}, reqInfo.Name) } else if err != nil { klog.Errorf("failed to query cache for %s, %v", util.ReqString(req), err) - return errors.NewInternalError(err) + return apierrors.NewInternalError(err) } else if obj == nil { klog.Errorf("no cache object for %s", util.ReqString(req)) - return errors.NewInternalError(fmt.Errorf("no cache object for %s", util.ReqString(req))) + return apierrors.NewInternalError(fmt.Errorf("no cache object for %s", util.ReqString(req))) } return util.WriteObject(http.StatusOK, obj, w, req) diff --git a/pkg/yurthub/proxy/remote/remote.go b/pkg/yurthub/proxy/remote/remote.go index da1506c4575..e9b43265ffb 100644 --- a/pkg/yurthub/proxy/remote/remote.go +++ b/pkg/yurthub/proxy/remote/remote.go @@ -18,6 +18,7 @@ package remote import ( "context" + "errors" "fmt" "io" "net/http" @@ -185,7 +186,7 @@ func (rp *RemoteProxy) 
modifyResponse(resp *http.Response) error { wrapPrc, _ := util.NewGZipReaderCloser(resp.Header, prc, req, "cache-manager") go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { err := rp.cacheMgr.CacheResponse(req, prc, stopCh) - if err != nil && err != io.EOF && err != context.Canceled { + if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { klog.Errorf("%s response cache ended with error, %v", util.ReqString(req), err) } }(req, wrapPrc, rp.stopCh) diff --git a/pkg/yurthub/proxy/util/util_test.go b/pkg/yurthub/proxy/util/util_test.go index c6adbc1f1b9..a1ec1f60f8b 100644 --- a/pkg/yurthub/proxy/util/util_test.go +++ b/pkg/yurthub/proxy/util/util_test.go @@ -18,6 +18,7 @@ package util import ( "context" + "errors" "fmt" "net/http" "net/http/httptest" @@ -310,7 +311,7 @@ func TestWithRequestTimeout(t *testing.T) { handler = filters.WithRequestInfo(handler, resolver) handler.ServeHTTP(httptest.NewRecorder(), req) - if ctxErr != tc.Err { + if !errors.Is(ctxErr, tc.Err) { t.Errorf("%s: expect context cancel error %v, but got %v", k, tc.Err, ctxErr) } } diff --git a/pkg/yurthub/storage/disk/storage.go b/pkg/yurthub/storage/disk/storage.go index a2dda5fd24e..dfc823f7b09 100644 --- a/pkg/yurthub/storage/disk/storage.go +++ b/pkg/yurthub/storage/disk/storage.go @@ -204,7 +204,7 @@ func (ds *diskStorage) get(path string) ([]byte, error) { if os.IsNotExist(err) { return []byte{}, storage.ErrStorageNotFound } - return nil, fmt.Errorf("failed to get bytes from %s, %v", path, err) + return nil, fmt.Errorf("failed to get bytes from %s, %w", path, err) } else if info.Mode().IsRegular() { b, err := os.ReadFile(path) if err != nil { diff --git a/pkg/yurthub/storage/disk/storage_test.go b/pkg/yurthub/storage/disk/storage_test.go index 6cf4f13e8f9..d8d1121a2b8 100644 --- a/pkg/yurthub/storage/disk/storage_test.go +++ b/pkg/yurthub/storage/disk/storage_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package disk import ( + "errors" "os" "testing" @@ -139,7 +140,7 @@ func TestCreate(t *testing.T) { for key, data := range tc.keysData { err = s.Create(key, []byte(data)) if err != nil { - if tc.createErr != err { + if !errors.Is(err, tc.createErr) { t.Errorf("%s: expect create error %v, but got %v", k, tc.createErr, err) } } @@ -148,7 +149,7 @@ for key, result := range tc.result { b, err := s.Get(key) if result.err != nil { - if result.err != err { + if !errors.Is(err, result.err) { t.Errorf("%s(key=%s) expect error %v, but got error %v", k, key, result.err, err) } } @@ -259,7 +260,7 @@ func TestDelete(t *testing.T) { for _, key := range tc.deleteKeys { err = s.Delete(key) if result, ok := tc.result[key]; ok { - if result.deleteErr != err { + if !errors.Is(err, result.deleteErr) { t.Errorf("%s: delete key(%s) expect error %v, but got %v", k, key, result.deleteErr, err) } } else if err != nil { @@ -270,7 +271,7 @@ for key, result := range tc.result { _, err := s.Get(key) if result.getErr != nil { - if result.getErr != err { + if !errors.Is(err, result.getErr) { t.Errorf("%s: expect error %v, but got error %v", k, result.getErr, err) } } @@ -345,7 +346,7 @@ func TestGet(t *testing.T) { for key, result := range tc.result { b, err := s.Get(key) if result.err != nil { - if result.err != err { + if !errors.Is(err, result.err) { t.Errorf("%s: expect error %v, but got error %v", k, result.err, err) } } @@ -524,7 +525,7 @@ func TestList(t *testing.T) { data, err := s.List(tc.listKey) if err != nil { - if tc.listErr != err { + if !errors.Is(err, tc.listErr) { t.Errorf("%s: list(%s) expect error %v, but got error %v", k, tc.listKey, tc.listErr, err) } } @@ -617,7 +618,7 @@ func TestUpdate(t *testing.T) { for key, data := range tc.updateKeys { err = s.Update(key, []byte(data)) if err != nil { - if tc.updateErr != err { + if !errors.Is(err, tc.updateErr) { t.Errorf("%s: expect error %v, but got %v", k, 
tc.updateErr, err) } } @@ -700,7 +701,7 @@ func TestReplace(t *testing.T) { err = s.Replace(tc.listKey, contents) if err != nil { - if tc.replaceErr != err { + if !errors.Is(err, tc.replaceErr) { t.Errorf("%s: expect error %v, but got %v", k, tc.replaceErr, err) } } diff --git a/pkg/yurthub/util/util_test.go b/pkg/yurthub/util/util_test.go index 39ab50cebb3..e8bd931e8a7 100644 --- a/pkg/yurthub/util/util_test.go +++ b/pkg/yurthub/util/util_test.go @@ -19,6 +19,7 @@ package util import ( "bytes" "encoding/base64" + "errors" "io" "testing" ) @@ -50,7 +51,7 @@ func TestDualReader(t *testing.T) { t.Errorf("nr: bytes read = %q want %q", dst2, src) } - if n, err := rc.Read(dst1); n != 0 || err != io.EOF { + if n, err := rc.Read(dst1); n != 0 || !errors.Is(err, io.EOF) { t.Errorf("rc.Read at EOF = %d, %v want 0, EOF", n, err) } @@ -58,7 +59,7 @@ if err := rc.Close(); err != nil { t.Errorf("rc.Close failed %v", err) } - if n, err := prc.Read(dst1); n != 0 || err != io.EOF { + if n, err := prc.Read(dst1); n != 0 || !errors.Is(err, io.EOF) { t.Errorf("nr.Read at EOF = %d, %v want 0, EOF", n, err) } } @@ -75,7 +76,7 @@ func TestDualReaderByPreClose(t *testing.T) { t.Errorf("prc.Close failed %v", err) } - if n, err := io.ReadFull(rc, dst); n != 0 || err != io.ErrClosedPipe { + if n, err := io.ReadFull(rc, dst); n != 0 || !errors.Is(err, io.ErrClosedPipe) { t.Errorf("closed dualReadCloser: ReadFull(r, dst) = %d, %v; want 0, EPIPE", n, err) } } diff --git a/pkg/yurttunnel/kubernetes/kubernetes.go b/pkg/yurttunnel/kubernetes/kubernetes.go index 84814dab853..020ae1db970 100644 --- a/pkg/yurttunnel/kubernetes/kubernetes.go +++ b/pkg/yurttunnel/kubernetes/kubernetes.go @@ -56,7 +56,7 @@ func CreateClientSetKubeConfig(kubeConfig string) (*kubernetes.Clientset, error) } cfg, err = clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { - return nil, fmt.Errorf("fail to create the clientset based on %s: %v", + return nil, fmt.Errorf("fail to create the clientset based on %s: 
%w", kubeConfig, err) } cliSet, err := kubernetes.NewForConfig(cfg) diff --git a/pkg/yurttunnel/server/anpserver.go b/pkg/yurttunnel/server/anpserver.go index 986f67a3bf3..75e1da8e2df 100644 --- a/pkg/yurttunnel/server/anpserver.go +++ b/pkg/yurttunnel/server/anpserver.go @@ -66,7 +66,7 @@ func (ats *anpTunnelServer) Run() error { ats.interceptorServerUDSFile, ats.tlsCfg) if proxierErr != nil { - return fmt.Errorf("fail to run the proxier: %s", proxierErr) + return fmt.Errorf("fail to run the proxier: %w", proxierErr) } wrappedHandler, err := wh.WrapHandler( @@ -74,7 +74,7 @@ func (ats *anpTunnelServer) Run() error { ats.wrappers, ) if err != nil { - return fmt.Errorf("fail to wrap handler: %v", err) + return fmt.Errorf("fail to wrap handler: %w", err) } // 2. start the master server @@ -85,13 +85,13 @@ func (ats *anpTunnelServer) Run() error { ats.serverMasterInsecureAddr, ats.tlsCfg) if masterServerErr != nil { - return fmt.Errorf("fail to run master server: %s", masterServerErr) + return fmt.Errorf("fail to run master server: %w", masterServerErr) } // 3. 
start the agent server agentServerErr := runAgentServer(ats.tlsCfg, ats.serverAgentAddr, proxyServer) if agentServerErr != nil { - return fmt.Errorf("fail to run agent server: %s", agentServerErr) + return fmt.Errorf("fail to run agent server: %w", agentServerErr) } return nil @@ -193,7 +193,7 @@ func runAgentServer(tlsCfg *tls.Config, listener, err := net.Listen("tcp", agentServerAddr) klog.Info("start handling connection from agents") if err != nil { - return fmt.Errorf("fail to listen to agent on %s: %s", agentServerAddr, err) + return fmt.Errorf("fail to listen to agent on %s: %w", agentServerAddr, err) } go grpcServer.Serve(listener) return nil diff --git a/pkg/yurttunnel/server/interceptor.go b/pkg/yurttunnel/server/interceptor.go index 0fcbfa61590..b5087f587c0 100644 --- a/pkg/yurttunnel/server/interceptor.go +++ b/pkg/yurttunnel/server/interceptor.go @@ -78,7 +78,7 @@ func NewRequestInterceptor(udsSockFile string, cfg *tls.Config) *RequestIntercep klog.V(4).Infof("Sending request to %q.", addr) proxyConn, err := net.Dial("unix", udsSockFile) if err != nil { - return nil, fmt.Errorf("dialing proxy %q failed: %v", udsSockFile, err) + return nil, fmt.Errorf("dialing proxy %q failed: %w", udsSockFile, err) } var connectHeaders string @@ -94,7 +94,7 @@ func NewRequestInterceptor(udsSockFile string, cfg *tls.Config) *RequestIntercep res, err := http.ReadResponse(br, nil) if err != nil { proxyConn.Close() - return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v", addr, udsSockFile, err) + return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %w", addr, udsSockFile, err) } if res.StatusCode != 200 { proxyConn.Close() @@ -107,7 +107,7 @@ func NewRequestInterceptor(udsSockFile string, cfg *tls.Config) *RequestIntercep tlsTunnelConn := tls.Client(proxyConn, cfg) if err := tlsTunnelConn.Handshake(); err != nil { proxyConn.Close() - return nil, fmt.Errorf("fail to setup TLS handshake through the Tunnel: 
%s", err) + return nil, fmt.Errorf("fail to setup TLS handshake through the Tunnel: %w", err) } klog.V(4).Infof("successfully setup TLS connection to %q with headers: %s", addr, connectHeaders) return tlsTunnelConn, nil diff --git a/pkg/yurttunnel/trafficforward/dns/dns.go b/pkg/yurttunnel/trafficforward/dns/dns.go index 6cee2a293b3..c78be4117d1 100644 --- a/pkg/yurttunnel/trafficforward/dns/dns.go +++ b/pkg/yurttunnel/trafficforward/dns/dns.go @@ -290,7 +290,7 @@ func (dnsctl *coreDNSRecordController) ensureCoreDNSRecordConfigMap() error { } _, err = dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Create(context.Background(), cm, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("failed to create ConfigMap %v/%v, %v", + return fmt.Errorf("failed to create ConfigMap %v/%v, %w", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } } @@ -352,11 +352,11 @@ func (dnsctl *coreDNSRecordController) getTunnelServerIP(useCache bool) (string, svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). 
Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { - return "", fmt.Errorf("failed to get %v/%v service, %v", + return "", fmt.Errorf("failed to get %v/%v service, %w", constants.YurttunnelServerServiceNs, constants.YurttunnelServerInternalServiceName, err) } if len(svc.Spec.ClusterIP) == 0 { - return "", fmt.Errorf("unable find ClusterIP from %s/%s service, %v", + return "", fmt.Errorf("unable find ClusterIP from %s/%s service, %v", constants.YurttunnelServerServiceNs, constants.YurttunnelServerInternalServiceName, err) } @@ -377,7 +377,7 @@ func (dnsctl *coreDNSRecordController) updateDNSRecords(records []string) error } cm.Data[constants.YurttunnelDNSRecordNodeDataKey] = strings.Join(records, "\n") if _, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("failed to update configmap %v/%v, %v", + return fmt.Errorf("failed to update configmap %v/%v, %w", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } return nil @@ -387,7 +387,7 @@ func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []st svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). 
Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to sync tunnel server internal service, %v", err) + return fmt.Errorf("failed to sync tunnel server internal service, %w", err) } changed, updatedSvcPorts := resolveServicePorts(svc, ports, portMappings) @@ -398,7 +398,7 @@ func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []st svc.Spec.Ports = updatedSvcPorts _, err = dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs).Update(context.Background(), svc, metav1.UpdateOptions{}) if err != nil { - return fmt.Errorf("failed to sync tunnel server service, %v", err) + return fmt.Errorf("failed to sync tunnel server service, %w", err) } return nil } diff --git a/pkg/yurttunnel/trafficforward/dns/handler.go b/pkg/yurttunnel/trafficforward/dns/handler.go index d727e00db60..8714ceee4d4 100644 --- a/pkg/yurttunnel/trafficforward/dns/handler.go +++ b/pkg/yurttunnel/trafficforward/dns/handler.go @@ -227,7 +227,7 @@ func (dnsctl *coreDNSRecordController) getCurrentDNSRecords() ([]string, error) data, ok := cm.Data[constants.YurttunnelDNSRecordNodeDataKey] if !ok { - return nil, fmt.Errorf("key %q not found in %s/%s ConfigMap, %v", + return nil, fmt.Errorf("key %q not found in %s/%s ConfigMap, %v", constants.YurttunnelDNSRecordNodeDataKey, constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } diff --git a/pkg/yurttunnel/trafficforward/iptables/iptables.go b/pkg/yurttunnel/trafficforward/iptables/iptables.go index 6ec005ceea7..23230fc1287 100644 --- a/pkg/yurttunnel/trafficforward/iptables/iptables.go +++ b/pkg/yurttunnel/trafficforward/iptables/iptables.go @@ -480,7 +480,7 @@ func (im *iptablesManager) clearConnTrackEntriesForIPPort(ip, port string) error if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { klog.Errorf("clear conntrack for %s:%s failed: %q, error message: %s", ip, port, string(output), 
err) - return fmt.Errorf("clear conntrack for %s:%s failed: %q, error message: %s", + return fmt.Errorf("clear conntrack for %s:%s failed: %q, error message: %w", ip, port, string(output), err) } return nil diff --git a/pkg/yurttunnel/util/util.go b/pkg/yurttunnel/util/util.go index 0b4db70a418..0810247e907 100644 --- a/pkg/yurttunnel/util/util.go +++ b/pkg/yurttunnel/util/util.go @@ -95,7 +95,7 @@ func GetConfiguredProxyPortsAndMappings(client clientset.Interface, insecureList YurttunnelServerDnatConfigMapNs, YurttunnelServerDnatConfigMapName) } else { - return []string{}, map[string]string{}, fmt.Errorf("fail to get configmap %s/%s: %v", + return []string{}, map[string]string{}, fmt.Errorf("fail to get configmap %s/%s: %w", YurttunnelServerDnatConfigMapNs, YurttunnelServerDnatConfigMapName, err) } diff --git a/pkg/yurttunnel/util/util_test.go b/pkg/yurttunnel/util/util_test.go index f9215e1bcc3..7a1dc791329 100644 --- a/pkg/yurttunnel/util/util_test.go +++ b/pkg/yurttunnel/util/util_test.go @@ -18,6 +18,7 @@ package util import ( "context" + "errors" "net" "net/http" "net/url" @@ -156,7 +157,7 @@ func TestResolveProxyPortsAndMappings(t *testing.T) { for k, tt := range testcases { t.Run(k, func(t *testing.T) { ports, portMappings, err := resolveProxyPortsAndMappings(tt.configMap, insecureListenAddr, secureListenAddr) - if tt.expectResult.err != err { + if !errors.Is(tt.expectResult.err, err) { t.Errorf("expect error: %v, but got error: %v", tt.expectResult.err, err) } diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index ac2aa35f6ee..367c9b012a6 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -42,12 +42,12 @@ const ( func LoadRestConfigAndClientset(kubeconfig string) (*restclient.Config, *clientset.Clientset, error) { config, err := util.LoadRESTClientConfig(kubeconfig) if err != nil { - return nil, nil, fmt.Errorf("error load rest client config: %v", err) + return nil, nil, fmt.Errorf("error load rest client config: %w", err) } 
client, err := clientset.NewForConfig(config) if err != nil { - return nil, nil, fmt.Errorf("error new clientset: %v", err) + return nil, nil, fmt.Errorf("error new clientset: %w", err) } return config, client, nil