Skip to content

Commit

Permalink
Cleanup: use verb %w with fmt.Errorf() when
Browse files Browse the repository at this point in the history
generating a new wrapped error from an existing error
  • Loading branch information
zhaodiaoer committed May 15, 2022
1 parent 4e2a6ad commit a3e3834
Show file tree
Hide file tree
Showing 65 changed files with 215 additions and 182 deletions.
2 changes: 1 addition & 1 deletion cmd/yurt-controller-manager/app/controllermanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -319,7 +319,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
// If apiserver is not running we should wait for some time and fail only then. This is particularly
// important when we start apiserver and controller manager at the same time.
if err := genericcontrollermanager.WaitForAPIServer(versionedClient, 10*time.Second); err != nil {
return ControllerContext{}, fmt.Errorf("failed to wait for apiserver being healthy: %v", err)
return ControllerContext{}, fmt.Errorf("failed to wait for apiserver being healthy: %w", err)
}

ctx := ControllerContext{
Expand Down
2 changes: 1 addition & 1 deletion cmd/yurt-tunnel-server/app/options/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ func (o *ServerOptions) Config() (*config.Config, error) {
cfg.ListenMetaAddr = net.JoinHostPort(o.InsecureBindAddr, o.MetaPort)
cfg.RootCert, err = certmanager.GenRootCertPool(o.KubeConfig, constants.YurttunnelCAFile)
if err != nil {
return nil, fmt.Errorf("fail to generate the rootCertPool: %s", err)
return nil, fmt.Errorf("fail to generate the rootCertPool: %w", err)
}

// function 'kubeutil.CreateClientSet' will try to create the clientset
Expand Down
2 changes: 1 addition & 1 deletion cmd/yurt-tunnel-server/app/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ func Run(cfg *config.CompletedConfig, stopCh <-chan struct{}) error {
cfg.ListenAddrForMaster,
cfg.DNSSyncPeriod)
if err != nil {
return fmt.Errorf("fail to create a new dnsController, %v", err)
return fmt.Errorf("fail to create a new dnsController, %w", err)
}
go dnsController.Run(stopCh)
}
Expand Down
6 changes: 3 additions & 3 deletions cmd/yurthub/app/options/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ func ValidateOptions(options *YurtHubOptions) error {
}

if err := verifyDummyIP(options.HubAgentDummyIfIP); err != nil {
return fmt.Errorf("dummy ip %s is not invalid, %v", options.HubAgentDummyIfIP, err)
return fmt.Errorf("dummy ip %s is not invalid, %w", options.HubAgentDummyIfIP, err)
}

return nil
Expand Down Expand Up @@ -178,7 +178,7 @@ func verifyDummyIP(dummyIP string) error {

_, dummyIfIPNet, err := net.ParseCIDR(DummyIfCIDR)
if err != nil {
return fmt.Errorf("cidr(%s) is invalid, %v", DummyIfCIDR, err)
return fmt.Errorf("cidr(%s) is invalid, %w", DummyIfCIDR, err)
}

if !dummyIfIPNet.Contains(dip) {
Expand All @@ -187,7 +187,7 @@ func verifyDummyIP(dummyIP string) error {

_, exclusiveIPNet, err := net.ParseCIDR(ExclusiveCIDR)
if err != nil {
return fmt.Errorf("cidr(%s) is invalid, %v", ExclusiveCIDR, err)
return fmt.Errorf("cidr(%s) is invalid, %w", ExclusiveCIDR, err)
}

if exclusiveIPNet.Contains(dip) {
Expand Down
20 changes: 10 additions & 10 deletions cmd/yurthub/app/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,14 +90,14 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. create cert manager with %s mode", trace, cfg.CertMgrMode)
certManager, err := cmr.New(cfg.CertMgrMode, cfg)
if err != nil {
return fmt.Errorf("could not create certificate manager, %v", err)
return fmt.Errorf("could not create certificate manager, %w", err)
}
trace++

klog.Infof("%d. new transport manager", trace)
transportManager, err := transport.NewTransportManager(certManager, stopCh)
if err != nil {
return fmt.Errorf("could not new transport manager, %v", err)
return fmt.Errorf("could not new transport manager, %w", err)
}
trace++

Expand All @@ -106,7 +106,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. create health checker for remote servers ", trace)
healthChecker, err = healthchecker.NewHealthChecker(cfg, transportManager, stopCh)
if err != nil {
return fmt.Errorf("could not new health checker, %v", err)
return fmt.Errorf("could not new health checker, %w", err)
}
} else {
klog.Infof("%d. disable health checker for node %s because it is a cloud node", trace, cfg.NodeName)
Expand All @@ -120,14 +120,14 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. new restConfig manager for %s mode", trace, cfg.CertMgrMode)
restConfigMgr, err := rest.NewRestConfigManager(cfg, certManager, healthChecker)
if err != nil {
return fmt.Errorf("could not new restConfig manager, %v", err)
return fmt.Errorf("could not new restConfig manager, %w", err)
}
trace++

klog.Infof("%d. create tls config for secure servers ", trace)
cfg.TLSConfig, err = server.GenUseCertMgrAndTLSConfig(restConfigMgr, certManager, filepath.Join(cfg.RootDir, "pki"), cfg.YurtHubProxyServerSecureDummyAddr, stopCh)
if err != nil {
return fmt.Errorf("could not create tls config, %v", err)
return fmt.Errorf("could not create tls config, %w", err)
}
trace++

Expand All @@ -136,7 +136,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. new cache manager with storage wrapper and serializer manager", trace)
cacheMgr, err = cachemanager.NewCacheManager(cfg.StorageWrapper, cfg.SerializerManager, cfg.RESTMapperManager, cfg.SharedFactory)
if err != nil {
return fmt.Errorf("could not new cache manager, %v", err)
return fmt.Errorf("could not new cache manager, %w", err)
}
} else {
klog.Infof("%d. disable cache manager for node %s because it is a cloud node", trace, cfg.NodeName)
Expand All @@ -147,7 +147,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. new gc manager for node %s, and gc frequency is a random time between %d min and %d min", trace, cfg.NodeName, cfg.GCFrequency, 3*cfg.GCFrequency)
gcMgr, err := gc.NewGCManager(cfg, restConfigMgr, stopCh)
if err != nil {
return fmt.Errorf("could not new gc manager, %v", err)
return fmt.Errorf("could not new gc manager, %w", err)
}
gcMgr.Run()
} else {
Expand All @@ -163,15 +163,15 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
yurtProxyHandler, err := proxy.NewYurtReverseProxyHandler(cfg, cacheMgr, transportManager, healthChecker, certManager, tenantMgr, stopCh)

if err != nil {
return fmt.Errorf("could not create reverse proxy handler, %v", err)
return fmt.Errorf("could not create reverse proxy handler, %w", err)
}
trace++

if cfg.EnableDummyIf {
klog.Infof("%d. create dummy network interface %s and init iptables manager", trace, cfg.HubAgentDummyIfName)
networkMgr, err := network.NewNetworkManager(cfg)
if err != nil {
return fmt.Errorf("could not create network manager, %v", err)
return fmt.Errorf("could not create network manager, %w", err)
}
networkMgr.Run(stopCh)
trace++
Expand All @@ -185,7 +185,7 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
klog.Infof("%d. new %s server and begin to serve, proxy server: %s, secure proxy server: %s, hub server: %s", trace, projectinfo.GetHubName(), cfg.YurtHubProxyServerAddr, cfg.YurtHubProxyServerSecureAddr, cfg.YurtHubServerAddr)
s, err := server.NewYurtHubServer(cfg, certManager, yurtProxyHandler)
if err != nil {
return fmt.Errorf("could not create hub server, %v", err)
return fmt.Errorf("could not create hub server, %w", err)
}
s.Run()
klog.Infof("hub agent exited")
Expand Down
14 changes: 7 additions & 7 deletions pkg/controller/kubernetes/controller/controller_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,20 +153,20 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, t
func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
}

newTaints := newNode.Spec.Taints
newNodeClone := oldNode.DeepCopy()
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
}

patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
}

_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
Expand Down Expand Up @@ -201,18 +201,18 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la

oldData, err := json.Marshal(node)
if err != nil {
return fmt.Errorf("failed to marshal the existing node %#v: %v", node, err)
return fmt.Errorf("failed to marshal the existing node %#v: %w", node, err)
}
newData, err := json.Marshal(newNode)
if err != nil {
return fmt.Errorf("failed to marshal the new node %#v: %v", newNode, err)
return fmt.Errorf("failed to marshal the new node %#v: %w", newNode, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil {
return fmt.Errorf("failed to create a two-way merge patch: %v", err)
return fmt.Errorf("failed to create a two-way merge patch: %w", err)
}
if _, err := kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
return fmt.Errorf("failed to patch the node: %v", err)
return fmt.Errorf("failed to patch the node: %w", err)
}
return nil
})
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
if apierrors.IsConflict(err) {
updateErrList = append(updateErrList,
fmt.Errorf("update status failed for pod %q: %v", Pod(pod), err))
fmt.Errorf("update status failed for pod %q: %w", Pod(pod), err))
continue
}
}
Expand Down
12 changes: 6 additions & 6 deletions pkg/controller/nodelifecycle/node_lifecycle_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -735,15 +735,15 @@ func (nc *Controller) doEvictionPass() {
nodeUID, _ := value.UID.(string)
pods, err := nc.getPodsAssignedToNode(value.Value)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %v", value.Value, err))
utilruntime.HandleError(fmt.Errorf("unable to list pods from node %q: %w", value.Value, err))
return false, 0
}
remaining, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, value.Value, nodeUID, nc.daemonSetStore)
if err != nil {
// We are not setting eviction status here.
// New pods will be handled by zonePodEvictor retry
// instead of immediate pod eviction.
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %w", value.Value, err))
return false, 0
}
if !nc.nodeEvictionMap.setStatus(value.Value, evicted) {
Expand Down Expand Up @@ -829,7 +829,7 @@ func (nc *Controller) monitorNodeHealth() error {
if currentReadyCondition != nil {
pods, err := nc.getPodsAssignedToNode(node.Name)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to list pods of node %v: %v", node.Name, err))
utilruntime.HandleError(fmt.Errorf("unable to list pods of node %v: %w", node.Name, err))
if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
// If error happened during node status transition (Ready -> NotReady)
// we need to mark node for retry to force MarkPodsNotReady execution
Expand All @@ -842,7 +842,7 @@ func (nc *Controller) monitorNodeHealth() error {
nc.processTaintBaseEviction(node, &observedReadyCondition)
} else {
if err := nc.processNoTaintBaseEviction(node, &observedReadyCondition, gracePeriod, pods); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %v; queuing for retry", node.Name, err))
utilruntime.HandleError(fmt.Errorf("unable to evict all pods from node %v: %w; queuing for retry", node.Name, err))
}
}

Expand All @@ -854,7 +854,7 @@ func (nc *Controller) monitorNodeHealth() error {
fallthrough
case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
if err = nodeutil.MarkPodsNotReady(nc.kubeClient, pods, node.Name, node); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %w; queuing for retry", node.Name, err))
nc.nodesToRetry.Store(node.Name, struct{}{})
continue
}
Expand Down Expand Up @@ -1450,7 +1450,7 @@ func (nc *Controller) evictPods(node *v1.Node, pods []*v1.Pod) (bool, error) {
// Handling immediate pod deletion.
_, err := nodeutil.DeletePods(nc.kubeClient, pods, nc.recorder, node.Name, string(node.UID), nc.daemonSetStore)
if err != nil {
return false, fmt.Errorf("unable to delete pods from node %q: %v", node.Name, err)
return false, fmt.Errorf("unable to delete pods from node %q: %w", node.Name, err)
}
return false, nil
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/controller/nodelifecycle/scheduler/taint_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,7 @@ func (tc *NoExecuteTaintManager) handlePodUpdate(podUpdate podUpdateItem) {
tc.cancelWorkWithEvent(podNamespacedName)
return
}
utilruntime.HandleError(fmt.Errorf("could not get pod %s/%s: %v", podUpdate.podName, podUpdate.podNamespace, err))
utilruntime.HandleError(fmt.Errorf("could not get pod %s/%s: %w", podUpdate.podName, podUpdate.podNamespace, err))
return
}

Expand Down Expand Up @@ -445,7 +445,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
delete(tc.taintedNodes, nodeUpdate.nodeName)
return
}
utilruntime.HandleError(fmt.Errorf("cannot get node %s: %v", nodeUpdate.nodeName, err))
utilruntime.HandleError(fmt.Errorf("cannot get node %s: %w", nodeUpdate.nodeName, err))
return
}

Expand Down
14 changes: 7 additions & 7 deletions pkg/controller/util/node/controller_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
if apierrors.IsConflict(err) {
updateErrList = append(updateErrList,
fmt.Errorf("update status failed for pod %q: %v", ctlnode.Pod(pod), err))
fmt.Errorf("update status failed for pod %q: %w", ctlnode.Pod(pod), err))
continue
}
}
Expand Down Expand Up @@ -205,7 +205,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to taint %+v unresponsive Node %q: %v",
"unable to taint %+v unresponsive Node %q: %w",
taintsToAdd,
node.Name,
err))
Expand All @@ -217,7 +217,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to remove %+v unneeded taint from unresponsive Node %q: %v",
"unable to remove %+v unneeded taint from unresponsive Node %q: %w",
taintsToRemove,
node.Name,
err))
Expand All @@ -235,7 +235,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[
if err != nil {
utilruntime.HandleError(
fmt.Errorf(
"unable to update labels %+v for Node %q: %v",
"unable to update labels %+v for Node %q: %w",
labelsToUpdate,
node.Name,
err))
Expand All @@ -250,7 +250,7 @@ func CreateAddNodeHandler(f func(node *v1.Node) error) func(obj interface{}) {
return func(originalObj interface{}) {
node := originalObj.(*v1.Node).DeepCopy()
if err := f(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %v", err))
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add: %w", err))
}
}
}
Expand All @@ -262,7 +262,7 @@ func CreateUpdateNodeHandler(f func(oldNode, newNode *v1.Node) error) func(oldOb
prevNode := origOldObj.(*v1.Node).DeepCopy()

if err := f(prevNode, node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %w", err))
}
}
}
Expand All @@ -287,7 +287,7 @@ func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{})
}
node := originalNode.DeepCopy()
if err := f(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %v", err))
utilruntime.HandleError(fmt.Errorf("Error while processing Node Add/Delete: %w", err))
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/node-servant/components/yurthub.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ func pingClusterHealthz(client *http.Client, addr string) (bool, error) {
b, err := io.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return false, fmt.Errorf("failed to read response of cluster healthz, %v", err)
return false, fmt.Errorf("failed to read response of cluster healthz, %w", err)
}

if resp.StatusCode != http.StatusOK {
Expand Down
4 changes: 2 additions & 2 deletions pkg/preflight/checks.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ func (nc NodeServantJobCheck) Check() (warnings []error, errorList []error) {
defer wg.Done()
if err := kubeutil.RunJobAndCleanup(nc.cliSet, &entity,
nc.waitServantJobTimeout, nc.checkServantJobPeriod); err != nil {
msg := fmt.Errorf("fail to run servant job(%s): %s\n", entity.GetName(), err)
msg := fmt.Errorf("fail to run servant job(%s): %w\n", entity.GetName(), err)
res <- msg
} else {
klog.V(1).Infof("servant job(%s) has succeeded\n", entity.GetName())
Expand Down Expand Up @@ -273,7 +273,7 @@ func (dac DirExistingCheck) Check() (warnings, errorList []error) {
defer f.Close()

_, err = f.Readdirnames(1)
if err == io.EOF {
if errors.Is(err, io.EOF) {
return nil, []error{errors.Errorf("%s is empty", dac.Path)}
}
return nil, nil
Expand Down
4 changes: 2 additions & 2 deletions pkg/util/certmanager/certmanager.go
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ func newCertManager(
certificateStore, err :=
store.NewFileStoreWrapper(componentName, certDir, certDir, "", "")
if err != nil {
return nil, fmt.Errorf("failed to initialize the server certificate store: %v", err)
return nil, fmt.Errorf("failed to initialize the server certificate store: %w", err)
}

getTemplate := func() *x509.CertificateRequest {
Expand Down Expand Up @@ -223,7 +223,7 @@ func newCertManager(
CertificateStore: certificateStore,
})
if err != nil {
return nil, fmt.Errorf("failed to initialize server certificate manager: %v", err)
return nil, fmt.Errorf("failed to initialize server certificate manager: %w", err)
}

return certManager, nil
Expand Down
4 changes: 2 additions & 2 deletions pkg/util/certmanager/pki.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ func GenRootCertPool(kubeConfig, caFile string) (*x509.CertPool, error) {
// load the root ca from the given kubeconfig file
config, err := clientcmd.LoadFromFile(kubeConfig)
if err != nil || config == nil {
return nil, fmt.Errorf("failed to load the kubeconfig file(%s), %v",
return nil, fmt.Errorf("failed to load the kubeconfig file(%s), %w",
kubeConfig, err)
}

Expand Down Expand Up @@ -163,7 +163,7 @@ func GenCertPoolUseCA(caFile string) (*x509.CertPool, error) {
if os.IsNotExist(err) {
return nil, fmt.Errorf("CA file(%s) doesn't exist", caFile)
}
return nil, fmt.Errorf("fail to stat the CA file(%s): %s", caFile, err)
return nil, fmt.Errorf("fail to stat the CA file(%s): %w", caFile, err)
}

caData, err := os.ReadFile(caFile)
Expand Down
Loading

0 comments on commit a3e3834

Please sign in to comment.