diff --git a/pkg/apis/pipeline/v1beta1/task_validation.go b/pkg/apis/pipeline/v1beta1/task_validation.go index 9615129e6a1..982f33f08c7 100644 --- a/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/pkg/apis/pipeline/v1beta1/task_validation.go @@ -78,7 +78,7 @@ func (tr TaskResult) Validate(_ context.Context) *apis.FieldError { // a mount path which conflicts with any other declared workspaces, with the explicitly // declared volume mounts, or with the stepTemplate. The names must also be unique. -func ValidateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) { +func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) { mountPaths := sets.NewString() for _, step := range steps { for _, vm := range step.VolumeMounts { diff --git a/pkg/artifacts/artifact_storage_test.go b/pkg/artifacts/artifact_storage_test.go index 76e4a5b8cc3..dd82d792658 100644 --- a/pkg/artifacts/artifact_storage_test.go +++ b/pkg/artifacts/artifact_storage_test.go @@ -56,7 +56,7 @@ var ( } defaultStorageClass *string customStorageClass = "custom-storage-class" - persistentVolumeClaim = GetPersistentVolumeClaim(config.DefaultPVCSize, defaultStorageClass) + persistentVolumeClaim = getPersistentVolumeClaim(config.DefaultPVCSize, defaultStorageClass) quantityComparer = cmp.Comparer(func(x, y resource.Quantity) bool { return x.Cmp(y) == 0 }) @@ -101,7 +101,7 @@ var ( } ) -func GetPersistentVolumeClaim(size string, storageClassName *string) *corev1.PersistentVolumeClaim { +func getPersistentVolumeClaim(size string, storageClassName *string) *corev1.PersistentVolumeClaim { pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pipelineruntest-pvc", Namespace: pipelinerun.Namespace, OwnerReferences: []metav1.OwnerReference{pipelinerun.GetOwnerReference()}}, Spec: corev1.PersistentVolumeClaimSpec{ @@ -158,7 +158,7 @@ func 
TestNeedsPVC(t *testing.T) { ArtifactBucket: artifactBucket, } ctx := config.ToContext(context.Background(), &configs) - needed := NeedsPVC(ctx) + needed := needsPVC(ctx) if needed != c.pvcNeeded { t.Fatalf("Expected that ConfigMapNeedsPVC would be %t, but was %t", c.pvcNeeded, needed) } @@ -181,7 +181,7 @@ func TestInitializeArtifactStorage(t *testing.T) { storagetype: "pvc", expectedArtifactStorage: &storage.ArtifactPVC{ Name: "pipelineruntest", - PersistentVolumeClaim: GetPersistentVolumeClaim("10Gi", defaultStorageClass), + PersistentVolumeClaim: getPersistentVolumeClaim("10Gi", defaultStorageClass), ShellImage: "busybox", }, }, { @@ -192,7 +192,7 @@ func TestInitializeArtifactStorage(t *testing.T) { storagetype: "pvc", expectedArtifactStorage: &storage.ArtifactPVC{ Name: "pipelineruntest", - PersistentVolumeClaim: GetPersistentVolumeClaim("5Gi", &customStorageClass), + PersistentVolumeClaim: getPersistentVolumeClaim("5Gi", &customStorageClass), ShellImage: "busybox", }, }, { @@ -447,7 +447,7 @@ func TestCleanupArtifactStorage(t *testing.T) { storageConfig: map[string]string{}, }} { t.Run(c.desc, func(t *testing.T) { - fakekubeclient := fakek8s.NewSimpleClientset(GetPVCSpec(pipelinerun, persistentVolumeClaim.Spec.Resources.Requests["storage"], defaultStorageClass)) + fakekubeclient := fakek8s.NewSimpleClientset(getPVCSpec(pipelinerun, persistentVolumeClaim.Spec.Resources.Requests["storage"], defaultStorageClass)) ab, err := config.NewArtifactBucketFromMap(c.storageConfig) if err != nil { t.Fatalf("Error getting an ArtifactBucket from data %s, %s", c.storageConfig, err) diff --git a/pkg/artifacts/artifacts_storage.go b/pkg/artifacts/artifacts_storage.go index 55f0017167a..9357b04d0b2 100644 --- a/pkg/artifacts/artifacts_storage.go +++ b/pkg/artifacts/artifacts_storage.go @@ -104,7 +104,7 @@ func InitializeArtifactStorage(ctx context.Context, images pipeline.Images, pr * return &ArtifactStorageNone{}, nil } - if NeedsPVC(ctx) { + if needsPVC(ctx) { pvc, err := 
createPVC(ctx, pr, c) if err != nil { return nil, err @@ -112,14 +112,14 @@ func InitializeArtifactStorage(ctx context.Context, images pipeline.Images, pr * return &storage.ArtifactPVC{Name: pr.Name, PersistentVolumeClaim: pvc, ShellImage: images.ShellImage}, nil } - return NewArtifactBucketFromConfig(ctx, images), nil + return newArtifactBucketFromConfig(ctx, images), nil } // CleanupArtifactStorage will delete the PipelineRun's artifact storage PVC if it exists. The PVC is created for using // an output workspace or artifacts from one Task to another Task. No other PVCs will be impacted by this cleanup. func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error { - if NeedsPVC(ctx) { + if needsPVC(ctx) { err := deletePVC(pr, c) if err != nil { return err @@ -128,9 +128,9 @@ func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kube return nil } -// NeedsPVC checks if the Tekton is is configured to use a bucket for artifact storage, +// needsPVC checks if the Tekton is is configured to use a bucket for artifact storage, // returning true if instead a PVC is needed. 
-func NeedsPVC(ctx context.Context) bool { +func needsPVC(ctx context.Context) bool { bucketConfig := config.FromContextOrDefaults(ctx).ArtifactBucket if bucketConfig == nil { return true @@ -145,14 +145,14 @@ func NeedsPVC(ctx context.Context) bool { // GetArtifactStorage returns the storage interface to enable // consumer code to get a container step for copy to/from storage func GetArtifactStorage(ctx context.Context, images pipeline.Images, prName string, c kubernetes.Interface) ArtifactStorageInterface { - if NeedsPVC(ctx) { + if needsPVC(ctx) { return &storage.ArtifactPVC{Name: prName, ShellImage: images.ShellImage} } - return NewArtifactBucketFromConfig(ctx, images) + return newArtifactBucketFromConfig(ctx, images) } -// NewArtifactBucketFromConfig creates a Bucket from the supplied ConfigMap -func NewArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *storage.ArtifactBucket { +// newArtifactBucketFromConfig creates a Bucket from the supplied ConfigMap +func newArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *storage.ArtifactBucket { c := &storage.ArtifactBucket{ ShellImage: images.ShellImage, GsutilImage: images.GsutilImage, @@ -191,7 +191,7 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf pvcStorageClassName = &pvcConfig.StorageClassName } - pvcSpec := GetPVCSpec(pr, pvcSize, pvcStorageClassName) + pvcSpec := getPVCSpec(pr, pvcSize, pvcStorageClassName) pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvcSpec) if err != nil { return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %w", pr.Name, err) @@ -214,8 +214,8 @@ func deletePVC(pr *v1beta1.PipelineRun, c kubernetes.Interface) error { return nil } -// GetPVCSpec returns the PVC to create for a given PipelineRun -func GetPVCSpec(pr *v1beta1.PipelineRun, pvcSize resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim { +// getPVCSpec returns the PVC to create for a given 
PipelineRun +func getPVCSpec(pr *v1beta1.PipelineRun, pvcSize resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim { return &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: pr.Namespace, diff --git a/pkg/contexts/contexts.go b/pkg/contexts/contexts.go index 63fad40d814..ac8dfd1722d 100644 --- a/pkg/contexts/contexts.go +++ b/pkg/contexts/contexts.go @@ -22,16 +22,16 @@ import "context" // with a context.Context. type hdcnKey struct{} -// WithDefaultConfigurationName notes on the context for nested validation +// withDefaultConfigurationName notes on the context for nested validation // that there is a default configuration name, which affects how an empty // configurationName is validated. -func WithDefaultConfigurationName(ctx context.Context) context.Context { +func withDefaultConfigurationName(ctx context.Context) context.Context { return context.WithValue(ctx, hdcnKey{}, struct{}{}) } -// HasDefaultConfigurationName checks to see whether the given context has +// hasDefaultConfigurationName checks to see whether the given context has // been marked as having a default configurationName. 
-func HasDefaultConfigurationName(ctx context.Context) bool { +func hasDefaultConfigurationName(ctx context.Context) bool { return ctx.Value(hdcnKey{}) != nil } diff --git a/pkg/contexts/contexts_test.go b/pkg/contexts/contexts_test.go index 243ba6bf372..73b2f1d40a8 100644 --- a/pkg/contexts/contexts_test.go +++ b/pkg/contexts/contexts_test.go @@ -30,13 +30,13 @@ func TestContexts(t *testing.T) { want bool }{{ name: "has default config name", - ctx: WithDefaultConfigurationName(ctx), - check: HasDefaultConfigurationName, + ctx: withDefaultConfigurationName(ctx), + check: hasDefaultConfigurationName, want: true, }, { name: "doesn't have default config name", ctx: ctx, - check: HasDefaultConfigurationName, + check: hasDefaultConfigurationName, want: false, }, { name: "are upgrading via defaulting", diff --git a/pkg/git/git.go b/pkg/git/git.go index 9d7dca7419d..04480d5b8b3 100644 --- a/pkg/git/git.go +++ b/pkg/git/git.go @@ -157,13 +157,13 @@ func Fetch(logger *zap.SugaredLogger, spec FetchSpec) error { if err != nil { return err } - ref, err := ShowRef(logger, "HEAD", spec.Path) + ref, err := showRef(logger, "HEAD", spec.Path) if err != nil { return err } logger.Infof("Successfully cloned %s @ %s (%s) in path %s", trimmedURL, commit, ref, spec.Path) if spec.Submodules { - if err := SubmoduleFetch(logger, spec); err != nil { + if err := submoduleFetch(logger, spec); err != nil { return err } } @@ -178,7 +178,7 @@ func ShowCommit(logger *zap.SugaredLogger, revision, path string) (string, error return strings.TrimSuffix(output, "\n"), nil } -func ShowRef(logger *zap.SugaredLogger, revision, path string) (string, error) { +func showRef(logger *zap.SugaredLogger, revision, path string) (string, error) { output, err := run(logger, path, "show", "-q", "--pretty=format:%D", revision) if err != nil { return "", err @@ -186,7 +186,7 @@ func ShowRef(logger *zap.SugaredLogger, revision, path string) (string, error) { return strings.TrimSuffix(output, "\n"), nil } -func 
SubmoduleFetch(logger *zap.SugaredLogger, spec FetchSpec) error { +func submoduleFetch(logger *zap.SugaredLogger, spec FetchSpec) error { if spec.Path != "" { if err := os.Chdir(spec.Path); err != nil { return fmt.Errorf("failed to change directory with path %s; err: %w", spec.Path, err) diff --git a/pkg/pod/status.go b/pkg/pod/status.go index be8efc9e31a..1bdcee6034f 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -62,7 +62,7 @@ const ( ReasonPodCreationFailed = "PodCreationFailed" // ReasonPending indicates that the pod is in corev1.Pending, and the reason is not - // ReasonExceededNodeResources or IsPodHitConfigError + // ReasonExceededNodeResources or isPodHitConfigError ReasonPending = "Pending" //timeFormat is RFC3339 with millisecond @@ -101,7 +101,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running - MarkStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") } sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers) @@ -272,9 +272,9 @@ func extractStartedAtTimeFromResults(results []v1beta1.PipelineResourceResult) ( func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { if DidTaskRunFail(pod) { msg := getFailureMessage(pod) - MarkStatusFailure(trs, msg) + markStatusFailure(trs, msg) } else { - MarkStatusSuccess(trs) + markStatusSuccess(trs) } // update tr completed time @@ -284,21 +284,21 @@ func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { func updateIncompleteTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { switch pod.Status.Phase { case 
corev1.PodRunning: - MarkStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") case corev1.PodPending: var reason, msg string switch { case IsPodExceedingNodeResources(pod): reason = ReasonExceededNodeResources msg = "TaskRun Pod exceeded available resources" - case IsPodHitConfigError(pod): + case isPodHitConfigError(pod): reason = ReasonCreateContainerConfigError msg = getWaitingMessage(pod) default: reason = ReasonPending msg = getWaitingMessage(pod) } - MarkStatusRunning(trs, reason, msg) + markStatusRunning(trs, reason, msg) } } @@ -368,8 +368,8 @@ func IsPodExceedingNodeResources(pod *corev1.Pod) bool { return false } -// IsPodHitConfigError returns true if the Pod's status undicates there are config error raised -func IsPodHitConfigError(pod *corev1.Pod) bool { +// isPodHitConfigError returns true if the Pod's status indicates a config error was raised +func isPodHitConfigError(pod *corev1.Pod) bool { for _, containerStatus := range pod.Status.ContainerStatuses { if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == "CreateContainerConfigError" { return true @@ -405,8 +405,8 @@ func getWaitingMessage(pod *corev1.Pod) string { return "Pending" } -// MarkStatusRunning sets taskrun status to running -func MarkStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) { +// markStatusRunning sets taskrun status to running +func markStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) { trs.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown, @@ -415,8 +415,8 @@ func MarkStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) { }) } -// MarkStatusFailure sets taskrun status to failure -func MarkStatusFailure(trs *v1beta1.TaskRunStatus, message string) { +// markStatusFailure sets taskrun
status to failure +func markStatusFailure(trs *v1beta1.TaskRunStatus, message string) { trs.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse, @@ -425,8 +425,8 @@ func MarkStatusFailure(trs *v1beta1.TaskRunStatus, message string) { }) } -// MarkStatusSuccess sets taskrun status to success -func MarkStatusSuccess(trs *v1beta1.TaskRunStatus) { +// markStatusSuccess sets taskrun status to success +func markStatusSuccess(trs *v1beta1.TaskRunStatus) { trs.SetCondition(&apis.Condition{ Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue, diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index c7bb2305700..25ec0f8f64d 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -1228,7 +1228,7 @@ func TestSidecarsReady(t *testing.T) { func TestMarkStatusRunning(t *testing.T) { trs := v1beta1.TaskRunStatus{} - MarkStatusRunning(&trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + markStatusRunning(&trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") expected := &apis.Condition{ Type: apis.ConditionSucceeded, @@ -1244,7 +1244,7 @@ func TestMarkStatusRunning(t *testing.T) { func TestMarkStatusFailure(t *testing.T) { trs := v1beta1.TaskRunStatus{} - MarkStatusFailure(&trs, "failure message") + markStatusFailure(&trs, "failure message") expected := &apis.Condition{ Type: apis.ConditionSucceeded, @@ -1260,7 +1260,7 @@ func TestMarkStatusFailure(t *testing.T) { func TestMarkStatusSuccess(t *testing.T) { trs := v1beta1.TaskRunStatus{} - MarkStatusSuccess(&trs) + markStatusSuccess(&trs) expected := &apis.Condition{ Type: apis.ConditionSucceeded, diff --git a/pkg/reconciler/events/cloudevent/cloud_event_controller.go b/pkg/reconciler/events/cloudevent/cloud_event_controller.go index 6a5dfa7b962..970c097bd23 100644 --- a/pkg/reconciler/events/cloudevent/cloud_event_controller.go +++ 
b/pkg/reconciler/events/cloudevent/cloud_event_controller.go @@ -76,7 +76,7 @@ func SendCloudEvents(tr *v1beta1.TaskRun, ceclient CEClient, logger *zap.Sugared logger = logger.With(zap.String("taskrun", tr.Name)) // Make the event we would like to send: - event, err := EventForTaskRun(tr) + event, err := eventForTaskRun(tr) if err != nil || event == nil { logger.With(zap.Error(err)).Error("failed to produce a cloudevent from TaskRun.") return err @@ -132,7 +132,7 @@ func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error if ceClient == nil { return errors.New("No cloud events client found in the context") } - event, err := EventForObjectWithCondition(o) + event, err := eventForObjectWithCondition(o) if err != nil { return err } diff --git a/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go b/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go index 6c2a9da83b0..400d1faa2ee 100644 --- a/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go +++ b/pkg/reconciler/events/cloudevent/cloud_event_controller_test.go @@ -138,7 +138,7 @@ func TestSendCloudEvents(t *testing.T) { successfulBehaviour := FakeClientBehaviour{ SendSuccessfully: true, } - err := SendCloudEvents(tc.taskRun, NewFakeClient(&successfulBehaviour), logger) + err := SendCloudEvents(tc.taskRun, newFakeClient(&successfulBehaviour), logger) if err != nil { t.Fatalf("Unexpected error sending cloud events: %v", err) } @@ -196,7 +196,7 @@ func TestSendCloudEventsErrors(t *testing.T) { unsuccessfulBehaviour := FakeClientBehaviour{ SendSuccessfully: false, } - err := SendCloudEvents(tc.taskRun, NewFakeClient(&unsuccessfulBehaviour), logger) + err := SendCloudEvents(tc.taskRun, newFakeClient(&unsuccessfulBehaviour), logger) if err == nil { t.Fatalf("Unexpected success sending cloud events: %v", err) } diff --git a/pkg/reconciler/events/cloudevent/cloudevent.go b/pkg/reconciler/events/cloudevent/cloudevent.go index 1bff4d4b852..f98ac8b8887 100644 --- 
a/pkg/reconciler/events/cloudevent/cloudevent.go +++ b/pkg/reconciler/events/cloudevent/cloudevent.go @@ -77,8 +77,8 @@ type TektonCloudEventData struct { PipelineRun *v1beta1.PipelineRun `json:"pipelineRun,omitempty"` } -// NewTektonCloudEventData returns a new instance of NewTektonCloudEventData -func NewTektonCloudEventData(runObject objectWithCondition) TektonCloudEventData { +// newTektonCloudEventData returns a new instance of TektonCloudEventData +func newTektonCloudEventData(runObject objectWithCondition) TektonCloudEventData { tektonCloudEventData := TektonCloudEventData{} switch v := runObject.(type) { case *v1beta1.TaskRun: @@ -89,9 +89,9 @@ return tektonCloudEventData } -// EventForObjectWithCondition creates a new event based for a objectWithCondition, +// eventForObjectWithCondition creates a new event for an objectWithCondition, // or return an error if not possible. -func EventForObjectWithCondition(runObject objectWithCondition) (*cloudevents.Event, error) { +func eventForObjectWithCondition(runObject objectWithCondition) (*cloudevents.Event, error) { event := cloudevents.NewEvent() event.SetID(uuid.New().String()) event.SetSubject(runObject.GetObjectMeta().GetName()) @@ -105,30 +105,30 @@ } event.SetType(eventType.String()) - if err := event.SetData(cloudevents.ApplicationJSON, NewTektonCloudEventData(runObject)); err != nil { + if err := event.SetData(cloudevents.ApplicationJSON, newTektonCloudEventData(runObject)); err != nil { return nil, err } return &event, nil } -// EventForTaskRun will create a new event based on a TaskRun, +// eventForTaskRun will create a new event based on a TaskRun, // or return an error if not possible.
-func EventForTaskRun(taskRun *v1beta1.TaskRun) (*cloudevents.Event, error) { +func eventForTaskRun(taskRun *v1beta1.TaskRun) (*cloudevents.Event, error) { // Check if the TaskRun is defined if taskRun == nil { return nil, errors.New("Cannot send an event for an empty TaskRun") } - return EventForObjectWithCondition(taskRun) + return eventForObjectWithCondition(taskRun) } -// EventForPipelineRun will create a new event based on a TaskRun, +// eventForPipelineRun will create a new event based on a PipelineRun, // or return an error if not possible. -func EventForPipelineRun(pipelineRun *v1beta1.PipelineRun) (*cloudevents.Event, error) { +func eventForPipelineRun(pipelineRun *v1beta1.PipelineRun) (*cloudevents.Event, error) { // Check if the TaskRun is defined if pipelineRun == nil { return nil, errors.New("Cannot send an event for an empty PipelineRun") } - return EventForObjectWithCondition(pipelineRun) + return eventForObjectWithCondition(pipelineRun) } func getEventType(runObject objectWithCondition) (*TektonEventType, error) { diff --git a/pkg/reconciler/events/cloudevent/cloudevent_test.go b/pkg/reconciler/events/cloudevent/cloudevent_test.go index 958e9b20469..a31e521576d 100644 --- a/pkg/reconciler/events/cloudevent/cloudevent_test.go +++ b/pkg/reconciler/events/cloudevent/cloudevent_test.go @@ -112,7 +112,7 @@ func TestEventForTaskRun(t *testing.T) { t.Run(c.desc, func(t *testing.T) { names.TestingSeed() - got, err := EventForTaskRun(c.taskRun) + got, err := eventForTaskRun(c.taskRun) if err != nil { t.Fatalf("I did not expect an error but I got %s", err) } else { @@ -123,7 +123,7 @@ if d := cmp.Diff(string(c.wantEventType), got.Type()); d != "" { t.Errorf("Wrong Event Type %s", diff.PrintWantGot(d)) } - wantData := NewTektonCloudEventData(c.taskRun) + wantData := newTektonCloudEventData(c.taskRun) gotData := TektonCloudEventData{} if err := got.DataAs(&gotData); err != nil { t.Errorf("Unexpected error from DataAsl; %s",
err) @@ -169,7 +169,7 @@ func TestEventForPipelineRun(t *testing.T) { t.Run(c.desc, func(t *testing.T) { names.TestingSeed() - got, err := EventForPipelineRun(c.pipelineRun) + got, err := eventForPipelineRun(c.pipelineRun) if err != nil { t.Fatalf("I did not expect an error but I got %s", err) } else { @@ -180,7 +180,7 @@ func TestEventForPipelineRun(t *testing.T) { if d := cmp.Diff(string(c.wantEventType), got.Type()); d != "" { t.Errorf("Wrong Event Type %s", diff.PrintWantGot(d)) } - wantData := NewTektonCloudEventData(c.pipelineRun) + wantData := newTektonCloudEventData(c.pipelineRun) gotData := TektonCloudEventData{} if err := got.DataAs(&gotData); err != nil { t.Errorf("Unexpected error from DataAsl; %s", err) diff --git a/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go b/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go index 252ba72be7e..b9ef53f7163 100644 --- a/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go +++ b/pkg/reconciler/events/cloudevent/cloudeventsfakeclient.go @@ -39,8 +39,8 @@ type FakeClient struct { Events chan string } -// NewFakeClient is a FakeClient factory, it returns a client for the target -func NewFakeClient(behaviour *FakeClientBehaviour) cloudevents.Client { +// newFakeClient is a FakeClient factory, it returns a client for the target +func newFakeClient(behaviour *FakeClientBehaviour) cloudevents.Client { return FakeClient{ behaviour: behaviour, Events: make(chan string, bufferSize), @@ -74,5 +74,5 @@ func (c FakeClient) StartReceiver(ctx context.Context, fn interface{}) error { // WithClient adds to the context a fake client with the desired behaviour func WithClient(ctx context.Context, behaviour *FakeClientBehaviour) context.Context { - return context.WithValue(ctx, CECKey{}, NewFakeClient(behaviour)) + return context.WithValue(ctx, CECKey{}, newFakeClient(behaviour)) } diff --git a/pkg/reconciler/events/event.go b/pkg/reconciler/events/event.go index 54d0d1ede9c..6fa7830853a 100644 --- 
a/pkg/reconciler/events/event.go +++ b/pkg/reconciler/events/event.go @@ -103,7 +103,7 @@ func EmitError(c record.EventRecorder, err error, object runtime.Object) { } } -// EmitEvent emits an event for object if afterCondition is different from beforeCondition +// emitEvent emits an event for object if afterCondition is different from beforeCondition // // Status "ConditionUnknown": // beforeCondition == nil, emit EventReasonStarted @@ -112,12 +112,12 @@ func EmitError(c record.EventRecorder, err error, object runtime.Object) { // Status "ConditionTrue": emit EventReasonSucceded // Status "ConditionFalse": emit EventReasonFailed // Deprecated: use Emit -func EmitEvent(ctx context.Context, beforeCondition *apis.Condition, afterCondition *apis.Condition, object runtime.Object) { +func emitEvent(ctx context.Context, beforeCondition *apis.Condition, afterCondition *apis.Condition, object runtime.Object) { Emit(ctx, beforeCondition, afterCondition, object) } -// EmitErrorEvent emits a failure associated to an error +// emitErrorEvent emits a failure associated to an error // Deprecated: use EmitError instead -func EmitErrorEvent(c record.EventRecorder, err error, object runtime.Object) { +func emitErrorEvent(c record.EventRecorder, err error, object runtime.Object) { EmitError(c, err, object) } diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 7a883e0f90c..ff49b4761be 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -324,7 +324,7 @@ func TestReconcile(t *testing.T) { ClusterTasks: clusterTasks, PipelineResources: rs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -438,7 +438,7 @@ func TestReconcile_PipelineSpecTaskSpec(t *testing.T) { PipelineRuns: prs, Pipelines: ps, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := 
[]string{ @@ -679,7 +679,7 @@ func TestReconcile_InvalidPipelineRuns(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := append(tc.wantEvents, "Warning InternalError 1 error occurred") @@ -1003,7 +1003,7 @@ func TestReconcileOnCompletedPipelineRun(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -1090,7 +1090,7 @@ func TestReconcileOnCancelledPipelineRun(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -1130,7 +1130,7 @@ func TestReconcileWithTimeout(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -1182,7 +1182,7 @@ func TestReconcileWithoutPVC(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run", []string{}, false) @@ -1297,7 +1297,7 @@ func TestReconcileCancelledPipelineRun(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -1357,7 +1357,7 @@ func TestReconcilePropagateLabels(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-with-labels", []string{}, false) @@ -1399,7 +1399,7 @@ func TestReconcileWithDifferentServiceAccounts(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) @@ -1523,7 
+1523,7 @@ func TestReconcileWithTimeoutAndRetry(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-retry-run-with-timeout", []string{}, false) @@ -1558,7 +1558,7 @@ func TestReconcilePropagateAnnotations(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-with-annotations", []string{}, false) @@ -1716,7 +1716,7 @@ func TestReconcileAndPropagateCustomPipelineTaskRunSpec(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", prName, []string{}, false) @@ -1808,7 +1808,7 @@ func TestReconcileWithConditionChecks(t *testing.T) { Tasks: ts, Conditions: conditions, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -1926,7 +1926,7 @@ func TestReconcileWithFailingConditionChecks(t *testing.T) { TaskRuns: trs, Conditions: conditions, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -2035,7 +2035,7 @@ func TestReconcileWithWhenExpressionsWithParameters(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -2156,7 +2156,7 @@ func TestReconcileWithWhenExpressionsWithTaskResults(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -2252,7 +2252,7 @@ func TestReconcileWithAffinityAssistantStatefulSet(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := 
prt.reconcileRun("foo", pipelineRunName, []string{}, false) @@ -2344,7 +2344,7 @@ func TestReconcileWithVolumeClaimTemplateWorkspace(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", pipelineRunName, []string{}, false) @@ -2420,7 +2420,7 @@ func TestReconcileWithVolumeClaimTemplateWorkspaceUsingSubPaths(t *testing.T) { Pipelines: ps, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run", []string{}, false) @@ -2546,7 +2546,7 @@ func TestReconcileWithTaskResults(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() _, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) @@ -2617,7 +2617,7 @@ func TestReconcileWithTaskResultsEmbeddedNoneStarted(t *testing.T) { PipelineRuns: prs, Tasks: ts, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) @@ -2725,7 +2725,7 @@ func TestReconcileWithPipelineResults(t *testing.T) { Tasks: ts, TaskRuns: trs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-run-different-service-accs", []string{}, false) @@ -2932,7 +2932,7 @@ func TestReconcileOutOfSyncPipelineRun(t *testing.T) { TaskRuns: trs, Conditions: cs, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", prOutOfSync.Name, []string{}, false) @@ -3660,7 +3660,7 @@ func TestReconcilePipeline_FinalTasks(t *testing.T) { Tasks: tt.ts, TaskRuns: tt.trs, } - prt := NewPipelineRunTest(d, t) + prt 
:= newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", tt.pipelineRunName, []string{}, false) @@ -3803,7 +3803,7 @@ func TestReconcile_CloudEvents(t *testing.T) { Tasks: ts, ConfigMaps: cms, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() wantEvents := []string{ @@ -3874,7 +3874,7 @@ func TestReconcilePipeline_TaskSpecMetadata(t *testing.T) { PipelineRuns: prs, Pipelines: ps, } - prt := NewPipelineRunTest(d, t) + prt := newPipelineRunTest(d, t) defer prt.Cancel() reconciledRun, clients := prt.reconcileRun("foo", "test-pipeline-run-success", []string{}, false) @@ -3925,9 +3925,9 @@ func TestReconcilePipeline_TaskSpecMetadata(t *testing.T) { } } -// NewPipelineRunTest returns PipelineRunTest with a new PipelineRun controller created with specified state through data +// newPipelineRunTest returns PipelineRunTest with a new PipelineRun controller created with specified state through data // This PipelineRunTest can be reused for multiple PipelineRuns by calling reconcileRun for each pipelineRun -func NewPipelineRunTest(data test.Data, t *testing.T) *PipelineRunTest { +func newPipelineRunTest(data test.Data, t *testing.T) *PipelineRunTest { t.Helper() testAssets, cancel := getPipelineRunController(t, data) return &PipelineRunTest{ diff --git a/pkg/reconciler/pipelinerun/resources/conditionresolution.go b/pkg/reconciler/pipelinerun/resources/conditionresolution.go index 973988b1d07..894dcfeee58 100644 --- a/pkg/reconciler/pipelinerun/resources/conditionresolution.go +++ b/pkg/reconciler/pipelinerun/resources/conditionresolution.go @@ -114,7 +114,7 @@ func (rcc *ResolvedConditionCheck) ConditionToTaskSpec() (*v1beta1.TaskSpec, err } convertParamTemplates(&t.Steps[0], rcc.Condition.Spec.Params) - err := ApplyResourceSubstitution(&t.Steps[0], rcc.ResolvedResources, rcc.Condition.Spec.Resources, rcc.images) + err := applyResourceSubstitution(&t.Steps[0], rcc.ResolvedResources, 
rcc.Condition.Spec.Resources, rcc.images) if err != nil { return nil, fmt.Errorf("failed to replace resource template strings %w", err) @@ -134,9 +134,9 @@ func convertParamTemplates(step *v1beta1.Step, params []v1beta1.ParamSpec) { v1beta1.ApplyStepReplacements(step, replacements, map[string][]string{}) } -// ApplyResourceSubstitution applies the substitution from values in resources which are referenced +// applyResourceSubstitution applies the substitution from values in resources which are referenced // in spec as subitems of the replacementStr. -func ApplyResourceSubstitution(step *v1beta1.Step, resolvedResources map[string]*resourcev1alpha1.PipelineResource, conditionResources []v1beta1.ResourceDeclaration, images pipeline.Images) error { +func applyResourceSubstitution(step *v1beta1.Step, resolvedResources map[string]*resourcev1alpha1.PipelineResource, conditionResources []v1beta1.ResourceDeclaration, images pipeline.Images) error { replacements := make(map[string]string) for _, cr := range conditionResources { if rSpec, ok := resolvedResources[cr.Name]; ok { diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go index 7c23ea4e0d7..514dc3ea578 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution.go @@ -347,7 +347,7 @@ func ResolvePipelineRun( spec = *pt.TaskSpec.TaskSpec } spec.SetDefaults(contexts.WithUpgradeViaDefaulting(ctx)) - rtr, err := ResolvePipelineTaskResources(pt, &spec, taskName, kind, providedResources) + rtr, err := resolvePipelineTaskResources(pt, &spec, taskName, kind, providedResources) if err != nil { return nil, fmt.Errorf("couldn't match referenced resources with declared resources: %w", err) } @@ -451,9 +451,9 @@ func resolveConditionChecks(pt *v1beta1.PipelineTask, taskRunStatus map[string]* return rccs, nil } -// ResolvePipelineTaskResources matches PipelineResources 
referenced by pt inputs and outputs with the +// resolvePipelineTaskResources matches PipelineResources referenced by pt inputs and outputs with the // providedResources and returns an instance of ResolvedTaskResources. -func ResolvePipelineTaskResources(pt v1beta1.PipelineTask, ts *v1beta1.TaskSpec, taskName string, kind v1beta1.TaskKind, providedResources map[string]*resourcev1alpha1.PipelineResource) (*resources.ResolvedTaskResources, error) { +func resolvePipelineTaskResources(pt v1beta1.PipelineTask, ts *v1beta1.TaskSpec, taskName string, kind v1beta1.TaskKind, providedResources map[string]*resourcev1alpha1.PipelineResource) (*resources.ResolvedTaskResources, error) { rtr := resources.ResolvedTaskResources{ TaskName: taskName, TaskSpec: ts, diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index ecfd37cd602..1ca72cbf7e8 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -539,7 +539,7 @@ var taskWithOptionalResources = &v1beta1.Task{ }, } -func DagFromState(state PipelineRunState) (*dag.Graph, error) { +func dagFromState(state PipelineRunState) (*dag.Graph, error) { pts := []v1beta1.PipelineTask{} for _, rprt := range state { pts = append(pts, *rprt.PipelineTask) @@ -835,7 +835,7 @@ func TestIsSkipped(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - d, err := DagFromState(tc.state) + d, err := dagFromState(tc.state) if err != nil { t.Fatalf("Could not get a dag from the TC state %#v: %v", tc.state, err) } diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go index c1f2cc5d20b..6c911a1b2e3 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go @@ -457,7 +457,7 @@ 
func TestPipelineRunState_SuccessfulOrSkippedDAGTasks(t *testing.T) { }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - d, err := DagFromState(tc.state) + d, err := dagFromState(tc.state) if err != nil { t.Fatalf("Unexpected error while buildig DAG for state %v: %v", tc.state, err) } @@ -882,7 +882,7 @@ func TestGetPipelineConditionStatus(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { pr := tb.PipelineRun("somepipelinerun") - d, err := DagFromState(tc.state) + d, err := dagFromState(tc.state) if err != nil { t.Fatalf("Unexpected error while buildig DAG for state %v: %v", tc.state, err) } @@ -1024,7 +1024,7 @@ func TestGetPipelineConditionStatus_WithFinalTasks(t *testing.T) { // pipeline should result in timeout if its runtime exceeds its spec.Timeout based on its status.Timeout func TestGetPipelineConditionStatus_PipelineTimeouts(t *testing.T) { - d, err := DagFromState(oneFinishedState) + d, err := dagFromState(oneFinishedState) if err != nil { t.Fatalf("Unexpected error while buildig DAG for state %v: %v", oneFinishedState, err) }