Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cleaning up multiple unnecessarily exported methods #3327

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pkg/apis/pipeline/v1beta1/task_validation.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ func (tr TaskResult) Validate(_ context.Context) *apis.FieldError {

// a mount path which conflicts with any other declared workspaces, with the explicitly
// declared volume mounts, or with the stepTemplate. The names must also be unique.
func ValidateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) {
func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *corev1.Container) (errs *apis.FieldError) {
mountPaths := sets.NewString()
for _, step := range steps {
for _, vm := range step.VolumeMounts {
Expand Down
12 changes: 6 additions & 6 deletions pkg/artifacts/artifact_storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ var (
}
defaultStorageClass *string
customStorageClass = "custom-storage-class"
persistentVolumeClaim = GetPersistentVolumeClaim(config.DefaultPVCSize, defaultStorageClass)
persistentVolumeClaim = getPersistentVolumeClaim(config.DefaultPVCSize, defaultStorageClass)
quantityComparer = cmp.Comparer(func(x, y resource.Quantity) bool {
return x.Cmp(y) == 0
})
Expand Down Expand Up @@ -101,7 +101,7 @@ var (
}
)

func GetPersistentVolumeClaim(size string, storageClassName *string) *corev1.PersistentVolumeClaim {
func getPersistentVolumeClaim(size string, storageClassName *string) *corev1.PersistentVolumeClaim {
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: "pipelineruntest-pvc", Namespace: pipelinerun.Namespace, OwnerReferences: []metav1.OwnerReference{pipelinerun.GetOwnerReference()}},
Spec: corev1.PersistentVolumeClaimSpec{
Expand Down Expand Up @@ -158,7 +158,7 @@ func TestNeedsPVC(t *testing.T) {
ArtifactBucket: artifactBucket,
}
ctx := config.ToContext(context.Background(), &configs)
needed := NeedsPVC(ctx)
needed := needsPVC(ctx)
if needed != c.pvcNeeded {
t.Fatalf("Expected that ConfigMapNeedsPVC would be %t, but was %t", c.pvcNeeded, needed)
}
Expand All @@ -181,7 +181,7 @@ func TestInitializeArtifactStorage(t *testing.T) {
storagetype: "pvc",
expectedArtifactStorage: &storage.ArtifactPVC{
Name: "pipelineruntest",
PersistentVolumeClaim: GetPersistentVolumeClaim("10Gi", defaultStorageClass),
PersistentVolumeClaim: getPersistentVolumeClaim("10Gi", defaultStorageClass),
ShellImage: "busybox",
},
}, {
Expand All @@ -192,7 +192,7 @@ func TestInitializeArtifactStorage(t *testing.T) {
storagetype: "pvc",
expectedArtifactStorage: &storage.ArtifactPVC{
Name: "pipelineruntest",
PersistentVolumeClaim: GetPersistentVolumeClaim("5Gi", &customStorageClass),
PersistentVolumeClaim: getPersistentVolumeClaim("5Gi", &customStorageClass),
ShellImage: "busybox",
},
}, {
Expand Down Expand Up @@ -447,7 +447,7 @@ func TestCleanupArtifactStorage(t *testing.T) {
storageConfig: map[string]string{},
}} {
t.Run(c.desc, func(t *testing.T) {
fakekubeclient := fakek8s.NewSimpleClientset(GetPVCSpec(pipelinerun, persistentVolumeClaim.Spec.Resources.Requests["storage"], defaultStorageClass))
fakekubeclient := fakek8s.NewSimpleClientset(getPVCSpec(pipelinerun, persistentVolumeClaim.Spec.Resources.Requests["storage"], defaultStorageClass))
ab, err := config.NewArtifactBucketFromMap(c.storageConfig)
if err != nil {
t.Fatalf("Error getting an ArtifactBucket from data %s, %s", c.storageConfig, err)
Expand Down
24 changes: 12 additions & 12 deletions pkg/artifacts/artifacts_storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,22 +104,22 @@ func InitializeArtifactStorage(ctx context.Context, images pipeline.Images, pr *
return &ArtifactStorageNone{}, nil
}

if NeedsPVC(ctx) {
if needsPVC(ctx) {
pvc, err := createPVC(ctx, pr, c)
if err != nil {
return nil, err
}
return &storage.ArtifactPVC{Name: pr.Name, PersistentVolumeClaim: pvc, ShellImage: images.ShellImage}, nil
}

return NewArtifactBucketFromConfig(ctx, images), nil
return newArtifactBucketFromConfig(ctx, images), nil
}

// CleanupArtifactStorage will delete the PipelineRun's artifact storage PVC if it exists. The PVC is created when using
// an output workspace or when passing artifacts from one Task to another Task. No other PVCs will be impacted by this cleanup.
func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interface) error {

if NeedsPVC(ctx) {
if needsPVC(ctx) {
err := deletePVC(pr, c)
if err != nil {
return err
Expand All @@ -128,9 +128,9 @@ func CleanupArtifactStorage(ctx context.Context, pr *v1beta1.PipelineRun, c kube
return nil
}

// NeedsPVC checks if Tekton is configured to use a bucket for artifact storage,
// needsPVC checks if Tekton is configured to use a bucket for artifact storage,
// returning true if instead a PVC is needed.
func NeedsPVC(ctx context.Context) bool {
func needsPVC(ctx context.Context) bool {
bucketConfig := config.FromContextOrDefaults(ctx).ArtifactBucket
if bucketConfig == nil {
return true
Expand All @@ -145,14 +145,14 @@ func NeedsPVC(ctx context.Context) bool {
// GetArtifactStorage returns the storage interface to enable
// consumer code to get a container step for copy to/from storage
func GetArtifactStorage(ctx context.Context, images pipeline.Images, prName string, c kubernetes.Interface) ArtifactStorageInterface {
if NeedsPVC(ctx) {
if needsPVC(ctx) {
return &storage.ArtifactPVC{Name: prName, ShellImage: images.ShellImage}
}
return NewArtifactBucketFromConfig(ctx, images)
return newArtifactBucketFromConfig(ctx, images)
}

// NewArtifactBucketFromConfig creates a Bucket from the supplied ConfigMap
func NewArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *storage.ArtifactBucket {
// newArtifactBucketFromConfig creates a Bucket from the supplied ConfigMap
func newArtifactBucketFromConfig(ctx context.Context, images pipeline.Images) *storage.ArtifactBucket {
c := &storage.ArtifactBucket{
ShellImage: images.ShellImage,
GsutilImage: images.GsutilImage,
Expand Down Expand Up @@ -191,7 +191,7 @@ func createPVC(ctx context.Context, pr *v1beta1.PipelineRun, c kubernetes.Interf
pvcStorageClassName = &pvcConfig.StorageClassName
}

pvcSpec := GetPVCSpec(pr, pvcSize, pvcStorageClassName)
pvcSpec := getPVCSpec(pr, pvcSize, pvcStorageClassName)
pvc, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvcSpec)
if err != nil {
return nil, fmt.Errorf("failed to claim Persistent Volume %q due to error: %w", pr.Name, err)
Expand All @@ -214,8 +214,8 @@ func deletePVC(pr *v1beta1.PipelineRun, c kubernetes.Interface) error {
return nil
}

// GetPVCSpec returns the PVC to create for a given PipelineRun
func GetPVCSpec(pr *v1beta1.PipelineRun, pvcSize resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim {
// getPVCSpec returns the PVC to create for a given PipelineRun
func getPVCSpec(pr *v1beta1.PipelineRun, pvcSize resource.Quantity, storageClassName *string) *corev1.PersistentVolumeClaim {
return &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Namespace: pr.Namespace,
Expand Down
8 changes: 4 additions & 4 deletions pkg/contexts/contexts.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,16 @@ import "context"
// with a context.Context.
type hdcnKey struct{}

// WithDefaultConfigurationName notes on the context for nested validation
// withDefaultConfigurationName notes on the context for nested validation
// that there is a default configuration name, which affects how an empty
// configurationName is validated.
func WithDefaultConfigurationName(ctx context.Context) context.Context {
func withDefaultConfigurationName(ctx context.Context) context.Context {
return context.WithValue(ctx, hdcnKey{}, struct{}{})
}

// HasDefaultConfigurationName checks to see whether the given context has
// hasDefaultConfigurationName checks to see whether the given context has
// been marked as having a default configurationName.
func HasDefaultConfigurationName(ctx context.Context) bool {
func hasDefaultConfigurationName(ctx context.Context) bool {
return ctx.Value(hdcnKey{}) != nil
}

Expand Down
6 changes: 3 additions & 3 deletions pkg/contexts/contexts_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,13 +30,13 @@ func TestContexts(t *testing.T) {
want bool
}{{
name: "has default config name",
ctx: WithDefaultConfigurationName(ctx),
check: HasDefaultConfigurationName,
ctx: withDefaultConfigurationName(ctx),
check: hasDefaultConfigurationName,
want: true,
}, {
name: "doesn't have default config name",
ctx: ctx,
check: HasDefaultConfigurationName,
check: hasDefaultConfigurationName,
want: false,
}, {
name: "are upgrading via defaulting",
Expand Down
8 changes: 4 additions & 4 deletions pkg/git/git.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,13 +157,13 @@ func Fetch(logger *zap.SugaredLogger, spec FetchSpec) error {
if err != nil {
return err
}
ref, err := ShowRef(logger, "HEAD", spec.Path)
ref, err := showRef(logger, "HEAD", spec.Path)
if err != nil {
return err
}
logger.Infof("Successfully cloned %s @ %s (%s) in path %s", trimmedURL, commit, ref, spec.Path)
if spec.Submodules {
if err := SubmoduleFetch(logger, spec); err != nil {
if err := submoduleFetch(logger, spec); err != nil {
return err
}
}
Expand All @@ -178,15 +178,15 @@ func ShowCommit(logger *zap.SugaredLogger, revision, path string) (string, error
return strings.TrimSuffix(output, "\n"), nil
}

func ShowRef(logger *zap.SugaredLogger, revision, path string) (string, error) {
func showRef(logger *zap.SugaredLogger, revision, path string) (string, error) {
output, err := run(logger, path, "show", "-q", "--pretty=format:%D", revision)
if err != nil {
return "", err
}
return strings.TrimSuffix(output, "\n"), nil
}

func SubmoduleFetch(logger *zap.SugaredLogger, spec FetchSpec) error {
func submoduleFetch(logger *zap.SugaredLogger, spec FetchSpec) error {
if spec.Path != "" {
if err := os.Chdir(spec.Path); err != nil {
return fmt.Errorf("failed to change directory with path %s; err: %w", spec.Path, err)
Expand Down
30 changes: 15 additions & 15 deletions pkg/pod/status.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ const (
ReasonPodCreationFailed = "PodCreationFailed"

// ReasonPending indicates that the pod is in corev1.Pending, and the reason is not
// ReasonExceededNodeResources or IsPodHitConfigError
// ReasonExceededNodeResources or isPodHitConfigError
ReasonPending = "Pending"

//timeFormat is RFC3339 with millisecond
Expand Down Expand Up @@ -101,7 +101,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev
trs := &tr.Status
if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown {
// If the taskRunStatus doesn't exist yet, it's because we just started running
MarkStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
}

sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers)
Expand Down Expand Up @@ -272,9 +272,9 @@ func extractStartedAtTimeFromResults(results []v1beta1.PipelineResourceResult) (
func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) {
if DidTaskRunFail(pod) {
msg := getFailureMessage(pod)
MarkStatusFailure(trs, msg)
markStatusFailure(trs, msg)
} else {
MarkStatusSuccess(trs)
markStatusSuccess(trs)
}

// update tr completed time
Expand All @@ -284,21 +284,21 @@ func updateCompletedTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) {
func updateIncompleteTaskRun(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) {
switch pod.Status.Phase {
case corev1.PodRunning:
MarkStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
case corev1.PodPending:
var reason, msg string
switch {
case IsPodExceedingNodeResources(pod):
reason = ReasonExceededNodeResources
msg = "TaskRun Pod exceeded available resources"
case IsPodHitConfigError(pod):
case isPodHitConfigError(pod):
reason = ReasonCreateContainerConfigError
msg = getWaitingMessage(pod)
default:
reason = ReasonPending
msg = getWaitingMessage(pod)
}
MarkStatusRunning(trs, reason, msg)
markStatusRunning(trs, reason, msg)
}
}

Expand Down Expand Up @@ -368,8 +368,8 @@ func IsPodExceedingNodeResources(pod *corev1.Pod) bool {
return false
}

// IsPodHitConfigError returns true if the Pod's status indicates that a config error was raised
func IsPodHitConfigError(pod *corev1.Pod) bool {
// isPodHitConfigError returns true if the Pod's status indicates that a config error was raised
func isPodHitConfigError(pod *corev1.Pod) bool {
for _, containerStatus := range pod.Status.ContainerStatuses {
if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason == "CreateContainerConfigError" {
return true
Expand Down Expand Up @@ -405,8 +405,8 @@ func getWaitingMessage(pod *corev1.Pod) string {
return "Pending"
}

// MarkStatusRunning sets taskrun status to running
func MarkStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) {
// markStatusRunning sets taskrun status to running
func markStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) {
trs.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
Expand All @@ -415,8 +415,8 @@ func MarkStatusRunning(trs *v1beta1.TaskRunStatus, reason, message string) {
})
}

// MarkStatusFailure sets taskrun status to failure
func MarkStatusFailure(trs *v1beta1.TaskRunStatus, message string) {
// markStatusFailure sets taskrun status to failure
func markStatusFailure(trs *v1beta1.TaskRunStatus, message string) {
trs.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Expand All @@ -425,8 +425,8 @@ func MarkStatusFailure(trs *v1beta1.TaskRunStatus, message string) {
})
}

// MarkStatusSuccess sets taskrun status to success
func MarkStatusSuccess(trs *v1beta1.TaskRunStatus) {
// markStatusSuccess sets taskrun status to success
func markStatusSuccess(trs *v1beta1.TaskRunStatus) {
trs.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionTrue,
Expand Down
6 changes: 3 additions & 3 deletions pkg/pod/status_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1228,7 +1228,7 @@ func TestSidecarsReady(t *testing.T) {

func TestMarkStatusRunning(t *testing.T) {
trs := v1beta1.TaskRunStatus{}
MarkStatusRunning(&trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")
markStatusRunning(&trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing")

expected := &apis.Condition{
Type: apis.ConditionSucceeded,
Expand All @@ -1244,7 +1244,7 @@ func TestMarkStatusRunning(t *testing.T) {

func TestMarkStatusFailure(t *testing.T) {
trs := v1beta1.TaskRunStatus{}
MarkStatusFailure(&trs, "failure message")
markStatusFailure(&trs, "failure message")

expected := &apis.Condition{
Type: apis.ConditionSucceeded,
Expand All @@ -1260,7 +1260,7 @@ func TestMarkStatusFailure(t *testing.T) {

func TestMarkStatusSuccess(t *testing.T) {
trs := v1beta1.TaskRunStatus{}
MarkStatusSuccess(&trs)
markStatusSuccess(&trs)

expected := &apis.Condition{
Type: apis.ConditionSucceeded,
Expand Down
4 changes: 2 additions & 2 deletions pkg/reconciler/events/cloudevent/cloud_event_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ func SendCloudEvents(tr *v1beta1.TaskRun, ceclient CEClient, logger *zap.Sugared
logger = logger.With(zap.String("taskrun", tr.Name))

// Make the event we would like to send:
event, err := EventForTaskRun(tr)
event, err := eventForTaskRun(tr)
if err != nil || event == nil {
logger.With(zap.Error(err)).Error("failed to produce a cloudevent from TaskRun.")
return err
Expand Down Expand Up @@ -132,7 +132,7 @@ func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error
if ceClient == nil {
return errors.New("No cloud events client found in the context")
}
event, err := EventForObjectWithCondition(o)
event, err := eventForObjectWithCondition(o)
if err != nil {
return err
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ func TestSendCloudEvents(t *testing.T) {
successfulBehaviour := FakeClientBehaviour{
SendSuccessfully: true,
}
err := SendCloudEvents(tc.taskRun, NewFakeClient(&successfulBehaviour), logger)
err := SendCloudEvents(tc.taskRun, newFakeClient(&successfulBehaviour), logger)
if err != nil {
t.Fatalf("Unexpected error sending cloud events: %v", err)
}
Expand Down Expand Up @@ -196,7 +196,7 @@ func TestSendCloudEventsErrors(t *testing.T) {
unsuccessfulBehaviour := FakeClientBehaviour{
SendSuccessfully: false,
}
err := SendCloudEvents(tc.taskRun, NewFakeClient(&unsuccessfulBehaviour), logger)
err := SendCloudEvents(tc.taskRun, newFakeClient(&unsuccessfulBehaviour), logger)
if err == nil {
t.Fatalf("Unexpected success sending cloud events: %v", err)
}
Expand Down
Loading