diff --git a/.travis.yml b/.travis.yml
index 69f81a7e558..712955ca017 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -48,4 +48,5 @@ script:
 - cd $TRAVIS_BUILD_DIR/backend/src
 - gimme -f 1.11.4
 - source ~/.gimme/envs/go1.11.4.env
+- go vet -all -shadow ./...
 - go test ./...
diff --git a/backend/src/agent/persistence/worker/metrics_reporter_test.go b/backend/src/agent/persistence/worker/metrics_reporter_test.go
index 8ff8334c624..b04b711c162 100644
--- a/backend/src/agent/persistence/worker/metrics_reporter_test.go
+++ b/backend/src/agent/persistence/worker/metrics_reporter_test.go
@@ -118,12 +118,12 @@ func TestReportMetrics_Succeed(t *testing.T) {
 			&api.RunMetric{
 				Name:   "accuracy",
 				NodeId: "node-1",
-				Value:  &api.RunMetric_NumberValue{0.77},
+				Value:  &api.RunMetric_NumberValue{NumberValue: 0.77},
 			},
 			&api.RunMetric{
 				Name:   "logloss",
 				NodeId: "node-1",
-				Value:  &api.RunMetric_NumberValue{1.2},
+				Value:  &api.RunMetric_NumberValue{NumberValue: 1.2},
 			},
 		},
 	}
@@ -301,12 +301,12 @@ func TestReportMetrics_InvalidMetricsJSON_PartialFail(t *testing.T) {
 			&api.RunMetric{
 				Name:   "accuracy",
 				NodeId: "node-2",
-				Value:  &api.RunMetric_NumberValue{0.77},
+				Value:  &api.RunMetric_NumberValue{NumberValue: 0.77},
 			},
 			&api.RunMetric{
 				Name:   "logloss",
 				NodeId: "node-2",
-				Value:  &api.RunMetric_NumberValue{1.2},
+				Value:  &api.RunMetric_NumberValue{NumberValue: 1.2},
 			},
 		},
 	}
diff --git a/backend/src/apiserver/client/scheduled_workflow.go b/backend/src/apiserver/client/scheduled_workflow.go
index 5d30c9cb427..82ed4b24b21 100644
--- a/backend/src/apiserver/client/scheduled_workflow.go
+++ b/backend/src/apiserver/client/scheduled_workflow.go
@@ -27,7 +27,6 @@ import (
 // creates a new client for the Kubernetes ScheduledWorkflow CRD.
 func CreateScheduledWorkflowClientOrFatal(namespace string, initConnectionTimeout time.Duration) v1alpha1.ScheduledWorkflowInterface {
 	var swfClient v1alpha1.ScheduledWorkflowInterface
-	var err error
 	var operation = func() error {
 		restConfig, err := rest.InClusterConfig()
 		if err != nil {
@@ -37,12 +36,12 @@ func CreateScheduledWorkflowClientOrFatal(namespace string, initConnectionTimeou
 		swfClient = swfClientSet.ScheduledworkflowV1alpha1().ScheduledWorkflows(namespace)
 		return nil
 	}
+
 	b := backoff.NewExponentialBackOff()
 	b.MaxElapsedTime = initConnectionTimeout
-	err = backoff.Retry(operation, b)
-
-	if err != nil {
+	if err := backoff.Retry(operation, b); err != nil {
 		glog.Fatalf("Failed to create scheduled workflow client. Error: %v", err)
 	}
+
 	return swfClient
 }
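Note: the metrics_reporter_test.go hunks above fix unkeyed composite literals, the pattern go vet's composites check (pulled in by the new -all Travis step) exists to catch: an unkeyed literal of a struct from another package silently breaks when that struct gains or reorders fields. A minimal sketch of the idea, with a local stand-in struct (vet itself only flags literals of imported types, such as the generated api.RunMetric_NumberValue above):

package main

import "fmt"

// Metric stands in for a generated struct like api.RunMetric_NumberValue;
// in real code it would live in another package.
type Metric struct {
	NumberValue float64
}

func main() {
	// Unkeyed: position-dependent, breaks if a field is added before NumberValue.
	unkeyed := Metric{0.77}
	// Keyed, as in the diff above: robust to field additions and reordering.
	keyed := Metric{NumberValue: 0.77}
	fmt.Println(unkeyed, keyed)
}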
Error: %v", config.Name, configErr)) } - pipelineFile, err := server.ReadPipelineFile(config.File, reader, server.MaxFileLength) - if err != nil { - return errors.New(fmt.Sprintf("Failed to decompress the file %s. Error: %v", config.Name, err.Error())) + pipelineFile, configErr := server.ReadPipelineFile(config.File, reader, server.MaxFileLength) + if configErr != nil { + return errors.New(fmt.Sprintf("Failed to decompress the file %s. Error: %v", config.Name, configErr)) } - _, err = resourceManager.CreatePipeline(config.Name, config.Description, pipelineFile) - if err != nil { + _, configErr = resourceManager.CreatePipeline(config.Name, config.Description, pipelineFile) + if configErr != nil { // Log the error but not fail. The API Server pod can restart and it could potentially cause name collision. // In the future, we might consider loading samples during deployment, instead of when API server starts. - glog.Warningf(fmt.Sprintf("Failed to create pipeline for %s. Error: %v", config.Name, err.Error())) + glog.Warningf(fmt.Sprintf("Failed to create pipeline for %s. Error: %v", config.Name, configErr)) continue } diff --git a/backend/src/apiserver/resource/resource_manager.go b/backend/src/apiserver/resource/resource_manager.go index a002c5bc15a..3371918ed94 100644 --- a/backend/src/apiserver/resource/resource_manager.go +++ b/backend/src/apiserver/resource/resource_manager.go @@ -194,15 +194,14 @@ func (r *ResourceManager) CreateRun(apiRun *api.Run) (*model.RunDetail, error) { return nil, util.Wrap(err, "Failed to fetch workflow spec.") } var workflow util.Workflow - err = json.Unmarshal(workflowSpecManifestBytes, &workflow) - if err != nil { + if err = json.Unmarshal(workflowSpecManifestBytes, &workflow); err != nil { return nil, util.NewInternalServerError(err, "Failed to unmarshal workflow spec manifest. Workflow bytes: %s", string(workflowSpecManifestBytes)) } parameters := toParametersMap(apiRun.GetPipelineSpec().GetParameters()) // Verify no additional parameter provided - if err := workflow.VerifyParameters(parameters); err != nil { + if err = workflow.VerifyParameters(parameters); err != nil { return nil, util.Wrap(err, "Failed to verify parameters.") } // Append provided parameter diff --git a/backend/src/apiserver/server/api_converter_test.go b/backend/src/apiserver/server/api_converter_test.go index 2e9105656e7..4ab7c45d01d 100644 --- a/backend/src/apiserver/server/api_converter_test.go +++ b/backend/src/apiserver/server/api_converter_test.go @@ -115,13 +115,13 @@ func TestToApiRuns(t *testing.T) { apiMetric1 := &api.RunMetric{ Name: metric1.Name, NodeId: metric1.NodeID, - Value: &api.RunMetric_NumberValue{metric1.NumberValue}, + Value: &api.RunMetric_NumberValue{NumberValue: metric1.NumberValue}, Format: api.RunMetric_RAW, } apiMetric2 := &api.RunMetric{ Name: metric2.Name, NodeId: metric2.NodeID, - Value: &api.RunMetric_NumberValue{metric2.NumberValue}, + Value: &api.RunMetric_NumberValue{NumberValue: metric2.NumberValue}, Format: api.RunMetric_PERCENTAGE, } modelRun1 := model.Run{ diff --git a/backend/src/apiserver/storage/db_status_store.go b/backend/src/apiserver/storage/db_status_store.go index 5535ae914fe..261cc1c5b3b 100644 --- a/backend/src/apiserver/storage/db_status_store.go +++ b/backend/src/apiserver/storage/db_status_store.go @@ -52,15 +52,16 @@ func (s *DBStatusStore) InitializeDBStatusTable() error { // The table is not initialized if !rows.Next() { - sql, args, err := sq. + sql, args, queryErr := sq. Insert("db_statuses"). SetMap(defaultDBStatus). 
diff --git a/backend/src/apiserver/storage/db_status_store.go b/backend/src/apiserver/storage/db_status_store.go
index 5535ae914fe..261cc1c5b3b 100644
--- a/backend/src/apiserver/storage/db_status_store.go
+++ b/backend/src/apiserver/storage/db_status_store.go
@@ -52,15 +52,16 @@ func (s *DBStatusStore) InitializeDBStatusTable() error {
 
 	// The table is not initialized
 	if !rows.Next() {
-		sql, args, err := sq.
+		sql, args, queryErr := sq.
 			Insert("db_statuses").
 			SetMap(defaultDBStatus).
 			ToSql()
-		if err != nil {
+		if queryErr != nil {
 			tx.Rollback()
-			return util.NewInternalServerError(err, "Error creating query to initialize database status table.")
+			return util.NewInternalServerError(queryErr, "Error creating query to initialize database status table.")
 		}
+
 		_, err = tx.Exec(sql, args...)
 		if err != nil {
 			tx.Rollback()
diff --git a/backend/src/cmd/ml/cmd/root.go b/backend/src/cmd/ml/cmd/root.go
index ff24e790d69..8f9d858a0fc 100644
--- a/backend/src/cmd/ml/cmd/root.go
+++ b/backend/src/cmd/ml/cmd/root.go
@@ -41,22 +41,22 @@ func NewRootCmd(factory ClientFactoryInterface) *RootCommand {
 		pipelineClient, err := factory.CreatePipelineClient(root.ClientConfig(), root.debug)
 		if err != nil {
-			fmt.Errorf("Could not create pipelineClient: %v", err)
+			return fmt.Errorf("Could not create pipelineClient: %v", err)
 		}
 		jobClient, err := factory.CreateJobClient(root.ClientConfig(), root.debug)
 		if err != nil {
-			fmt.Errorf("Could not create jobClient: %v", err)
+			return fmt.Errorf("Could not create jobClient: %v", err)
 		}
 		runClient, err := factory.CreateRunClient(root.ClientConfig(), root.debug)
 		if err != nil {
-			fmt.Errorf("Could not create runClient: %v", err)
+			return fmt.Errorf("Could not create runClient: %v", err)
 		}
 		experimentClient, err := factory.CreateExperimentClient(root.ClientConfig(), root.debug)
 		if err != nil {
-			fmt.Errorf("Could not create experimentClient: %v", err)
+			return fmt.Errorf("Could not create experimentClient: %v", err)
 		}
 		root.pipelineUploadClient = pipelineUploadClient
 		root.pipelineClient = pipelineClient
diff --git a/backend/src/common/util/error.go b/backend/src/common/util/error.go
index c2d691b768c..abd9c40a74f 100644
--- a/backend/src/common/util/error.go
+++ b/backend/src/common/util/error.go
@@ -262,28 +262,28 @@ func ToGRPCError(err error) error {
 	case *UserError:
 		userError := err.(*UserError)
 		stat := status.New(userError.externalStatusCode, userError.internalError.Error())
-		statWithDetail, err := stat.
+		statWithDetail, statErr := stat.
 			WithDetails(&api.Error{
 				ErrorMessage: userError.externalMessage,
 				ErrorDetails: userError.internalError.Error(),
 			})
-		if err != nil {
+		if statErr != nil {
 			// Failed to stream error message as proto.
 			glog.Errorf("Failed to stream gRpc error. Error to be streamed: %v Error: %v",
-				userError.String(), err.Error())
+				userError.String(), statErr)
 			return stat.Err()
 		}
 		return statWithDetail.Err()
 	default:
 		externalMessage := fmt.Sprintf("Internal error: %+v", err)
 		stat := status.New(codes.Internal, externalMessage)
-		statWithDetail, err := stat.
+		statWithDetail, statErr := stat.
 			WithDetails(&api.Error{ErrorMessage: externalMessage, ErrorDetails: externalMessage})
-		if err != nil {
+		if statErr != nil {
 			// Failed to stream error message as proto.
 			glog.Errorf("Failed to stream gRpc error. Error to be streamed: %v Error: %v",
-				externalMessage, err.Error())
+				externalMessage, statErr)
 			return stat.Err()
 		}
 		return statWithDetail.Err()
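Note: the root.go change above is the most consequential fix in this diff. fmt.Errorf only constructs an error value, so calling it without returning or assigning the result drops the failure on the floor and the function carries on as if nothing happened; go vet's unusedresult check (part of -all) reports exactly this. A minimal sketch:

package main

import (
	"errors"
	"fmt"
)

func connect() error {
	return errors.New("connection refused")
}

func setup() error {
	if err := connect(); err != nil {
		// Bug: this builds an error value and immediately discards it;
		// vet reports "result of fmt.Errorf call not used". The fix, as
		// in root.go above, is to return it.
		fmt.Errorf("could not create client: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(setup()) // prints <nil> even though connect failed
}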
diff --git a/backend/src/crd/pkg/apis/scheduledworkflow/v1alpha1/types.go b/backend/src/crd/pkg/apis/scheduledworkflow/v1alpha1/types.go
index 0ca85f8278e..1966ce65534 100644
--- a/backend/src/crd/pkg/apis/scheduledworkflow/v1alpha1/types.go
+++ b/backend/src/crd/pkg/apis/scheduledworkflow/v1alpha1/types.go
@@ -68,7 +68,7 @@ type ScheduledWorkflowSpec struct {
 
 	// Specification of the workflow to schedule.
 	// +optional
-	Workflow *WorkflowResource `json:"workflow, omitempty"`
+	Workflow *WorkflowResource `json:"workflow,omitempty"`
 
 	// TODO: support additional resource types: K8 jobs, etc.
@@ -189,13 +189,13 @@ type ScheduledWorkflowCondition struct {
 type TriggerStatus struct {
 	// Time of the last creation of a workflow.
-	LastTriggeredTime *metav1.Time `json:lastTriggeredTime,omitempty`
+	LastTriggeredTime *metav1.Time `json:"lastTriggeredTime,omitempty"`
 
 	// Time of the next creation of a workflow (assuming that the schedule is enabled).
-	NextTriggeredTime *metav1.Time `json:nextTriggeredTime,omitempty`
+	NextTriggeredTime *metav1.Time `json:"nextTriggeredTime,omitempty"`
 
 	// Index of the last workflow created.
-	LastIndex *int64 `json:lastWorkflowIndex,omitempty`
+	LastIndex *int64 `json:"lastWorkflowIndex,omitempty"`
 }
 
 type WorkflowHistory struct {
@@ -220,7 +220,7 @@ type WorkflowStatus struct {
 	UID types.UID `json:"uid,omitempty"`
 
 	// Phase is a high level summary of the status of the workflow.
-	Phase v1alpha1.NodePhase `json:phase,omitempty`
+	Phase v1alpha1.NodePhase `json:"phase,omitempty"`
 
 	// A human readable message indicating details about why the workflow is in
 	// this condition.
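Note: the types.go hunks fix two tag mistakes that encoding/json accepts without complaint. A tag missing its quotes (json:phase,omitempty) is not a well-formed key:"value" pair, so reflect.StructTag yields nothing and the field marshals under its Go name; go vet's structtag check reports this form. A space after the comma (json:"workflow, omitempty") parses, but the option becomes " omitempty", which json does not recognize, so empty values are still emitted. A minimal sketch of both failure modes:

package main

import (
	"encoding/json"
	"fmt"
)

type Status struct {
	// Malformed tag: no quotes, so it is ignored and the JSON key is "Phase".
	Phase string `json:phase,omitempty`
	// The stray space disables omitempty: the option parses as " omitempty".
	Workflow string `json:"workflow, omitempty"`
}

func main() {
	out, _ := json.Marshal(Status{})
	// Prints {"Phase":"","workflow":""} where {} was intended.
	fmt.Println(string(out))
}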