add nonamedreturns linter
Signed-off-by: Mikhail Scherba <[email protected]>
miklezzzz committed Dec 29, 2024
1 parent 7c39f50 commit cd2f876
Showing 20 changed files with 204 additions and 170 deletions.
5 changes: 5 additions & 0 deletions .golangci.yaml
@@ -21,6 +21,7 @@ linters:
- ineffassign
- misspell
- nolintlint
- nonamedreturns
- prealloc
- revive
- sloglint
@@ -74,6 +75,10 @@ linters-settings:
desc: "The 'github.com/evanphx/json-patch' package is superseded. Use pkg/utils/jsonpatch.go instead."
- pkg: "gopkg.in/satori/go.uuid.v1"
desc: "Use https://github.com/gofrs/uuid instead. Satori/go.uuid is no longer maintained and has critical vulnerabilities."
nonamedreturns:
# Report named error if it is assigned inside defer.
# Default: false
report-error-in-defer: false
issues:
exclude:
# Using underscores is a common practice, refactor in the future
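
For context, here is a minimal, hypothetical Go sketch (not part of this commit) of what the nonamedreturns linter reports and of the pattern the refactor below converges on. The function names are invented, and the last function shows the case that the report-error-in-defer: false default configured above leaves unreported:

package example

import (
	"fmt"
	"os"
)

// Flagged by nonamedreturns: the result is declared as a named return.
func portNamed(s string) (err error) {
	if s == "" {
		err = fmt.Errorf("empty port")
	}
	return
}

// Style the refactor converges on: a plain return type and an explicit
// value at every return statement.
func port(s string) error {
	if s == "" {
		return fmt.Errorf("empty port")
	}
	return nil
}

// With report-error-in-defer: false, a named error return assigned inside
// a defer (the usual way to surface a Close error) is not reported.
func writeMarker(path string) (err error) {
	f, createErr := os.Create(path)
	if createErr != nil {
		return createErr
	}
	defer func() {
		if closeErr := f.Close(); closeErr != nil && err == nil {
			err = closeErr
		}
	}()
	_, err = f.WriteString("ok\n")
	return err
}

In the handlers below, the same mechanical change repeats: the named result (res queue.TaskResult) becomes an explicit var res queue.TaskResult, and each bare return becomes return res.
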
5 changes: 2 additions & 3 deletions pkg/addon-operator/bootstrap.go
@@ -43,7 +43,7 @@ func (op *AddonOperator) bootstrap() error {
return nil
}

func (op *AddonOperator) Assemble(debugServer *debug.Server) (err error) {
func (op *AddonOperator) Assemble(debugServer *debug.Server) error {
op.registerDefaultRoutes()
if app.AdmissionServerEnabled {
op.AdmissionServer.start(op.ctx)
@@ -58,8 +58,7 @@ func (op *AddonOperator) Assemble(debugServer *debug.Server) (err error) {
op.RegisterDebugModuleRoutes(debugServer)
op.RegisterDiscoveryRoute(debugServer)

err = op.InitModuleManager()
if err != nil {
if err := op.InitModuleManager(); err != nil {
return err
}

103 changes: 59 additions & 44 deletions pkg/addon-operator/operator.go
@@ -738,13 +738,16 @@ func (op *AddonOperator) CreatePurgeTasks(modulesToPurge []string, t sh_task.Tas
return newTasks
}

// HandleApplyKubeConfigValues ...
func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels map[string]string) (res queue.TaskResult) {
var handleErr error
// ApplyKubeConfigValues
func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "HandleApplyKubeConfigValues").End()
logEntry := utils.EnrichLoggerWithLabels(op.Logger, logLabels)

hm := task.HookMetadataAccessor(t)
var (
handleErr error
res queue.TaskResult
logEntry = utils.EnrichLoggerWithLabels(op.Logger, logLabels)
hm = task.HookMetadataAccessor(t)
)

op.KubeConfigManager.SafeReadConfig(func(config *config.KubeConfig) {
handleErr = op.ModuleManager.ApplyNewKubeConfigValues(config, hm.GlobalValuesChanged)
@@ -764,12 +767,14 @@ func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels m
res.Status = queue.Success

logEntry.Debug("HandleApplyKubeConfigValues success")
return
return res
}

// HandleConvergeModules is a multi-phase task.
func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "ConvergeModules").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, logLabels)

taskEvent, ok := t.GetProp(converge.ConvergeEventProp).(converge.ConvergeEvent)
@@ -802,7 +807,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str
res.HeadTasks = tasks
res.Status = queue.Keep
op.logTaskAdd(logEntry, "head", res.HeadTasks...)
return
return res
}
}

@@ -829,7 +834,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str
res.HeadTasks = tasks
res.Status = queue.Keep
op.logTaskAdd(logEntry, "head", res.HeadTasks...)
return
return res
}
}
}
@@ -844,7 +849,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str
res.HeadTasks = tasks
res.Status = queue.Keep
op.logTaskAdd(logEntry, "head", res.HeadTasks...)
return
return res
}
}
}
@@ -871,6 +876,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str

logEntry.Debug("ConvergeModules success")
res.Status = queue.Success

return res
}

@@ -913,6 +919,7 @@ func (op *AddonOperator) CreateBeforeAllTasks(logLabels map[string]string, event
})
tasks = append(tasks, newTask.WithQueuedAt(queuedAt))
}

return tasks
}

@@ -969,6 +976,7 @@ func (op *AddonOperator) CreateAndStartQueue(queueName string) bool {
}
op.engine.TaskQueues.NewNamedQueue(queueName, op.TaskHandler)
op.engine.TaskQueues.GetByName(queueName).Start()

return true
}

@@ -1457,9 +1465,10 @@ func (op *AddonOperator) UpdateWaitInQueueMetric(t sh_task.Task) {
}

// HandleGlobalHookEnableKubernetesBindings add Synchronization tasks.
func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "DiscoverHelmReleases").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
logEntry.Debug("Global hook enable kubernetes bindings")

@@ -1532,7 +1541,7 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task
t.UpdateFailureMessage(err.Error())
t.WithQueuedAt(queuedAt)
res.Status = queue.Fail
return
return res
}
// Substitute current task with Synchronization tasks for the main queue.
// Other Synchronization tasks are queued into specified queues.
@@ -1571,13 +1580,14 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task

res.Status = queue.Success

return
return res
}

// HandleDiscoverHelmReleases runs RefreshStateFromHelmReleases to detect modules state at start.
func (op *AddonOperator) HandleDiscoverHelmReleases(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleDiscoverHelmReleases(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "DiscoverHelmReleases").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
logEntry.Debug("Discover Helm releases state")

@@ -1589,20 +1599,21 @@ func (op *AddonOperator) HandleDiscoverHelmReleases(t sh_task.Task, labels map[s
log.Err(err))
t.UpdateFailureMessage(err.Error())
t.WithQueuedAt(time.Now())
return
return res
}

res.Status = queue.Success
tasks := op.CreatePurgeTasks(state.ModulesToPurge, t)
res.AfterTasks = tasks
op.logTaskAdd(logEntry, "after", res.AfterTasks...)
return
return res
}

// HandleModulePurge run helm purge for unknown module.
func (op *AddonOperator) HandleModulePurge(t sh_task.Task, labels map[string]string) (status queue.TaskStatus) {
func (op *AddonOperator) HandleModulePurge(t sh_task.Task, labels map[string]string) queue.TaskStatus {
defer trace.StartRegion(context.Background(), "ModulePurge").End()

var status queue.TaskStatus
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
logEntry.Debug("Module purge start")

@@ -1616,18 +1627,17 @@ func (op *AddonOperator) HandleModulePurge(t sh_task.Task, labels map[string]str
}

status = queue.Success
return

return status
}

// HandleModuleDelete deletes helm release for known module.
func (op *AddonOperator) HandleModuleDelete(t sh_task.Task, labels map[string]string) (status queue.TaskStatus) {
func (op *AddonOperator) HandleModuleDelete(t sh_task.Task, labels map[string]string) queue.TaskStatus {
defer trace.StartRegion(context.Background(), "ModuleDelete").End()

var status queue.TaskStatus
hm := task.HookMetadataAccessor(t)
status = queue.Success

baseModule := op.ModuleManager.GetModule(hm.ModuleName)

logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
logEntry.Debug("Module delete", slog.String("name", hm.ModuleName))

@@ -1660,18 +1670,18 @@ func (op *AddonOperator) HandleModuleDelete(t sh_task.Task, labels map[string]st
status = queue.Success
}

return
return status
}

// HandleModuleEnsureCRDs ensure CRDs for module.
func (op *AddonOperator) HandleModuleEnsureCRDs(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleModuleEnsureCRDs(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "ModuleEnsureCRDs").End()

hm := task.HookMetadataAccessor(t)
res.Status = queue.Success

res := queue.TaskResult{
Status: queue.Success,
}
baseModule := op.ModuleManager.GetModule(hm.ModuleName)

logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
logEntry.Debug("Module ensureCRDs", slog.String("name", hm.ModuleName))

@@ -1689,12 +1699,14 @@ func (op *AddonOperator) HandleModuleEnsureCRDs(t sh_task.Task, labels map[strin
op.discoveredGVKsLock.Unlock()
}

return
return res
}

// HandleParallelModuleRun runs multiple HandleModuleRun tasks in parallel and aggregates their results
func (op *AddonOperator) HandleParallelModuleRun(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleParallelModuleRun(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "ParallelModuleRun").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
hm := task.HookMetadataAccessor(t)

@@ -1785,6 +1797,7 @@ L:
}
op.parallelTaskChannels.Delete(t.GetId())
res.Status = queue.Success

return res
}

@@ -1794,6 +1807,7 @@ func formatErrorSummary(errors map[string]string) string {
for moduleName, moduleErr := range errors {
errSummary += fmt.Sprintf("\t- %s: %s", moduleName, moduleErr)
}

return errSummary
}

@@ -1811,17 +1825,18 @@ func formatErrorSummary(errors map[string]string) string {
//
// ModuleRun is restarted if hook or chart is failed.
// After first HandleModuleRun success, no onStartup and kubernetes.Synchronization tasks will run.
func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "ModuleRun").End()
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)
hm := task.HookMetadataAccessor(t)
baseModule := op.ModuleManager.GetModule(hm.ModuleName)

// Break error loop when module becomes disabled.
if !op.ModuleManager.IsModuleEnabled(baseModule.GetName()) {
res.Status = queue.Success
return
return res
}

metricLabels := map[string]string{
@@ -1986,7 +2001,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin
res.HeadTasks = mainSyncTasks
res.Status = queue.Keep
op.logTaskAdd(logEntry, "head", mainSyncTasks...)
return
return res
}
}
}
@@ -2010,7 +2025,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin
logEntry.Debug("Synchronization not completed, keep ModuleRun task in repeat mode")
t.WithQueuedAt(time.Now())
res.Status = queue.Repeat
return
return res
}
}

@@ -2059,14 +2074,15 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin
logEntry.Info("ModuleRun success, module is ready")
}
}
return

return res
}

func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "ModuleHookRun").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)

hm := task.HookMetadataAccessor(t)
baseModule := op.ModuleManager.GetModule(hm.ModuleName)
// TODO: check if module exists
@@ -2075,7 +2091,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s
// Prevent hook running in parallel queue if module is disabled in "main" queue.
if !op.ModuleManager.IsModuleEnabled(baseModule.GetName()) {
res.Status = queue.Success
return
return res
}

err := taskHook.RateLimitWait(context.Background())
Expand All @@ -2084,7 +2100,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s
// canceled, or the expected wait time exceeds the Context's Deadline.
// The best we can do without proper context usage is to repeat the task.
res.Status = queue.Repeat
return
return res
}

metricLabels := map[string]string{
@@ -2262,11 +2278,11 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s
return res
}

func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]string) (res queue.TaskResult) {
func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]string) queue.TaskResult {
defer trace.StartRegion(context.Background(), "GlobalHookRun").End()

var res queue.TaskResult
logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels)

hm := task.HookMetadataAccessor(t)
taskHook := op.ModuleManager.GetGlobalHook(hm.HookName)

@@ -2275,9 +2291,8 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s
// This could happen when the Context is
// canceled, or the expected wait time exceeds the Context's Deadline.
// The best we can do without proper context usage is to repeat the task.
return queue.TaskResult{
Status: "Repeat",
}
res.Status = "Repeat"
return res
}

metricLabels := map[string]string{