
chore: enable context-as-argument from revive
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
mmorel-35 committed Feb 8, 2025
1 parent 0bf2252 commit ec10d6b
Showing 20 changed files with 39 additions and 37 deletions.
3 changes: 2 additions & 1 deletion .golangci.yaml
@@ -238,7 +238,8 @@ linters-settings:
 - name: blank-imports
   disabled: true
 - name: context-as-argument
-  disabled: true
+  arguments:
+    - allowTypesBefore: "*testing.T"
 - name: context-keys-type
 - name: dot-imports
   disabled: true
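For context: revive's context-as-argument rule reports any function whose context.Context parameter is not the first parameter, and the allowTypesBefore argument whitelists types that may still appear before it — here *testing.T, so test helpers can keep the conventional t-first signature. A minimal illustrative sketch of what the linter accepts and flags (the function names below are hypothetical, not part of this change):

package example

import (
	"context"
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Flagged by context-as-argument: ctx is not the first parameter.
func startJobOld(c client.Client, ctx context.Context) error { return nil }

// Accepted: ctx comes first.
func startJobNew(ctx context.Context, c client.Client) error { return nil }

// Accepted because of allowTypesBefore "*testing.T": a *testing.T may precede ctx.
func setupTestRepo(t *testing.T, ctx context.Context) error { return nil }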
1 change: 1 addition & 0 deletions pkg/client/retry.go
@@ -27,6 +27,7 @@ import (
 kbclient "sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+//nolint:revive //FIXME
 func CreateRetryGenerateName(client kbclient.Client, ctx context.Context, obj kbclient.Object) error {
 retryCreateFn := func() error {
 // needed to ensure that the name from the failed create isn't left on the object between retries
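The //nolint:revive //FIXME directive keeps this one exported function exempt for now: reordering CreateRetryGenerateName's parameters would break existing callers, so the commit only suppresses the warning and leaves a FIXME for a follow-up. A rule-compliant signature would presumably look like the sketch below (hypothetical, not applied by this commit):

// Hypothetical ctx-first variant; only the parameter order changes, the body would stay the same.
func CreateRetryGenerateName(ctx context.Context, client kbclient.Client, obj kbclient.Object) error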
4 changes: 2 additions & 2 deletions pkg/controller/backup_repository_controller.go
@@ -427,7 +427,7 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
 
 log.Info("Running maintenance on backup repository")
 
-job, err := funcStartMaintenanceJob(r.Client, ctx, req, r.repoMaintenanceConfig, r.maintenanceJobResources, r.logLevel, r.logFormat, log)
+job, err := funcStartMaintenanceJob(ctx, r.Client, req, r.repoMaintenanceConfig, r.maintenanceJobResources, r.logLevel, r.logFormat, log)
 if err != nil {
 log.WithError(err).Warn("Starting repo maintenance failed")
 return r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
@@ -437,7 +437,7 @@ func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel
 
 // when WaitMaintenanceJobComplete fails, the maintenance result will be left aside temporarily
 // If the maintenenance still completes later, recallMaintenance recalls the left once and update LastMaintenanceTime and history
-status, err := funcWaitMaintenanceJobComplete(r.Client, ctx, job, r.namespace, log)
+status, err := funcWaitMaintenanceJobComplete(ctx, r.Client, job, r.namespace, log)
 if err != nil {
 return errors.Wrapf(err, "error waiting repo maintenance completion status")
 }
14 changes: 7 additions & 7 deletions pkg/controller/backup_repository_controller_test.go
@@ -118,25 +118,25 @@ func TestCheckNotReadyRepo(t *testing.T) {
 assert.Equal(t, "s3:test.amazonaws.com/bucket/restic/volume-ns-1", rr.Spec.ResticIdentifier)
 }
 
-func startMaintenanceJobFail(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
+func startMaintenanceJobFail(context.Context, client.Client, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
 return "", errors.New("fake-start-error")
 }
 
-func startMaintenanceJobSucceed(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
+func startMaintenanceJobSucceed(context.Context, client.Client, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) {
 return "fake-job-name", nil
 }
 
-func waitMaintenanceJobCompleteFail(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
+func waitMaintenanceJobCompleteFail(context.Context, client.Client, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
 return velerov1api.BackupRepositoryMaintenanceStatus{}, errors.New("fake-wait-error")
 }
 
-func waitMaintenanceJobCompleteFunc(now time.Time, result velerov1api.BackupRepositoryMaintenanceResult, message string) func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
+func waitMaintenanceJobCompleteFunc(now time.Time, result velerov1api.BackupRepositoryMaintenanceResult, message string) func(context.Context, client.Client, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
 completionTimeStamp := &metav1.Time{Time: now.Add(time.Hour)}
 if result == velerov1api.BackupRepositoryMaintenanceFailed {
 completionTimeStamp = nil
 }
 
-return func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
+return func(context.Context, client.Client, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
 return velerov1api.BackupRepositoryMaintenanceStatus{
 StartTimestamp: &metav1.Time{Time: now},
 CompleteTimestamp: completionTimeStamp,
@@ -185,8 +185,8 @@ func TestRunMaintenanceIfDue(t *testing.T) {
 tests := []struct {
 name string
 repo *velerov1api.BackupRepository
-startJobFunc func(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error)
-waitJobFunc func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error)
+startJobFunc func(context.Context, client.Client, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error)
+waitJobFunc func(context.Context, client.Client, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error)
 expectedMaintenanceTime time.Time
 expectedHistory []velerov1api.BackupRepositoryMaintenanceStatus
 expectedErr string
2 changes: 1 addition & 1 deletion pkg/controller/data_download_controller_test.go
@@ -965,7 +965,7 @@ type ddResumeTestHelper struct {
 asyncBR datapath.AsyncBR
 }
 
-func (dt *ddResumeTestHelper) resumeCancellableDataPath(_ *DataUploadReconciler, _ context.Context, _ *velerov2alpha1api.DataUpload, _ logrus.FieldLogger) error {
+func (dt *ddResumeTestHelper) resumeCancellableDataPath(_ context.Context, _ *DataUploadReconciler, _ *velerov2alpha1api.DataUpload, _ logrus.FieldLogger) error {
 return dt.resumeErr
 }
 
2 changes: 1 addition & 1 deletion pkg/controller/pod_volume_backup_controller.go
@@ -124,7 +124,7 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 OnProgress: r.OnDataPathProgress,
 }
 
-fsBackup, err := r.dataPathMgr.CreateFileSystemBR(pvb.Name, pVBRRequestor, ctx, r.Client, pvb.Namespace, callbacks, log)
+fsBackup, err := r.dataPathMgr.CreateFileSystemBR(ctx, pvb.Name, pVBRRequestor, r.Client, pvb.Namespace, callbacks, log)
 
 if err != nil {
 if err == datapath.ConcurrentLimitExceed {
2 changes: 1 addition & 1 deletion pkg/controller/pod_volume_restore_controller.go
@@ -118,7 +118,7 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
 OnProgress: c.OnDataPathProgress,
 }
 
-fsRestore, err := c.dataPathMgr.CreateFileSystemBR(pvr.Name, pVBRRequestor, ctx, c.Client, pvr.Namespace, callbacks, log)
+fsRestore, err := c.dataPathMgr.CreateFileSystemBR(ctx, pvr.Name, pVBRRequestor, c.Client, pvr.Namespace, callbacks, log)
 if err != nil {
 if err == datapath.ConcurrentLimitExceed {
 return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
2 changes: 1 addition & 1 deletion pkg/datamover/backup_micro_service.go
@@ -170,7 +170,7 @@ func (r *BackupMicroService) RunCancelableDataPath(ctx context.Context) (string,
 OnProgress: r.OnDataUploadProgress,
 }
 
-fsBackup, err := r.dataPathMgr.CreateFileSystemBR(du.Name, dataUploadDownloadRequestor, ctx, r.client, du.Namespace, callbacks, log)
+fsBackup, err := r.dataPathMgr.CreateFileSystemBR(ctx, du.Name, dataUploadDownloadRequestor, r.client, du.Namespace, callbacks, log)
 if err != nil {
 return "", errors.Wrap(err, "error to create data path")
 }
2 changes: 1 addition & 1 deletion pkg/datamover/restore_micro_service.go
@@ -157,7 +157,7 @@ func (r *RestoreMicroService) RunCancelableDataPath(ctx context.Context) (string
 OnProgress: r.OnDataDownloadProgress,
 }
 
-fsRestore, err := r.dataPathMgr.CreateFileSystemBR(dd.Name, dataUploadDownloadRequestor, ctx, r.client, dd.Namespace, callbacks, log)
+fsRestore, err := r.dataPathMgr.CreateFileSystemBR(ctx, dd.Name, dataUploadDownloadRequestor, r.client, dd.Namespace, callbacks, log)
 if err != nil {
 return "", errors.Wrap(err, "error to create data path")
 }
2 changes: 1 addition & 1 deletion pkg/datapath/manager.go
@@ -46,7 +46,7 @@ func NewManager(cocurrentNum int) *Manager {
 }
 
 // CreateFileSystemBR creates a new file system backup/restore data path instance
-func (m *Manager) CreateFileSystemBR(jobName string, requestorType string, ctx context.Context, client client.Client, namespace string, callbacks Callbacks, log logrus.FieldLogger) (AsyncBR, error) {
+func (m *Manager) CreateFileSystemBR(ctx context.Context, jobName string, requestorType string, client client.Client, namespace string, callbacks Callbacks, log logrus.FieldLogger) (AsyncBR, error) {
 m.trackerLock.Lock()
 defer m.trackerLock.Unlock()
 
6 changes: 3 additions & 3 deletions pkg/datapath/manager_test.go
@@ -26,13 +26,13 @@ import (
 func TestCreateFileSystemBR(t *testing.T) {
 m := NewManager(2)
 
-async_job_1, err := m.CreateFileSystemBR("job-1", "test", context.TODO(), nil, "velero", Callbacks{}, nil)
+async_job_1, err := m.CreateFileSystemBR(context.TODO(), "job-1", "test", nil, "velero", Callbacks{}, nil)
 assert.NoError(t, err)
 
-_, err = m.CreateFileSystemBR("job-2", "test", context.TODO(), nil, "velero", Callbacks{}, nil)
+_, err = m.CreateFileSystemBR(context.TODO(), "job-2", "test", nil, "velero", Callbacks{}, nil)
 assert.NoError(t, err)
 
-_, err = m.CreateFileSystemBR("job-3", "test", context.TODO(), nil, "velero", Callbacks{}, nil)
+_, err = m.CreateFileSystemBR(context.TODO(), "job-3", "test", nil, "velero", Callbacks{}, nil)
 assert.Equal(t, ConcurrentLimitExceed, err)
 
 ret := m.GetAsyncBR("job-0")
8 changes: 4 additions & 4 deletions pkg/repository/maintenance/maintenance.go
@@ -277,7 +277,7 @@ func getJobConfig(
 }
 
 // WaitJobComplete waits the completion of the specified maintenance job and return the BackupRepositoryMaintenanceStatus
-func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string, logger logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
+func WaitJobComplete(ctx context.Context, cli client.Client, jobName, ns string, logger logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) {
 log := logger.WithField("job name", jobName)
 
 maintenanceJob, err := waitForJobComplete(ctx, cli, ns, jobName, logger)
@@ -360,7 +360,7 @@ func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1a
 }
 
 // StartNewJob creates a new maintenance job
-func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, repoMaintenanceJobConfig string,
+func StartNewJob(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, repoMaintenanceJobConfig string,
 podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag, logger logrus.FieldLogger) (string, error) {
 bsl := &velerov1api.BackupStorageLocation{}
 if err := cli.Get(ctx, client.ObjectKey{Namespace: repo.Namespace, Name: repo.Spec.BackupStorageLocation}, bsl); err != nil {
@@ -391,7 +391,7 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu
 
 log.Info("Starting maintenance repo")
 
-maintenanceJob, err := buildJob(cli, ctx, repo, bsl.Name, jobConfig, podResources, logLevel, logFormat)
+maintenanceJob, err := buildJob(ctx, cli, repo, bsl.Name, jobConfig, podResources, logLevel, logFormat)
 if err != nil {
 return "", errors.Wrap(err, "error to build maintenance job")
 }
@@ -407,7 +407,7 @@ func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.Backu
 return maintenanceJob.Name, nil
 }
 
-func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs,
+func buildJob(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs,
 podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1.Job, error) {
 // Get the Velero server deployment
 deployment := &appsv1.Deployment{}
2 changes: 1 addition & 1 deletion pkg/repository/maintenance/maintenance_test.go
@@ -1057,7 +1057,7 @@ func TestBuildJob(t *testing.T) {
 cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build()
 
 // Call the function to test
-job, err := buildJob(cli, context.TODO(), param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat)
+job, err := buildJob(context.TODO(), cli, param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat)
 
 // Check the error
 if tc.expectedError {
2 changes: 1 addition & 1 deletion pkg/uploader/provider/kopia.go
@@ -52,8 +52,8 @@ type kopiaProvider struct {
 
 // NewKopiaUploaderProvider initialized with open or create a repository
 func NewKopiaUploaderProvider(
-requestorType string,
 ctx context.Context,
+requestorType string,
 credGetter *credentials.CredentialGetter,
 backupRepo *velerov1api.BackupRepository,
 log logrus.FieldLogger,
2 changes: 1 addition & 1 deletion pkg/uploader/provider/kopia_test.go
@@ -377,7 +377,7 @@ func TestNewKopiaUploaderProvider(t *testing.T) {
 return tc.mockBackupRepoService
 }
 // Call the function being tested.
-_, err := NewKopiaUploaderProvider(requestorType, ctx, credGetter, backupRepo, mockLog)
+_, err := NewKopiaUploaderProvider(ctx, requestorType, credGetter, backupRepo, mockLog)
 
 // Assertions
 if tc.expectedError != "" {
2 changes: 1 addition & 1 deletion pkg/uploader/provider/provider.go
@@ -85,7 +85,7 @@ func NewUploaderProvider(
 return nil, errors.New("uninitialized FileStore credential is not supported")
 }
 if uploaderType == uploader.KopiaType {
-return NewKopiaUploaderProvider(requesterType, ctx, credGetter, backupRepo, log)
+return NewKopiaUploaderProvider(ctx, requesterType, credGetter, backupRepo, log)
 } else {
 return NewResticUploaderProvider(repoIdentifier, bsl, credGetter, repoKeySelector, log)
 }
4 changes: 2 additions & 2 deletions test/e2e/basic/namespace-mapping.go
@@ -103,8 +103,8 @@ func (n *NamespaceMapping) Verify() error {
 for index, ns := range n.MappedNamespaceList {
 n.kibishiiData.Levels = len(*n.NSIncluded) + index
 By(fmt.Sprintf("Verify workload %s after restore ", ns), func() {
-Expect(KibishiiVerifyAfterRestore(n.Client, ns,
-n.Ctx, n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
+Expect(KibishiiVerifyAfterRestore(n.Ctx, n.Client, ns,
+n.kibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
 })
 }
 for _, ns := range *n.NSIncluded {
2 changes: 1 addition & 1 deletion test/e2e/migration/migration.go
@@ -398,9 +398,9 @@ func (m *migrationE2E) Restore() error {
 func (m *migrationE2E) Verify() error {
 By(fmt.Sprintf("Verify workload %s after restore on standby cluster", m.CaseBaseName), func() {
 Expect(kibishii.KibishiiVerifyAfterRestore(
+m.Ctx,
 *m.VeleroCfg.StandbyClient,
 m.CaseBaseName,
-m.Ctx,
 &m.kibishiiData,
 "",
 )).To(Succeed(), "Fail to verify workload after restore")
4 changes: 2 additions & 2 deletions test/e2e/upgrade/upgrade.go
@@ -249,8 +249,8 @@ func BackupUpgradeRestoreTest(useVolumeSnapshots bool, veleroCLI2Version VeleroC
 })
 
 By(fmt.Sprintf("Verify workload %s after restore ", upgradeNamespace), func() {
-Expect(KibishiiVerifyAfterRestore(*veleroCfg.ClientToInstallVelero, upgradeNamespace,
-oneHourTimeout, DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
+Expect(KibishiiVerifyAfterRestore(oneHourTimeout, *veleroCfg.ClientToInstallVelero, upgradeNamespace,
+DefaultKibishiiData, "")).To(Succeed(), "Fail to verify workload after restore")
 })
 })
 })
10 changes: 5 additions & 5 deletions test/util/kibishii/kibishii_utils.go
@@ -255,7 +255,7 @@ func RunKibishiiTests(
 }
 
 fmt.Printf("KibishiiVerifyAfterRestore %s\n", time.Now().Format("2006-01-02 15:04:05"))
-if err := KibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout, DefaultKibishiiData, fileName); err != nil {
+if err := KibishiiVerifyAfterRestore(oneHourTimeout, client, kibishiiNamespace, DefaultKibishiiData, fileName); err != nil {
 return errors.Wrapf(err, "Error verifying kibishii after restore")
 }
 
@@ -415,20 +415,20 @@ func KibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client TestClie
 return nil
 }
 
-func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, oneHourTimeout context.Context,
+func KibishiiVerifyAfterRestore(ctx context.Context, client TestClient, kibishiiNamespace string,
 kibishiiData *KibishiiData, incrementalFileName string) error {
 if kibishiiData == nil {
 kibishiiData = DefaultKibishiiData
 }
 // wait for kibishii pod startup
 // TODO - Fix kibishii so we can check that it is ready to go
 fmt.Printf("Waiting for kibishii pods to be ready\n")
-if err := waitForKibishiiPods(oneHourTimeout, client, kibishiiNamespace); err != nil {
+if err := waitForKibishiiPods(ctx, client, kibishiiNamespace); err != nil {
 return errors.Wrapf(err, "Failed to wait for ready status of kibishii pods in %s", kibishiiNamespace)
 }
 if incrementalFileName != "" {
 for _, pod := range KibishiiPodNameList {
-exist, err := FileExistInPV(oneHourTimeout, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName)
+exist, err := FileExistInPV(ctx, kibishiiNamespace, pod, "kibishii", "data", incrementalFileName)
 if err != nil {
 return errors.Wrapf(err, fmt.Sprintf("fail to get file %s", incrementalFileName))
 }
@@ -441,7 +441,7 @@ func KibishiiVerifyAfterRestore(client TestClient, kibishiiNamespace string, one
 
 // TODO - check that namespace exists
 fmt.Printf("running kibishii verify\n")
-if err := verifyData(oneHourTimeout, kibishiiNamespace, kibishiiData); err != nil {
+if err := verifyData(ctx, kibishiiNamespace, kibishiiData); err != nil {
 return errors.Wrap(err, "Failed to verify data generated by kibishii")
 }
 return nil
