diff --git a/controllers/masteruserrecord/masteruserrecord_controller.go b/controllers/masteruserrecord/masteruserrecord_controller.go
index ce109d7f1..d1bdb9a66 100644
--- a/controllers/masteruserrecord/masteruserrecord_controller.go
+++ b/controllers/masteruserrecord/masteruserrecord_controller.go
@@ -19,7 +19,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/source"
 
-	"github.com/go-logr/logr"
 	errs "github.com/pkg/errors"
 	coputil "github.com/redhat-cop/operator-utils/pkg/util"
 	corev1 "k8s.io/api/core/v1"
@@ -78,7 +77,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
 
 	// Fetch the MasterUserRecord instance
 	mur := &toolchainv1alpha1.MasterUserRecord{}
-	err := r.Client.Get(context.TODO(), request.NamespacedName, mur)
+	err := r.Client.Get(ctx, request.NamespacedName, mur)
 	if err != nil {
 		if errors.IsNotFound(err) {
 			// Request object not found, could have been deleted after reconcile request.
@@ -93,11 +92,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
 	// If the MUR is not being deleted, create or synchronize UserAccounts.
 	if !coputil.IsBeingDeleted(mur) {
 		// Add the finalizer if it is not present
-		if err := r.addFinalizer(logger, mur, murFinalizerName); err != nil {
+		if err := r.addFinalizer(ctx, mur, murFinalizerName); err != nil {
 			return reconcile.Result{}, err
 		}
 		logger.Info("ensuring user accounts")
-		membersWithUserAccounts, err := r.ensureUserAccounts(logger, mur)
+		membersWithUserAccounts, err := r.ensureUserAccounts(ctx, mur)
 		if err != nil {
 			return reconcile.Result{}, err
 		}
@@ -107,7 +106,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
 				return reconcile.Result{}, nil
 			}
 		}
-		requeueTime, err := r.ensureUserAccountsAreNotPresent(logger, mur, r.membersWithoutUserAccount(membersWithUserAccounts))
+		requeueTime, err := r.ensureUserAccountsAreNotPresent(ctx, mur, r.membersWithoutUserAccount(membersWithUserAccounts))
 		if err != nil {
 			return reconcile.Result{}, err
 		} else if requeueTime > 0 {
@@ -115,15 +114,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
 		}
 		// just in case there was no change in the set of UserAccounts and there was no provisioned
 		if len(membersWithUserAccounts) == 0 {
-			if _, err := alignReadiness(logger, r.Scheme, r.Client, mur); err != nil {
+			if _, err := alignReadiness(ctx, r.Scheme, r.Client, mur); err != nil {
 				return reconcile.Result{}, err
 			}
-			return reconcile.Result{}, r.Client.Status().Update(context.TODO(), mur)
+			return reconcile.Result{}, r.Client.Status().Update(ctx, mur)
 		}
 
 		// If the MUR is being deleted, delete the UserAccounts in members.
 	} else if coputil.HasFinalizer(mur, murFinalizerName) {
-		requeueTime, err := r.manageCleanUp(logger, mur)
+		requeueTime, err := r.manageCleanUp(ctx, mur)
 		if err != nil {
 			return reconcile.Result{}, err
 		} else if requeueTime > 0 {
@@ -134,11 +133,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.
 	return reconcile.Result{}, nil
 }
 
-func (r *Reconciler) ensureUserAccountsAreNotPresent(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, targetClusters map[string]cluster.Cluster) (time.Duration, error) {
+func (r *Reconciler) ensureUserAccountsAreNotPresent(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, targetClusters map[string]cluster.Cluster) (time.Duration, error) {
 	for clusterName, memberCluster := range targetClusters {
-		requeueTime, err := r.deleteUserAccount(logger, memberCluster, mur)
+		requeueTime, err := r.deleteUserAccount(ctx, memberCluster, mur)
 		if err != nil {
-			return 0, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToDeleteUserAccountsReason), err,
+			return 0, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToDeleteUserAccountsReason), err,
 				"failed to delete UserAccount in the member cluster '%s'", clusterName)
 		} else if requeueTime > 0 {
 			return requeueTime, nil
@@ -157,14 +156,14 @@ func (r *Reconciler) membersWithoutUserAccount(membersWithUserAccounts map[strin
 	return membersWithout
 }
 
-func (r *Reconciler) ensureUserAccounts(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord) (map[string]bool, error) {
+func (r *Reconciler) ensureUserAccounts(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord) (map[string]bool, error) {
 	spaceBindings := &toolchainv1alpha1.SpaceBindingList{}
-	if err := r.Client.List(context.TODO(), spaceBindings,
+	if err := r.Client.List(ctx, spaceBindings,
 		runtimeclient.InNamespace(mur.GetNamespace()),
 		runtimeclient.MatchingLabels{
 			toolchainv1alpha1.SpaceBindingMasterUserRecordLabelKey: mur.Name,
 		}); err != nil {
-		return nil, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
+		return nil, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
 			"unable to list SpaceBindings for the MasterUserRecord")
 	}
 	// let's keep the list of target clusters the UserAccounts should be provisioned to in a map - the value defines if the account was just created or updated
@@ -172,15 +171,15 @@ func (r *Reconciler) ensureUserAccounts(logger logr.Logger, mur *toolchainv1alph
 	for _, binding := range spaceBindings.Items {
 		if !coputil.IsBeingDeleted(&binding) { // nolint:gosec
 			space := &toolchainv1alpha1.Space{}
-			if err := r.Client.Get(context.TODO(), namespacedName(mur.Namespace, binding.Spec.Space), space); err != nil {
-				return nil, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
+			if err := r.Client.Get(ctx, namespacedName(mur.Namespace, binding.Spec.Space), space); err != nil {
+				return nil, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
 					"unable to get Space '%s' for the SpaceBinding '%s'", binding.Spec.Space, binding.Name)
 			}
 			if !coputil.IsBeingDeleted(space) && space.Spec.TargetCluster != "" {
 				// todo - right now we provision only one UserAccount. It's provisioned in the same cluster where the default space is created
 				// todo - as soon as the other components (reg-service & proxy) are updated to support more UserAccounts per MUR, then this should be changed as well
 				if space.Labels[toolchainv1alpha1.SpaceCreatorLabelKey] == mur.Labels[toolchainv1alpha1.MasterUserRecordOwnerLabelKey] {
-					if createdOrUpdated, err := r.ensureUserAccount(logger, mur, space.Spec.TargetCluster); err != nil || createdOrUpdated {
+					if createdOrUpdated, err := r.ensureUserAccount(ctx, mur, space.Spec.TargetCluster); err != nil || createdOrUpdated {
 						targetClusters[space.Spec.TargetCluster] = true
 						return targetClusters, err
 					}
@@ -193,12 +192,14 @@ func (r *Reconciler) ensureUserAccounts(logger logr.Logger, mur *toolchainv1alph
 	return targetClusters, nil
 }
 
-func (r *Reconciler) addFinalizer(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, finalizer string) error {
+func (r *Reconciler) addFinalizer(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, finalizer string) error {
+	logger := log.FromContext(ctx)
+
 	// Add the finalizer if it is not present
 	if !coputil.HasFinalizer(mur, finalizer) {
 		coputil.AddFinalizer(mur, finalizer)
-		if err := r.Client.Update(context.TODO(), mur); err != nil {
-			return r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToAddFinalizerReason), err,
+		if err := r.Client.Update(ctx, mur); err != nil {
+			return r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToAddFinalizerReason), err,
 				"failed while updating with added finalizer")
 		}
 		logger.Info("MasterUserRecord now has finalizer")
@@ -212,11 +213,11 @@ func (r *Reconciler) addFinalizer(logger logr.Logger, mur *toolchainv1alpha1.Mas
 // If the UserAccount resource already exists, then this latter is synchronized using the given `murAccount` and the associated `mur` status is also updated to reflect
 // the UserAccount specs.
 // Returns bool as the first argument if the UserAccount was either created or updated
-func (r *Reconciler) ensureUserAccount(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, targetCluster string) (bool, error) {
+func (r *Reconciler) ensureUserAccount(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, targetCluster string) (bool, error) {
 	// get & check member cluster
 	memberCluster, found := r.MemberClusters[targetCluster]
 	if !found {
-		return false, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordTargetClusterNotReadyReason),
+		return false, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordTargetClusterNotReadyReason),
 			fmt.Errorf("unknown target member cluster '%s'", targetCluster),
 			"failed to get the member cluster '%s'", targetCluster)
 	}
@@ -224,7 +225,7 @@ func (r *Reconciler) ensureUserAccount(logger logr.Logger, mur *toolchainv1alpha
 	// get UserAccount from member
 	nsdName := namespacedName(memberCluster.OperatorNamespace, mur.Name)
 	userAccount := &toolchainv1alpha1.UserAccount{}
-	if err := memberCluster.Client.Get(context.TODO(), nsdName, userAccount); err != nil {
+	if err := memberCluster.Client.Get(ctx, nsdName, userAccount); err != nil {
 		if errors.IsNotFound(err) {
 			// does not exist - should create
 			userAccount = newUserAccount(nsdName, mur)
@@ -232,21 +233,22 @@ func (r *Reconciler) ensureUserAccount(logger logr.Logger, mur *toolchainv1alpha
 			// Remove this after all users have been migrated to new IdP client
 			userAccount.Spec.OriginalSub = mur.Spec.OriginalSub
 
-			if err := memberCluster.Client.Create(context.TODO(), userAccount); err != nil {
-				return false, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
+			if err := memberCluster.Client.Create(ctx, userAccount); err != nil {
+				return false, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToCreateUserAccountReason), err,
 					"failed to create UserAccount in the member cluster '%s'", targetCluster)
 			}
-			return true, updateStatusConditions(logger, r.Client, mur, toBeNotReady(toolchainv1alpha1.MasterUserRecordProvisioningReason, ""))
+			return true, updateStatusConditions(ctx, r.Client, mur, toBeNotReady(toolchainv1alpha1.MasterUserRecordProvisioningReason, ""))
 		}
 		// another/unexpected error occurred while trying to fetch the user account on the member cluster
-		return false, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToGetUserAccountReason), err,
+		return false, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToGetUserAccountReason), err,
 			"failed to get userAccount '%s' from cluster '%s'", mur.Name, targetCluster)
 	}
 
 	// if the UserAccount is being deleted (by accident?), then we should wait until is has been totally deleted, and this controller will recreate it again
+	logger := log.FromContext(ctx)
 	if coputil.IsBeingDeleted(userAccount) {
 		logger.Info("UserAccount is being deleted. Waiting until deletion is complete", "member_cluster", memberCluster.Name)
-		return true, updateStatusConditions(logger, r.Client, mur, toBeNotReady(toolchainv1alpha1.MasterUserRecordProvisioningReason, "recovering deleted UserAccount"))
+		return true, updateStatusConditions(ctx, r.Client, mur, toBeNotReady(toolchainv1alpha1.MasterUserRecordProvisioningReason, "recovering deleted UserAccount"))
 	}
 
 	sync := Synchronizer{
@@ -257,31 +259,31 @@ func (r *Reconciler) ensureUserAccount(logger logr.Logger, mur *toolchainv1alpha
 		logger:        logger,
 		scheme:        r.Scheme,
 	}
-	updated, err := sync.synchronizeSpec()
+	updated, err := sync.synchronizeSpec(ctx)
 	if err != nil {
 		// note: if we got an error while sync'ing the spec, then we may not be able to update the MUR status it here neither.
-		return false, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToSynchronizeUserAccountSpecReason), err,
+		return false, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToSynchronizeUserAccountSpecReason), err,
 			"update of the UserAccount.spec in the cluster '%s' failed", targetCluster)
 	}
-	if err := sync.synchronizeStatus(); err != nil {
+	if err := sync.synchronizeStatus(ctx); err != nil {
 		err = errs.Wrapf(err, "update of the MasterUserRecord failed while synchronizing with UserAccount status from the cluster '%s'", targetCluster)
 		// note: if we got an error while updating the status, then we probably can't update it here neither.
-		return false, r.wrapErrorWithStatusUpdate(logger, mur, r.useExistingConditionOfType(toolchainv1alpha1.ConditionReady), err, "")
+		return false, r.wrapErrorWithStatusUpdate(ctx, mur, r.useExistingConditionOfType(toolchainv1alpha1.ConditionReady), err, "")
 	}
 	// nothing done and no error occurred
 	logger.Info("user account on member cluster was already present", "target_cluster", targetCluster, "updated", updated)
 	return updated, nil
 }
 
-type statusUpdater func(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, message string) error
+type statusUpdater func(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, message string) error
 
 // wrapErrorWithStatusUpdate wraps the error and update the user account status. If the update failed then logs the error.
-func (r *Reconciler) wrapErrorWithStatusUpdate(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, updateStatus statusUpdater, err error, format string, args ...interface{}) error {
+func (r *Reconciler) wrapErrorWithStatusUpdate(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, updateStatus statusUpdater, err error, format string, args ...interface{}) error {
 	if err == nil {
 		return nil
 	}
-	if err := updateStatus(logger, mur, err.Error()); err != nil {
-		logger.Error(err, "status update failed")
+	if err := updateStatus(ctx, mur, err.Error()); err != nil {
+		log.FromContext(ctx).Error(err, "status update failed")
 	}
 	if format != "" {
 		return errs.Wrapf(err, format, args...)
@@ -290,9 +292,9 @@ func (r *Reconciler) wrapErrorWithStatusUpdate(logger logr.Logger, mur *toolchai
 }
 
 func (r *Reconciler) setStatusFailed(reason string) statusUpdater {
-	return func(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
+	return func(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
 		return updateStatusConditions(
-			logger,
+			ctx,
 			r.Client,
 			mur,
 			toBeNotReady(reason, message))
@@ -300,7 +302,7 @@ func (r *Reconciler) setStatusFailed(reason string) statusUpdater {
 }
 
 func (r *Reconciler) useExistingConditionOfType(condType toolchainv1alpha1.ConditionType) statusUpdater {
-	return func(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
+	return func(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
 		cond := toolchainv1alpha1.Condition{Type: condType}
 		for _, con := range mur.Status.Conditions {
 			if con.Type == condType {
@@ -309,27 +311,29 @@ func (r *Reconciler) useExistingConditionOfType(condType toolchainv1alpha1.Condi
 			}
 		}
 		cond.Message = message
-		return updateStatusConditions(logger, r.Client, mur, cond)
+		return updateStatusConditions(ctx, r.Client, mur, cond)
 	}
 }
 
-func (r *Reconciler) manageCleanUp(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord) (time.Duration, error) {
-	if requeue, err := r.ensureUserAccountsAreNotPresent(logger, mur, r.MemberClusters); err != nil || requeue > 0 {
+func (r *Reconciler) manageCleanUp(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord) (time.Duration, error) {
+	if requeue, err := r.ensureUserAccountsAreNotPresent(ctx, mur, r.MemberClusters); err != nil || requeue > 0 {
 		return requeue, err
 	}
 	// Remove finalizer from MasterUserRecord
 	coputil.RemoveFinalizer(mur, murFinalizerName)
 	if err := r.Client.Update(context.Background(), mur); err != nil {
-		return 0, r.wrapErrorWithStatusUpdate(logger, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToRemoveFinalizerReason), err,
+		return 0, r.wrapErrorWithStatusUpdate(ctx, mur, r.setStatusFailed(toolchainv1alpha1.MasterUserRecordUnableToRemoveFinalizerReason), err,
 			"failed to update MasterUserRecord while deleting finalizer")
 	}
 	domain := metrics.GetEmailDomain(mur)
+	logger := log.FromContext(ctx)
 	counter.DecrementMasterUserRecordCount(logger, domain)
 	logger.Info("Finalizer removed from MasterUserRecord")
 	return 0, nil
 }
 
-func (r *Reconciler) deleteUserAccount(logger logr.Logger, memberCluster cluster.Cluster, mur *toolchainv1alpha1.MasterUserRecord) (time.Duration, error) {
+func (r *Reconciler) deleteUserAccount(ctx context.Context, memberCluster cluster.Cluster, mur *toolchainv1alpha1.MasterUserRecord) (time.Duration, error) {
+	logger := log.FromContext(ctx)
 	requeueTime := 10 * time.Second
 
 	userAcc := &toolchainv1alpha1.UserAccount{}
@@ -344,16 +348,16 @@ func (r *Reconciler) deleteUserAccount(logger logr.Logger, memberCluster cluster
 
 	// Get the User associated with the UserAccount
 	namespacedName := types.NamespacedName{Namespace: memberCluster.OperatorNamespace, Name: mur.Name}
-	if err := memberCluster.Client.Get(context.TODO(), namespacedName, userAcc); err != nil {
+	if err := memberCluster.Client.Get(ctx, namespacedName, userAcc); err != nil {
 		if errors.IsNotFound(err) {
 			logger.Info(fmt.Sprintf("UserAccount is not present in '%s' - making sure that it's not in the MasterUserRecord.Status", memberCluster.Name))
-			return 0, sync.removeAccountFromStatus()
+			return 0, sync.removeAccountFromStatus(ctx)
 		}
 		return 0, err
 	}
 
 	if coputil.IsBeingDeleted(userAcc) {
-		if err := sync.synchronizeStatus(); err != nil {
+		if err := sync.synchronizeStatus(ctx); err != nil {
 			return 0, err
 		}
 		// if the UserAccount is being deleted, allow up to 1 minute of retries before reporting an error
@@ -364,7 +368,7 @@ func (r *Reconciler) deleteUserAccount(logger logr.Logger, memberCluster cluster
 		return requeueTime, nil
 	}
 	propagationPolicy := metav1.DeletePropagationForeground
-	err := memberCluster.Client.Delete(context.TODO(), userAcc, &runtimeclient.DeleteOptions{
+	err := memberCluster.Client.Delete(ctx, userAcc, &runtimeclient.DeleteOptions{
 		PropagationPolicy: &propagationPolicy,
 	})
 	if err != nil {
@@ -408,7 +412,9 @@ func toBeProvisionedNotificationCreated() toolchainv1alpha1.Condition {
 }
 
 // updateStatusConditions updates user account status conditions with the new conditions
-func updateStatusConditions(logger logr.Logger, cl runtimeclient.Client, mur *toolchainv1alpha1.MasterUserRecord, newConditions ...toolchainv1alpha1.Condition) error {
+func updateStatusConditions(ctx context.Context, cl runtimeclient.Client, mur *toolchainv1alpha1.MasterUserRecord, newConditions ...toolchainv1alpha1.Condition) error {
+	logger := log.FromContext(ctx)
+
 	var updated bool
 	mur.Status.Conditions, updated = condition.AddOrUpdateStatusConditions(mur.Status.Conditions, newConditions...)
 	if !updated {
@@ -417,7 +423,7 @@ func updateStatusConditions(logger logr.Logger, cl runtimeclient.Client, mur *to
 		return nil
 	}
 	logger.Info("updating MUR status conditions", "generation", mur.Generation, "resource_version", mur.ResourceVersion)
-	err := cl.Status().Update(context.TODO(), mur)
+	err := cl.Status().Update(ctx, mur)
 	logger.Info("updated MUR status conditions", "generation", mur.Generation, "resource_version", mur.ResourceVersion)
 	return err
 }
diff --git a/controllers/masteruserrecord/masteruserrecord_controller_test.go b/controllers/masteruserrecord/masteruserrecord_controller_test.go
index 80d9693b6..73902104f 100644
--- a/controllers/masteruserrecord/masteruserrecord_controller_test.go
+++ b/controllers/masteruserrecord/masteruserrecord_controller_test.go
@@ -20,7 +20,6 @@ import (
 	spacetest "github.com/codeready-toolchain/toolchain-common/pkg/test/space"
 	uatest "github.com/codeready-toolchain/toolchain-common/pkg/test/useraccount"
 	commonsignup "github.com/codeready-toolchain/toolchain-common/pkg/test/usersignup"
-	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
@@ -31,6 +30,7 @@ import (
 	"k8s.io/client-go/kubernetes/scheme"
 	ctrl "sigs.k8s.io/controller-runtime"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -786,12 +786,14 @@ func TestCreateSynchronizeOrDeleteUserAccountFailed(t *testing.T) {
 	memberClient := commontest.NewFakeClient(t)
 	cntrl := newController(hostClient, s, ClusterClient(commontest.MemberClusterName, memberClient))
 
-	statusUpdater := func(logger logr.Logger, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
+	statusUpdater := func(ctx context.Context, mur *toolchainv1alpha1.MasterUserRecord, message string) error {
 		return fmt.Errorf("unable to update status")
 	}
 
 	// when
-	err := cntrl.wrapErrorWithStatusUpdate(testLog, mur, statusUpdater,
+	ctx := context.Background()
+	ctx = log.IntoContext(ctx, testLog)
+	err := cntrl.wrapErrorWithStatusUpdate(ctx, mur, statusUpdater,
 		apierros.NewBadRequest("oopsy woopsy"),
 		"failed to create %s", "user bob")

 	// then
diff --git a/controllers/masteruserrecord/sync.go b/controllers/masteruserrecord/sync.go
index 35ad293eb..8b267e119 100644
--- a/controllers/masteruserrecord/sync.go
+++ b/controllers/masteruserrecord/sync.go
@@ -20,6 +20,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 type Synchronizer struct {
@@ -32,11 +33,11 @@ type Synchronizer struct {
 }
 
 // synchronizeSpec synchronizes the useraccount in the MasterUserRecord with the corresponding UserAccount on the member cluster.
-func (s *Synchronizer) synchronizeSpec() (bool, error) {
+func (s *Synchronizer) synchronizeSpec(ctx context.Context) (bool, error) {
 	if !s.isSynchronized() {
 		s.logger.Info("synchronizing specs")
-		if err := updateStatusConditions(s.logger, s.hostClient, s.record, toBeNotReady(toolchainv1alpha1.MasterUserRecordUpdatingReason, "")); err != nil {
+		if err := updateStatusConditions(ctx, s.hostClient, s.record, toBeNotReady(toolchainv1alpha1.MasterUserRecordUpdatingReason, "")); err != nil {
 			return false, err
 		}
 		s.memberUserAcc.Spec.Disabled = s.record.Spec.Disabled
@@ -55,7 +56,7 @@ func (s *Synchronizer) synchronizeSpec() (bool, error) {
 		}
 		s.memberUserAcc.Annotations[toolchainv1alpha1.UserEmailAnnotationKey] = s.record.Annotations[toolchainv1alpha1.MasterUserRecordEmailAnnotationKey]
 
-		err := s.memberCluster.Client.Update(context.TODO(), s.memberUserAcc)
+		err := s.memberCluster.Client.Update(ctx, s.memberUserAcc)
 		if err != nil {
 			s.logger.Error(err, "synchronizing failed")
 			return false, err
@@ -73,28 +74,28 @@ func (s *Synchronizer) isSynchronized() bool {
 		s.memberUserAcc.Labels != nil && s.memberUserAcc.Labels[toolchainv1alpha1.TierLabelKey] == s.record.Spec.TierName &&
 		s.memberUserAcc.Annotations != nil && s.memberUserAcc.Annotations[toolchainv1alpha1.UserEmailAnnotationKey] == s.record.Annotations[toolchainv1alpha1.MasterUserRecordEmailAnnotationKey]
 }
 
-func (s *Synchronizer) removeAccountFromStatus() error {
+func (s *Synchronizer) removeAccountFromStatus(ctx context.Context) error {
 	for i := range s.record.Status.UserAccounts {
 		if s.record.Status.UserAccounts[i].Cluster.Name == s.memberCluster.Name {
 			s.record.Status.UserAccounts = append(s.record.Status.UserAccounts[:i], s.record.Status.UserAccounts[i+1:]...)
 			if !util.IsBeingDeleted(s.record) {
-				if _, err := alignReadiness(s.logger, s.scheme, s.hostClient, s.record); err != nil {
+				if _, err := alignReadiness(ctx, s.scheme, s.hostClient, s.record); err != nil {
 					return err
 				}
 			}
 			s.logger.Info("updating MUR status")
-			return s.hostClient.Status().Update(context.TODO(), s.record)
+			return s.hostClient.Status().Update(ctx, s.record)
 		}
 	}
 	return nil
 }
 
-func (s *Synchronizer) synchronizeStatus() error {
+func (s *Synchronizer) synchronizeStatus(ctx context.Context) error {
 	recordStatusUserAcc, index := getUserAccountStatus(s.memberCluster.Name, s.record)
 	expectedRecordStatus := *(&recordStatusUserAcc).DeepCopy()
 	expectedRecordStatus.UserAccountStatus = s.memberUserAcc.Status
-	expectedRecordStatus, err := s.withClusterDetails(expectedRecordStatus)
+	expectedRecordStatus, err := s.withClusterDetails(ctx, expectedRecordStatus)
 	if err != nil {
 		return err
 	}
@@ -109,7 +110,7 @@ func (s *Synchronizer) synchronizeStatus() error {
 		s.record.Status.UserAccounts[index] = recordStatusUserAcc
 	}
 
-	ready, err := alignReadiness(s.logger, s.scheme, s.hostClient, s.record)
+	ready, err := alignReadiness(ctx, s.scheme, s.hostClient, s.record)
 	if err != nil {
 		return err
 	}
@@ -118,7 +119,7 @@ func (s *Synchronizer) synchronizeStatus() error {
 		s.alignDisabled()
 	}
 
-	err = s.hostClient.Status().Update(context.TODO(), s.record)
+	err = s.hostClient.Status().Update(ctx, s.record)
 	if err != nil {
 		// if there is an error during status update then we "roll back" all the status changes we did above
 		// (don't add new user account status if it was added or don't change it if it's updated)
@@ -133,20 +134,20 @@ func (s *Synchronizer) synchronizeStatus() error {
 
 	// Align readiness even if the user account statuses were not changed.
 	// We need to do it to cleanup outdated errors (for example if the target cluster was unavailable) if any
-	_, err = alignReadiness(s.logger, s.scheme, s.hostClient, s.record)
+	_, err = alignReadiness(ctx, s.scheme, s.hostClient, s.record)
 	if err != nil {
 		return err
 	}
 	s.logger.Info("updating MUR status")
-	return s.hostClient.Status().Update(context.TODO(), s.record)
+	return s.hostClient.Status().Update(ctx, s.record)
 }
 
 // withClusterDetails returns the given user account status with additional information about
 // the target cluster such as API endpoint and Console URL if not set yet
-func (s *Synchronizer) withClusterDetails(status toolchainv1alpha1.UserAccountStatusEmbedded) (toolchainv1alpha1.UserAccountStatusEmbedded, error) {
+func (s *Synchronizer) withClusterDetails(ctx context.Context, status toolchainv1alpha1.UserAccountStatusEmbedded) (toolchainv1alpha1.UserAccountStatusEmbedded, error) {
 	if status.Cluster.Name != "" {
 		toolchainStatus := &toolchainv1alpha1.ToolchainStatus{}
-		if err := s.hostClient.Get(context.TODO(), types.NamespacedName{Namespace: s.record.Namespace, Name: toolchainconfig.ToolchainStatusName}, toolchainStatus); err != nil {
+		if err := s.hostClient.Get(ctx, types.NamespacedName{Namespace: s.record.Namespace, Name: toolchainconfig.ToolchainStatusName}, toolchainStatus); err != nil {
 			return status, errs.Wrapf(err, "unable to read ToolchainStatus resource")
 		}
 
@@ -188,7 +189,7 @@ func (s *Synchronizer) alignDisabled() {
 }
 
 // alignReadiness updates the status to Provisioned and returns true if all the embedded UserAccounts are ready
-func alignReadiness(logger logr.Logger, scheme *runtime.Scheme, hostClient runtimeclient.Client, mur *toolchainv1alpha1.MasterUserRecord) (bool, error) {
+func alignReadiness(ctx context.Context, scheme *runtime.Scheme, hostClient runtimeclient.Client, mur *toolchainv1alpha1.MasterUserRecord) (bool, error) {
 	// Lookup the UserSignup
 	for _, uaStatus := range mur.Status.UserAccounts {
 		if !condition.IsTrue(uaStatus.Conditions, toolchainv1alpha1.ConditionReady) {
@@ -222,7 +223,7 @@ func alignReadiness(logger logr.Logger, scheme *runtime.Scheme, hostClient runti
 	}
 	opts := runtimeclient.MatchingLabels(labels)
 	notificationList := &toolchainv1alpha1.NotificationList{}
-	if err := hostClient.List(context.TODO(), notificationList, opts); err != nil {
+	if err := hostClient.List(ctx, notificationList, opts); err != nil {
 		return false, err
 	}
 	// if there is no existing notification with these labels
@@ -239,7 +240,7 @@ func alignReadiness(logger logr.Logger, scheme *runtime.Scheme, hostClient runti
 
 	// Lookup the UserSignup
 	userSignup := &toolchainv1alpha1.UserSignup{}
-	err = hostClient.Get(context.TODO(), types.NamespacedName{
+	err = hostClient.Get(ctx, types.NamespacedName{
 		Namespace: mur.Namespace,
 		Name:      mur.Labels[toolchainv1alpha1.MasterUserRecordOwnerLabelKey],
 	}, userSignup)
@@ -259,7 +260,7 @@ func alignReadiness(logger logr.Logger, scheme *runtime.Scheme, hostClient runti
 			return false, err
 		}
 	} else {
-		logger.Info(fmt.Sprintf("The %s notification for user %s was not created because it already exists: %v",
+		log.FromContext(ctx).Info(fmt.Sprintf("The %s notification for user %s was not created because it already exists: %v",
 			toolchainv1alpha1.NotificationTypeProvisioned, mur.Name, notificationList.Items[0]))
 	}
 	mur.Status.Conditions, _ = condition.AddOrUpdateStatusConditions(mur.Status.Conditions, toBeProvisionedNotificationCreated())
diff --git a/controllers/masteruserrecord/sync_test.go b/controllers/masteruserrecord/sync_test.go
index 46aeacb99..8fc14efb7 100644
--- a/controllers/masteruserrecord/sync_test.go
+++ b/controllers/masteruserrecord/sync_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/stretchr/testify/require"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 )
@@ -163,7 +164,7 @@ func TestSynchronizeSpec(t *testing.T) {
 	sync, memberClient := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	createdOrUpdated, err := sync.synchronizeSpec()
+	createdOrUpdated, err := sync.synchronizeSpec(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -189,7 +190,7 @@ func TestSynchronizeAnnotation(t *testing.T) {
 	sync, memberClient := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	createdOrUpdated, err := sync.synchronizeSpec()
+	createdOrUpdated, err := sync.synchronizeSpec(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -217,7 +218,7 @@ func TestSynchronizeStatus(t *testing.T) {
 	sync, memberClient := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -234,7 +235,7 @@ func TestSynchronizeStatus(t *testing.T) {
 	sync, _ := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.Error(t, err)
@@ -271,7 +272,7 @@ func TestSyncMurStatusWithUserAccountStatusWhenUpdated(t *testing.T) {
 	sync, memberClient := prepareSynchronizer(t, updatingUserAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -287,7 +288,7 @@ func TestSyncMurStatusWithUserAccountStatusWhenUpdated(t *testing.T) {
 
 	// when
 	preSyncTime := metav1.Now()
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -305,7 +306,7 @@ func TestSyncMurStatusWithUserAccountStatusWhenUpdated(t *testing.T) {
 	sync, _ := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.Error(t, err)
@@ -336,7 +337,7 @@ func TestSyncMurStatusWithUserAccountStatusWhenDisabled(t *testing.T) {
 	sync, memberClient := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.NoError(t, err)
@@ -353,7 +354,7 @@ func TestSyncMurStatusWithUserAccountStatusWhenDisabled(t *testing.T) {
 	sync, _ := prepareSynchronizer(t, userAccount, mur, hostClient)
 
 	// when
-	err := sync.synchronizeStatus()
+	err := sync.synchronizeStatus(context.TODO())
 
 	// then
 	require.Error(t, err)
@@ -387,7 +388,9 @@ func TestAlignReadiness(t *testing.T) {
 		},
 	}
 
-	log := zap.New(zap.UseDevMode(true))
+	l := zap.New(zap.UseDevMode(true))
+	ctx := context.TODO()
+	ctx = log.IntoContext(ctx, l)
 
 	t.Run("ready propagated and notification created", func(t *testing.T) {
 		// given
@@ -396,7 +399,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -420,7 +423,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
 		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -443,7 +446,7 @@ func TestAlignReadiness(t *testing.T) {
 		hostClient := test.NewFakeClient(t, userSignup, mur, readyToolchainStatus)
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -467,7 +470,7 @@ func TestAlignReadiness(t *testing.T) {
 		hostClient := test.NewFakeClient(t, userSignup, mur, readyToolchainStatus)
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -489,7 +492,7 @@ func TestAlignReadiness(t *testing.T) {
 		hostClient := test.NewFakeClient(t, userSignup, mur, readyToolchainStatus)
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -511,7 +514,7 @@ func TestAlignReadiness(t *testing.T) {
 		hostClient := test.NewFakeClient(t, userSignup, mur, readyToolchainStatus)
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -533,7 +536,7 @@ func TestAlignReadiness(t *testing.T) {
 		hostClient := test.NewFakeClient(t, userSignup, mur, readyToolchainStatus)
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -559,7 +562,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
 		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -583,7 +586,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
 		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -614,7 +617,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
 		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -635,7 +638,7 @@ func TestAlignReadiness(t *testing.T) {
 
 		// when
 		preAlignTime := metav1.Now()
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.NoError(t, err)
@@ -657,7 +660,7 @@ func TestAlignReadiness(t *testing.T) {
 		}
 
 		// when
-		ready, err := alignReadiness(log, s, hostClient, mur)
+		ready, err := alignReadiness(ctx, s, hostClient, mur)
 
 		// then
 		require.Error(t, err)
@@ -698,7 +701,7 @@ func TestSynchronizeUserAccountFailed(t *testing.T) {
 		}
 
 		// when
-		createdOrUpdated, err := sync.synchronizeSpec()
+		createdOrUpdated, err := sync.synchronizeSpec(context.TODO())
 
 		// then
 		require.Error(t, err)
@@ -728,7 +731,7 @@ func TestSynchronizeUserAccountFailed(t *testing.T) {
 		t.Run("with empty set of UserAccounts statuses", func(t *testing.T) {
 
 			// when
-			err := sync.synchronizeStatus()
+			err := sync.synchronizeStatus(context.TODO())
 
 			// then
 			require.Error(t, err)
@@ -746,7 +749,7 @@ func TestSynchronizeUserAccountFailed(t *testing.T) {
 			provisionedMur.Status.UserAccounts = []toolchainv1alpha1.UserAccountStatusEmbedded{additionalUserAcc}
 
 			// when
-			err := sync.synchronizeStatus()
+			err := sync.synchronizeStatus(context.TODO())
 
 			// then
 			require.Error(t, err)
@@ -765,7 +768,7 @@ func TestSynchronizeUserAccountFailed(t *testing.T) {
 			provisionedMur.Status.UserAccounts = []toolchainv1alpha1.UserAccountStatusEmbedded{toBeModified}
 
 			// when
-			err := sync.synchronizeStatus()
+			err := sync.synchronizeStatus(context.TODO())
 
 			// then
 			require.Error(t, err)
@@ -808,7 +811,7 @@ func TestSynchronizeUserAccountFailed(t *testing.T) {
 		}
 
 		// when
-		err := sync.synchronizeStatus()
+		err := sync.synchronizeStatus(context.TODO())
 
 		// then
 		assert.Error(t, err)
@@ -838,7 +841,7 @@ func TestRemoveAccountFromStatus(t *testing.T) {
 		sync, _ := prepareSynchronizer(t, nil, mur, hostClient)
 
 		// when
-		err := sync.removeAccountFromStatus()
+		err := sync.removeAccountFromStatus(context.TODO())
 
 		// then
 		require.NoError(t, err)
@@ -858,7 +861,7 @@ func TestRemoveAccountFromStatus(t *testing.T) {
 		sync, _ := prepareSynchronizer(t, nil, mur, hostClient)
 
 		// when
-		err := sync.removeAccountFromStatus()
+		err := sync.removeAccountFromStatus(context.TODO())
 
 		// then
 		require.NoError(t, err)
@@ -877,7 +880,7 @@ func TestRemoveAccountFromStatus(t *testing.T) {
 		sync, _ := prepareSynchronizer(t, nil, mur, hostClient)
 
 		// when
-		err := sync.removeAccountFromStatus()
+		err := sync.removeAccountFromStatus(context.TODO())
 
 		// then
 		require.NoError(t, err)
@@ -916,7 +919,7 @@ func TestRoutes(t *testing.T) {
 		}
 
 		// when
-		err := sync.synchronizeStatus()
+		err := sync.synchronizeStatus(context.TODO())
 
 		// then
 		require.NoError(t, err)
@@ -950,7 +953,7 @@ func TestRoutes(t *testing.T) {
 		}
 
 		// when
-		err := sync.synchronizeStatus()
+		err := sync.synchronizeStatus(context.TODO())
 
 		// then
 		require.NoError(t, err)
@@ -984,7 +987,7 @@ func TestRoutes(t *testing.T) {
 		}
 
 		// when
-		err := sync.synchronizeStatus()
+		err := sync.synchronizeStatus(context.TODO())
 
 		// then
 		require.Error(t, err)
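
Note on the pattern this change standardizes on: instead of threading a logr.Logger argument through every helper, callers pass the context.Context and each helper recovers the request-scoped logger with log.FromContext(ctx); tests attach a logger with log.IntoContext before calling the helpers. The snippet below is a minimal, self-contained sketch of that contract and is not part of the patch; the ensureWidget helper, its argument, and the "widget" naming are invented purely for illustration.

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// ensureWidget stands in for helpers like addFinalizer or deleteUserAccount after the
// refactor: it receives only a context and pulls the request-scoped logger out of it.
// The name and argument are made up for this sketch.
func ensureWidget(ctx context.Context, name string) error {
	logger := log.FromContext(ctx) // falls back to a no-op logger if none was stored in ctx
	logger.Info("ensuring widget", "name", name)
	return nil
}

func main() {
	// In a controller, controller-runtime already places a request-scoped logger into the
	// ctx handed to Reconcile; tests (and this sketch) have to do it themselves.
	logger := zap.New(zap.UseDevMode(true))

	// IntoContext returns a new context carrying the logger; the return value must be
	// kept, otherwise the logger is silently dropped.
	ctx := log.IntoContext(context.Background(), logger)

	if err := ensureWidget(ctx, "example"); err != nil {
		logger.Error(err, "failed to ensure widget")
	}
}

Passing the context rather than a logger keeps helper signatures stable as other request-scoped concerns (cancellation, tracing) are added, which is why the test hunks above build their context with log.IntoContext before exercising the reconciler helpers.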