diff --git a/Makefile b/Makefile index ed3763c100..9d38c35671 100644 --- a/Makefile +++ b/Makefile @@ -491,10 +491,13 @@ post-install-hook: x509-cert: ## Create X.509 cert at path tmp/x509/ (see docs/x509-user.md) go run scripts/create_x509.go -clean: ## Clean built binaries +.PHONY: clean-gen-crds +clean-gen-crds: ## Clean only generated CRD files + rm -f config/generated/crd/bases/crds.yaml + +clean: clean-gen-crds ## Clean built binaries rm -rf bin/* rm -rf config/manifests/bases/ - rm -f config/generated/crd/bases/crds.yaml rm -f config/crd/bases/*.yaml rm -f helm-charts/atlas-operator-crds/templates/*.yaml rm -f config/rbac/clusterwide/role.yaml @@ -607,7 +610,7 @@ clear-e2e-leftovers: ## Clear the e2e test leftovers quickly install-crds: manifests ## Install CRDs in Kubernetes kubectl apply -k config/crd ifdef EXPERIMENTAL - $(MAKE) clean gen-crds + $(MAKE) regen-crds kubectl apply -f config/generated/crd/bases/crds.yaml endif @@ -881,6 +884,9 @@ gen-crds: tools/openapi2crd/bin/openapi2crd --output $(realpath .)/config/generated/crd/bases/crds.yaml cp $(realpath .)/config/generated/crd/bases/crds.yaml $(realpath .)/internal/generated/crds/crds.yaml +.PHONY: regen-crds +regen-crds: clean-gen-crds gen-crds ## Clean and regenerate CRDs + gen-go-types: @echo "==> Generating Go models from CRDs..." 
$(CRD2GO) --input $(realpath .)/config/generated/crd/bases/crds.yaml \ diff --git a/config/samples/atlas_generated_v1_flexcluster.yaml b/config/samples/atlas_generated_v1_flexcluster.yaml new file mode 100644 index 0000000000..ab62e54200 --- /dev/null +++ b/config/samples/atlas_generated_v1_flexcluster.yaml @@ -0,0 +1,16 @@ +apiVersion: atlas.generated.mongodb.com/v1 +kind: FlexCluster +metadata: + name: flexy +spec: + connectionSecretRef: + name: mongodb-atlas-operator-api-key + v20250312: + groupId: "60f1b3c4e4b0e8b8c8b8c8b" + entry: + name: flexy + terminationProtectionEnabled: true + providerSettings: + backingProviderName: GCP + regionName: CENTRAL_US + diff --git a/config/samples/atlas_generated_v1_flexcluster_with_groupref.yaml b/config/samples/atlas_generated_v1_flexcluster_with_groupref.yaml new file mode 100644 index 0000000000..b69b42c0c7 --- /dev/null +++ b/config/samples/atlas_generated_v1_flexcluster_with_groupref.yaml @@ -0,0 +1,29 @@ +apiVersion: atlas.generated.mongodb.com/v1 +kind: Group +metadata: + name: my-group-for-flexcluster +spec: + connectionSecretRef: + name: mongodb-atlas-operator-api-key + v20250312: + entry: + orgId: "60f1b3c4e4b0e8b8c8b8c8b" + name: my-group-for-flexcluster +--- +apiVersion: atlas.generated.mongodb.com/v1 +kind: FlexCluster +metadata: + name: flexy-with-groupref + annotations: + some-tag: tag +spec: + v20250312: + groupRef: + name: my-group-for-flexcluster + entry: + name: flexy-with-groupref + terminationProtectionEnabled: true + providerSettings: + backingProviderName: GCP + regionName: CENTRAL_US + diff --git a/config/samples/atlas_generated_v1_group.yaml b/config/samples/atlas_generated_v1_group.yaml new file mode 100644 index 0000000000..71e3d4a6ac --- /dev/null +++ b/config/samples/atlas_generated_v1_group.yaml @@ -0,0 +1,12 @@ +apiVersion: atlas.generated.mongodb.com/v1 +kind: Group +metadata: + name: my-group +spec: + connectionSecretRef: + name: mongodb-atlas-operator-api-key + v20250312: + entry: + orgId: 
"60f1b3c4e4b0e8b8c8b8c8b" + name: my-group + diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 240068e4b1..bf7cfdb5fb 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -20,4 +20,7 @@ resources: - atlas_v1_atlasbackupcompliancepolicy.yaml - atlas_v1_atlascustomrole.yaml - atlas_v1_atlasthirdpartyintegration.yaml + - atlas_generated_v1_group.yaml + - atlas_generated_v1_flexcluster.yaml + - atlas_generated_v1_flexcluster_with_groupref.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/internal/generated/controller/flexcluster/handler_v20250312.go b/internal/generated/controller/flexcluster/handler_v20250312.go index dcfabd9d49..9ad18382ee 100644 --- a/internal/generated/controller/flexcluster/handler_v20250312.go +++ b/internal/generated/controller/flexcluster/handler_v20250312.go @@ -20,6 +20,7 @@ import ( "fmt" v20250312sdk "go.mongodb.org/atlas-sdk/v20250312009/admin" + apierrors "k8s.io/apimachinery/pkg/api/errors" controllerruntime "sigs.k8s.io/controller-runtime" builder "sigs.k8s.io/controller-runtime/pkg/builder" client "sigs.k8s.io/controller-runtime/pkg/client" @@ -71,6 +72,26 @@ func (h *Handlerv20250312) getDependencies(ctx context.Context, flexcluster *ako return result, nil } +// getMinimalGroupFromStatusOrSpec creates a minimal Group object with group ID from status (preferred) or spec (fallback). +// Returns nil if no group ID is available. This allows deletion to proceed even if the Group CR is gone from Kubernetes. 
+func (h *Handlerv20250312) getMinimalGroupFromStatusOrSpec(flexcluster *akov2generated.FlexCluster) *akov2generated.Group { + var groupID *string + if flexcluster.Status.V20250312 != nil { + groupID = flexcluster.Status.V20250312.GroupId + } + if groupID == nil && flexcluster.Spec.V20250312 != nil { + groupID = flexcluster.Spec.V20250312.GroupId + } + if groupID == nil || *groupID == "" { + return nil + } + return &akov2generated.Group{ + Status: akov2generated.GroupStatus{ + V20250312: &akov2generated.GroupStatusV20250312{Id: groupID}, + }, + } +} + // HandleInitial handles the initial state for version v20250312 func (h *Handlerv20250312) HandleInitial(ctx context.Context, flexcluster *akov2generated.FlexCluster) (ctrlstate.Result, error) { deps, err := h.getDependencies(ctx, flexcluster) @@ -165,7 +186,18 @@ func (h *Handlerv20250312) HandleDeletionRequested(ctx context.Context, flexclus deps, err := h.getDependencies(ctx, flexcluster) if err != nil { - return result.Error(state.StateDeletionRequested, fmt.Errorf("failed to get dependencies: %w", err)) + // Race condition: Group CR may be deleted from K8s before FlexCluster finishes deletion. + // If Group is not found but we have group ID in status, use it to proceed with deletion. 
+ var statusErr *apierrors.StatusError + if errors.As(err, &statusErr) && apierrors.IsNotFound(statusErr) { + if group := h.getMinimalGroupFromStatusOrSpec(flexcluster); group != nil { + deps = []client.Object{group} + } else { + return result.Error(state.StateDeletionRequested, fmt.Errorf("failed to get dependencies: %w", err)) + } + } else { + return result.Error(state.StateDeletionRequested, fmt.Errorf("failed to get dependencies: %w", err)) + } } params := &v20250312sdk.DeleteFlexClusterApiParams{} @@ -189,7 +221,18 @@ func (h *Handlerv20250312) HandleDeletionRequested(ctx context.Context, flexclus func (h *Handlerv20250312) HandleDeleting(ctx context.Context, flexcluster *akov2generated.FlexCluster) (ctrlstate.Result, error) { deps, err := h.getDependencies(ctx, flexcluster) if err != nil { - return result.Error(state.StateDeleting, fmt.Errorf("failed to get dependencies: %w", err)) + // Race condition: Group CR may be deleted from K8s before FlexCluster finishes deletion. + // If Group is not found but we have group ID in status, use it to proceed with deletion. 
+ var statusErr *apierrors.StatusError + if errors.As(err, &statusErr) && apierrors.IsNotFound(statusErr) { + if group := h.getMinimalGroupFromStatusOrSpec(flexcluster); group != nil { + deps = []client.Object{group} + } else { + return result.Error(state.StateDeleting, fmt.Errorf("failed to get dependencies: %w", err)) + } + } else { + return result.Error(state.StateDeleting, fmt.Errorf("failed to get dependencies: %w", err)) + } } params := &v20250312sdk.GetFlexClusterApiParams{} diff --git a/internal/generated/controller/flexcluster/handler_v20250312_test.go b/internal/generated/controller/flexcluster/handler_v20250312_test.go index d89d15a6a9..1f9deac3d7 100644 --- a/internal/generated/controller/flexcluster/handler_v20250312_test.go +++ b/internal/generated/controller/flexcluster/handler_v20250312_test.go @@ -1114,7 +1114,8 @@ func withGeneration(flexCluster *akov2generated.FlexCluster, generation int64) * // withObservedGeneration sets the observed generation in status conditions func withObservedGeneration(flexCluster *akov2generated.FlexCluster, observedGen int64) *akov2generated.FlexCluster { if flexCluster.Status.Conditions == nil { - flexCluster.Status.Conditions = &[]metav1.Condition{} + // Allocate a new empty slice pointer to avoid storing a pointer to a temporary value + flexCluster.Status.Conditions = new([]metav1.Condition) } conditions := *flexCluster.Status.Conditions conditions = append(conditions, metav1.Condition{ @@ -1122,10 +1123,11 @@ func withObservedGeneration(flexCluster *akov2generated.FlexCluster, observedGen ObservedGeneration: observedGen, Status: metav1.ConditionTrue, }) - // Allocate a new slice to avoid storing a pointer to a local variable + // Allocate a new slice pointer that persists beyond function scope newConditions := make([]metav1.Condition, len(conditions)) copy(newConditions, conditions) - flexCluster.Status.Conditions = &newConditions + flexCluster.Status.Conditions = new([]metav1.Condition) + 
*flexCluster.Status.Conditions = newConditions return flexCluster } diff --git a/test/e2e2/e2e2_suite_test.go b/test/e2e2/e2e2_suite_test.go index ad52a27c6c..64676f2416 100644 --- a/test/e2e2/e2e2_suite_test.go +++ b/test/e2e2/e2e2_suite_test.go @@ -15,8 +15,11 @@ package e2e2_test import ( + "context" "fmt" "os" + "os/signal" + "syscall" "testing" "time" @@ -75,6 +78,7 @@ func initTestLogging(t *testing.T) { ctrllog.SetLogger(logrLogger.WithName("test")) } +// nolint:unparam func runTestAKO(globalCreds, ns string, deletionprotection bool) operator.Operator { args := []string{ "--log-level=-9", @@ -85,3 +89,35 @@ func runTestAKO(globalCreds, ns string, deletionprotection bool) operator.Operat args = append(args, fmt.Sprintf("--object-deletion-protection=%v", deletionprotection)) return operator.NewOperator(operator.AllNamespacesOperatorEnv(ns), os.Stdout, os.Stderr, args...) } + +// SetupTerminationHandling sets up signal handling and context cancellation for an operator. +// It handles SIGINT/SIGTERM signals and context cancellation to properly stop the operator. +// Returns a cleanup function that should be called with DeferCleanup. 
+func SetupTerminationHandling(ctx context.Context, op operator.Operator, cancel context.CancelFunc) func() { + // Wire up signal handling to cancel context and stop operator on SIGINT/SIGTERM + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) + go func() { + defer GinkgoRecover() + <-sigChan + cancel() + // Stop the operator immediately when Ctrl+C is pressed + if op != nil { + op.Stop(GinkgoT()) + } + }() + + // Also stop operator when context is canceled (e.g., from cleanup) + go func() { + defer GinkgoRecover() + <-ctx.Done() + if op != nil { + op.Stop(GinkgoT()) + } + }() + + // Return cleanup function to stop signal handling + return func() { + signal.Stop(sigChan) + } +} diff --git a/test/e2e2/flexcluster_test.go b/test/e2e2/flexcluster_test.go new file mode 100644 index 0000000000..ca482f23f2 --- /dev/null +++ b/test/e2e2/flexcluster_test.go @@ -0,0 +1,330 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e2_test + +import ( + "context" + "os" + "time" + + k8s "github.com/crd2go/crd2go/k8s" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + nextapiv1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/generated/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/pointer" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/control" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e/utils" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/kube" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/operator" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/resources" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/samples" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/testparams" +) + +const ( + FlexClusterCRDName = "flexclusters.atlas.generated.mongodb.com" + GroupCRDName = "groups.atlas.generated.mongodb.com" +) + +// prepareFunc is a function type for mutating objects during test setup. +type prepareFunc func(objs []client.Object, params *testparams.TestParams) *nextapiv1.FlexCluster + +// updateFunc is a function type for mutating objects during test updates. +type updateFunc func(cluster *nextapiv1.FlexCluster) + +var _ = Describe("FlexCluster CRUD", Ordered, Label("flexcluster-ctlr"), func() { + var ctx context.Context + var kubeClient client.Client + var ako operator.Operator + var testNamespace *corev1.Namespace + var sharedGroupNamespace *corev1.Namespace + var testGroup *nextapiv1.Group + var groupID string + var orgID string + var sharedTestParams *testparams.TestParams + + _ = BeforeAll(func() { + if !version.IsExperimental() { + Skip("FlexCluster is an experimental CRD and controller. 
Skipping test as experimental features are not enabled.") + } + + orgID = os.Getenv("MCLI_ORG_ID") + Expect(orgID).NotTo(BeEmpty(), "MCLI_ORG_ID environment variable must be set") + + // Create context first so we can monitor it for cancellation + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) + DeferCleanup(cancel) + + // Start operator + deletionProtectionOff := false + ako = runTestAKO(DefaultGlobalCredentials, control.MustEnvVar("OPERATOR_NAMESPACE"), deletionProtectionOff) + ako.Start(GinkgoT()) + + // Set up termination handling (signal handling and context cancellation) + cleanupTermination := SetupTerminationHandling(ctx, ako, cancel) + DeferCleanup(cleanupTermination) + + testClient, err := kube.NewTestClient() + Expect(err).To(Succeed()) + kubeClient = testClient + Expect(kube.AssertCRDNames(ctx, kubeClient, FlexClusterCRDName, GroupCRDName)).To(Succeed()) + + By("Create namespace and credentials for shared test Group", func() { + sharedGroupNamespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: utils.RandomName("flex-shared-grp-ns"), + }} + Expect(kubeClient.Create(ctx, sharedGroupNamespace)).To(Succeed()) + Expect(resources.CopyCredentialsToNamespace(ctx, kubeClient, DefaultGlobalCredentials, control.MustEnvVar("OPERATOR_NAMESPACE"), sharedGroupNamespace.Name, GinkGoFieldOwner)).To(Succeed()) + }) + + By("Create test Group", func() { + groupName := utils.RandomName("flexcluster-test-group") + // Set up shared test params + sharedTestParams = testparams.New(orgID, sharedGroupNamespace.Name, DefaultGlobalCredentials). 
+ WithGroupName(groupName) + + // Load sample Group YAML and apply mutations + objs := samples.MustLoadSampleObjects("atlas_generated_v1_group.yaml") + Expect(len(objs)).To(Equal(1)) + testGroup = objs[0].(*nextapiv1.Group) + sharedTestParams.WithNamespace(sharedGroupNamespace.Name).ApplyToGroup(testGroup) + Expect(kubeClient.Create(ctx, testGroup)).To(Succeed()) + }) + + By("Wait for Group to be Ready and get its ID", func() { + Eventually(func(g Gomega) { + g.Expect(resources.CheckResourceReady(ctx, kubeClient, testGroup)).To(Succeed()) + }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + Expect(testGroup.Status.V20250312).NotTo(BeNil()) + Expect(testGroup.Status.V20250312.Id).NotTo(BeNil()) + groupID = *testGroup.Status.V20250312.Id + Expect(groupID).NotTo(BeEmpty()) + // Update shared test params with groupID now that it's available + sharedTestParams = sharedTestParams.WithGroupID(groupID) + }) + }) + + _ = AfterAll(func() { + if kubeClient != nil && testGroup != nil { + By("Clean up test Group", func() { + Expect(kubeClient.Delete(ctx, testGroup)).To(Succeed()) + Eventually(func(g Gomega) error { + err := kubeClient.Get(ctx, client.ObjectKeyFromObject(testGroup), testGroup) + return err + }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).NotTo(Succeed()) + }) + } + if kubeClient != nil && sharedGroupNamespace != nil { + By("Clean up shared group namespace", func() { + Expect(kubeClient.Delete(ctx, sharedGroupNamespace)).To(Succeed()) + Eventually(func(g Gomega) bool { + return kubeClient.Get(ctx, client.ObjectKeyFromObject(sharedGroupNamespace), sharedGroupNamespace) == nil + }).WithContext(ctx).WithTimeout(time.Minute).WithPolling(time.Second).To(BeFalse()) + }) + } + if ako != nil { + ako.Stop(GinkgoT()) + } + }) + + _ = BeforeEach(func() { + testNamespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: utils.RandomName("flexcluster-ctlr-ns"), + }} + 
Expect(kubeClient.Create(ctx, testNamespace)).To(Succeed()) + Expect(ako.Running()).To(BeTrue(), "Operator must be running") + }) + + _ = AfterEach(func() { + if kubeClient == nil { + return + } + Expect( + kubeClient.Delete(ctx, testNamespace), + ).To(Succeed()) + Eventually(func(g Gomega) bool { + return kubeClient.Get(ctx, client.ObjectKeyFromObject(testNamespace), testNamespace) == nil + }).WithContext(ctx).WithTimeout(time.Minute).WithPolling(time.Second).To(BeFalse()) + }) + + DescribeTable("FlexCluster CRUD lifecycle", + func(sampleFile string, createMutation prepareFunc, updateMutation updateFunc, clusterName string) { + // Generate randomized group name for this test run (cluster names are unique per group) + groupName := utils.RandomName("flex-grp") + + // Set up test params for this test case (reuse shared values, override groupName and namespace) + testParams := sharedTestParams.WithGroupName(groupName).WithNamespace(testNamespace.Name) + + // Track created objects for cleanup + var createdObjects []client.Object + var cluster *nextapiv1.FlexCluster + + By("Copy credentials secret to test namespace", func() { + Expect(resources.CopyCredentialsToNamespace(ctx, kubeClient, DefaultGlobalCredentials, control.MustEnvVar("OPERATOR_NAMESPACE"), testNamespace.Name, GinkGoFieldOwner)).To(Succeed()) + }) + + By("Load sample YAML and apply mutations for create", func() { + objs := samples.MustLoadSampleObjects(sampleFile) + + // Apply create mutation function + cluster = createMutation(objs, testParams) + Expect(cluster).NotTo(BeNil()) + + // Apply all objects to namespace + var err error + createdObjects, err = resources.ApplyObjectsToNamespace(ctx, kubeClient, objs, testNamespace.Name, GinkGoFieldOwner) + Expect(err).NotTo(HaveOccurred()) + Expect(createdObjects).NotTo(BeEmpty()) + Expect(createdObjects).To(ContainElement(cluster)) + }) + + By("Wait for Group to be Ready (if using groupRef)", func() { + // Check if any Group objects were created + for _, obj := 
range createdObjects { + if group, ok := obj.(*nextapiv1.Group); ok { + groupObj := &nextapiv1.Group{ + ObjectMeta: metav1.ObjectMeta{Name: group.Name, Namespace: testNamespace.Name}, + } + Eventually(func(g Gomega) { + g.Expect(resources.CheckResourceReady(ctx, kubeClient, groupObj)).To(Succeed()) + }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + } + } + }) + + By("Wait for FlexCluster to be Ready", func() { + Eventually(func(g Gomega) { + g.Expect(resources.CheckResourceReady(ctx, kubeClient, cluster)).To(Succeed()) + }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Verify cluster was created", func() { + Expect(cluster.Status.V20250312).NotTo(BeNil()) + Expect(cluster.Status.V20250312.Id).NotTo(BeNil()) + Expect(*cluster.Status.V20250312.Id).NotTo(BeEmpty()) + }) + + By("Update FlexCluster", func() { + // Create a fresh object for SSA (like kubectl apply -f) - no managedFields + // This simulates applying a fresh YAML file + updatedCluster := freshFlexCluster(cluster) + updateMutation(updatedCluster) + // Use SSA to simulate kubectl apply -f + Expect(kubeClient.Patch(ctx, updatedCluster, client.Apply, client.ForceOwnership, GinkGoFieldOwner)).To(Succeed()) + // Update cluster reference for subsequent checks + cluster = updatedCluster + }) + + By("Wait for FlexCluster to be Ready & updated", func() { + Eventually(func(g Gomega) { + g.Expect(resources.CheckResourceUpdated(ctx, kubeClient, cluster)).To(Succeed()) + }).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + }) + + By("Delete all created resources", func() { + for _, obj := range createdObjects { + _ = kubeClient.Delete(ctx, obj) + } + }) + + By("Wait for all resources to be deleted", func() { + for _, obj := range createdObjects { + Eventually(func(g Gomega) { + g.Expect(resources.CheckResourceDeleted(ctx, kubeClient, obj)).To(Succeed()) + 
}).WithContext(ctx).WithTimeout(5 * time.Minute).WithPolling(5 * time.Second).Should(Succeed()) + } + }) + }, + Entry("With direct groupId", + "atlas_generated_v1_flexcluster.yaml", + prepareFlexClusterWithGroupId, + updateFlexClusterTerminationProtection, + "flexy", + ), + Entry("With groupRef", + "atlas_generated_v1_flexcluster_with_groupref.yaml", + prepareFlexClusterWithGroupRef, + updateFlexClusterTerminationProtection, + "flexy", + ), + ) +}) + +// prepareFlexClusterWithGroupId prepares a FlexCluster object to use direct groupId. +// Returns the mutated FlexCluster if found, nil otherwise. +func prepareFlexClusterWithGroupId(objs []client.Object, params *testparams.TestParams) *nextapiv1.FlexCluster { + for _, obj := range objs { + if cluster, ok := obj.(*nextapiv1.FlexCluster); ok { + cluster.SetNamespace(params.Namespace) + cluster.Spec.ConnectionSecretRef = &k8s.LocalReference{ + Name: params.CredentialsSecretName, + } + cluster.Spec.V20250312.GroupId = ¶ms.GroupID + cluster.Spec.V20250312.GroupRef = nil + return cluster + } + } + return nil +} + +// prepareFlexClusterWithGroupRef prepares a FlexCluster object to use groupRef. +// This also mutates any Group objects in the same list to use test params. +// Returns the mutated FlexCluster if found, nil otherwise. +func prepareFlexClusterWithGroupRef(objs []client.Object, params *testparams.TestParams) *nextapiv1.FlexCluster { + var cluster *nextapiv1.FlexCluster + for _, obj := range objs { + switch o := obj.(type) { + case *nextapiv1.Group: + params.ApplyToGroup(o) + case *nextapiv1.FlexCluster: + o.SetNamespace(params.Namespace) + o.Spec.ConnectionSecretRef = &k8s.LocalReference{ + Name: params.CredentialsSecretName, + } + o.Spec.V20250312.GroupRef = &k8s.LocalReference{ + Name: params.GroupName, + } + o.Spec.V20250312.GroupId = nil + cluster = o + } + } + return cluster +} + +// updateFlexClusterTerminationProtection mutates a FlexCluster for the update scenario. 
+// This changes terminationProtectionEnabled from true to false. +func updateFlexClusterTerminationProtection(cluster *nextapiv1.FlexCluster) { + cluster.Spec.V20250312.Entry.TerminationProtectionEnabled = pointer.MakePtr(false) +} + +func freshFlexCluster(cluster *nextapiv1.FlexCluster) *nextapiv1.FlexCluster { + return &nextapiv1.FlexCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "atlas.generated.mongodb.com/v1", + Kind: "FlexCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.Name, + Namespace: cluster.Namespace, + }, + Spec: cluster.Spec, + } +} diff --git a/test/helper/e2e2/kube/kube.go b/test/helper/e2e2/kube/kube.go index d5041f8a93..a6179dc1b9 100644 --- a/test/helper/e2e2/kube/kube.go +++ b/test/helper/e2e2/kube/kube.go @@ -29,6 +29,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" akov2 "github.com/mongodb/mongodb-atlas-kubernetes/v2/api/v1" + generatedv1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/generated/v1" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/version" ) const ( @@ -40,6 +42,18 @@ type ObjectWithStatus interface { GetConditions() []metav1.Condition } +// AssertCRDNames check that the given names are CRDs installed in the accesible cluster +func AssertCRDNames(ctx context.Context, kubeClient client.Client, crdNames ...string) error { + crds := make([]*apiextensionsv1.CustomResourceDefinition, 0, len(crdNames)) + for _, crdName := range crdNames { + crd := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{Name: crdName}, + } + crds = append(crds, crd) + } + return AssertCRDs(ctx, kubeClient, crds...) 
+} + // AssertCRDs check that the given CRDs are installed in the accesible cluster func AssertCRDs(ctx context.Context, kubeClient client.Client, crds ...*apiextensionsv1.CustomResourceDefinition) error { for _, targetCRD := range crds { @@ -59,6 +73,10 @@ func NewTestClient() (client.Client, error) { utilruntime.Must(apiextensionsv1.AddToScheme(testScheme)) utilruntime.Must(akov2.AddToScheme(testScheme)) utilruntime.Must(appsv1.AddToScheme(testScheme)) + // Add experimental nextapi types (e.g., FlexCluster, Group) only when experimental features are enabled + if version.IsExperimental() { + utilruntime.Must(generatedv1.AddToScheme(testScheme)) + } return getKubeClient(testScheme) } diff --git a/test/helper/e2e2/operator/embedded.go b/test/helper/e2e2/operator/embedded.go index c2bc159418..eeea59f042 100644 --- a/test/helper/e2e2/operator/embedded.go +++ b/test/helper/e2e2/operator/embedded.go @@ -74,5 +74,4 @@ func (e *EmbeddedOperator) Stop(t testingT) { t.Logf("canceling operator context to force it to stop") e.cancelFn() e.Wait(t) - return } diff --git a/test/helper/e2e2/operator/operator.go b/test/helper/e2e2/operator/operator.go index b0966d9a4f..1da508fbb9 100644 --- a/test/helper/e2e2/operator/operator.go +++ b/test/helper/e2e2/operator/operator.go @@ -149,6 +149,12 @@ func (o *OperatorProcess) Wait(t testingT) { } func (o *OperatorProcess) Stop(t testingT) { + // Check if process is already terminated + if !o.Running() { + // Process has already terminated, nothing to do + return + } + // Ensure child process is killed on cleanup - send the negative of the pid, which is the process group id. 
// See https://medium.com/@felixge/killing-a-child-process-and-all-of-its-children-in-go-54079af94773 pid := 0 @@ -160,6 +166,11 @@ func (o *OperatorProcess) Stop(t testingT) { terminated := false if pid != 0 { if err := syscall.Kill(pid, syscall.SIGTERM); err != nil { + // If process doesn't exist, it's already gone - that's fine + if err == syscall.ESRCH { + // Process doesn't exist (already terminated), which is what we want + return + } t.Errorf("error trying to kill command: %v", err) } terminated = true diff --git a/test/helper/e2e2/resources/resources.go b/test/helper/e2e2/resources/resources.go new file mode 100644 index 0000000000..eebe045a82 --- /dev/null +++ b/test/helper/e2e2/resources/resources.go @@ -0,0 +1,134 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resources + +import ( + "context" + "errors" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/mongodb/mongodb-atlas-kubernetes/v2/pkg/state" + "github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/kube" +) + +// CopySecretToNamespace copies a secret from one namespace to another. +// Returns the copied secret ready to be applied to the target namespace. 
+func CopySecretToNamespace(ctx context.Context, kubeClient client.Client, key client.ObjectKey, targetNamespace string) (*corev1.Secret, error) { + secret := corev1.Secret{} + if err := kubeClient.Get(ctx, key, &secret); err != nil { + return nil, fmt.Errorf("failed to load original secret %v: %w", key, err) + } + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: targetNamespace, + Labels: secret.Labels, + }, + Data: secret.Data, + }, nil +} + +// CopyCredentialsToNamespace copies the credentials secret from the operator namespace +// to the target namespace. The secret is applied with the specified field owner. +func CopyCredentialsToNamespace(ctx context.Context, kubeClient client.Client, credentialsName, operatorNamespace, targetNamespace string, fieldOwner client.FieldOwner) error { + globalCredsKey := client.ObjectKey{ + Name: credentialsName, + Namespace: operatorNamespace, + } + credentialsSecret, err := CopySecretToNamespace(ctx, kubeClient, globalCredsKey, targetNamespace) + if err != nil { + return err + } + return kubeClient.Patch(ctx, credentialsSecret, client.Apply, client.ForceOwnership, fieldOwner) +} + +// ApplyObjectsToNamespace applies a list of objects to a namespace. +// All objects will be set to the specified namespace before applying. 
+func ApplyObjectsToNamespace(ctx context.Context, kubeClient client.Client, objs []client.Object, namespace string, fieldOwner client.FieldOwner) ([]client.Object, error) { + for _, obj := range objs { + obj.SetNamespace(namespace) + if err := kubeClient.Patch(ctx, obj, client.Apply, client.ForceOwnership, fieldOwner); err != nil { + return nil, fmt.Errorf("failed to apply object %s/%s: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + return objs, nil +} + +var ( + // ErrResourceNotReady indicates the resource is not in Ready state + ErrResourceNotReady = errors.New("resource is not ready") + // ErrResourceNotUpdated indicates the resource is not in Updated state + ErrResourceNotUpdated = errors.New("resource is not updated") + // ErrResourceNotDeleted indicates the resource still exists + ErrResourceNotDeleted = errors.New("resource still exists") +) + +// CheckResourceReady checks if a resource has Ready condition set to True. +// Returns nil if ready, ErrResourceNotReady if not ready, or an error if the resource cannot be fetched. +func CheckResourceReady(ctx context.Context, kubeClient client.Client, obj kube.ObjectWithStatus) error { + key := client.ObjectKeyFromObject(obj) + if err := kubeClient.Get(ctx, key, obj); err != nil { + return fmt.Errorf("failed to get resource %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + if condition := meta.FindStatusCondition(obj.GetConditions(), "Ready"); condition != nil { + if condition.Status == metav1.ConditionTrue { + return nil + } + } + return ErrResourceNotReady +} + +// CheckResourceUpdated checks if a resource is Ready and in Updated state. +// Returns nil if updated, ErrResourceNotUpdated if not updated, or an error if the resource cannot be fetched. 
+func CheckResourceUpdated(ctx context.Context, kubeClient client.Client, obj kube.ObjectWithStatus) error { + key := client.ObjectKeyFromObject(obj) + if err := kubeClient.Get(ctx, key, obj); err != nil { + return fmt.Errorf("failed to get resource %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + ready := false + if condition := meta.FindStatusCondition(obj.GetConditions(), "Ready"); condition != nil { + ready = (condition.Status == metav1.ConditionTrue) + } + if !ready { + return ErrResourceNotUpdated + } + if condition := meta.FindStatusCondition(obj.GetConditions(), "State"); condition != nil { + if state.ResourceState(condition.Reason) == state.StateUpdated { + return nil + } + } + return ErrResourceNotUpdated +} + +// CheckResourceDeleted checks if a resource has been deleted from the cluster. +// Returns nil if deleted, ErrResourceNotDeleted if still exists, or an error if the check fails. +func CheckResourceDeleted(ctx context.Context, kubeClient client.Client, obj client.Object) error { + key := client.ObjectKeyFromObject(obj) + if err := kubeClient.Get(ctx, key, obj); err != nil { + // Resource not found means it's deleted - success! + if client.IgnoreNotFound(err) == nil { + return nil + } + // Other errors are unexpected + return fmt.Errorf("failed to check if resource %s/%s is deleted: %w", obj.GetNamespace(), obj.GetName(), err) + } + return ErrResourceNotDeleted +} diff --git a/test/helper/e2e2/samples/samples.go b/test/helper/e2e2/samples/samples.go new file mode 100644 index 0000000000..9d005fe7ee --- /dev/null +++ b/test/helper/e2e2/samples/samples.go @@ -0,0 +1,73 @@ +// Copyright 2025 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package samples
+
+import (
+	"os"
+	"path/filepath"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/mongodb/mongodb-atlas-kubernetes/v2/test/helper/e2e2/yml"
+)
+
+// findRepoRoot walks up from the working directory until it finds a go.mod file.
+func findRepoRoot() (string, error) {
+	dir, err := os.Getwd()
+	if err != nil {
+		return "", err
+	}
+
+	for {
+		if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
+			return dir, nil
+		}
+		parent := filepath.Dir(dir)
+		if parent == dir {
+			// Hit the filesystem root without finding go.mod.
+			break
+		}
+		dir = parent
+	}
+
+	// No go.mod found anywhere up the tree; fall back to the current directory.
+	return ".", nil
+}
+
+// LoadSampleObjects loads and parses YAML objects from config/samples.
+// The filename is resolved relative to the repository root (see findRepoRoot).
+func LoadSampleObjects(filename string) ([]client.Object, error) {
+	repoRoot, err := findRepoRoot()
+	if err != nil {
+		return nil, err
+	}
+
+	absPath := filepath.Join(repoRoot, "config", "samples", filename)
+	f, err := os.Open(filepath.Clean(absPath))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return yml.ParseObjects(f)
+}
+
+// MustLoadSampleObjects loads and parses YAML objects, panicking on error.
+func MustLoadSampleObjects(filename string) []client.Object {
+	objs, err := LoadSampleObjects(filename)
+	if err != nil {
+		panic(err)
+	}
+	return objs
+}
diff --git a/test/helper/e2e2/testparams/testparams.go b/test/helper/e2e2/testparams/testparams.go
new file mode 100644
index 0000000000..f75ac13395
--- /dev/null
+++ b/test/helper/e2e2/testparams/testparams.go
@@ -0,0 +1,100 @@
+// Copyright 2025 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testparams
+
+import (
+	k8s "github.com/crd2go/crd2go/k8s"
+
+	nextapiv1 "github.com/mongodb/mongodb-atlas-kubernetes/v2/internal/nextapi/generated/v1"
+)
+
+// TestParams holds all test parameters for test isolation purposes.
+// Shared values (OrgID, OperatorNamespace, CredentialsSecretName) are typically
+// set from input config and remain constant across tests. Per-test values
+// (GroupID, GroupName, Namespace) are set per test case.
+type TestParams struct {
+	// GroupID is the Atlas group ID, assigned by Atlas after Group creation.
+	// Per-test; empty until the Group has been created in Atlas.
+	GroupID string
+	// OrgID is the Atlas organization ID, set from input config (e.g., MCLI_ORG_ID env var).
+	OrgID string
+	// GroupName is a randomized name for test isolation, per test case.
+	GroupName string
+	// Namespace is the Kubernetes namespace where test resources are created, per test case.
+	Namespace string
+	// OperatorNamespace is the namespace where the operator is running, set from input config.
+	OperatorNamespace string
+	// CredentialsSecretName is the name of the credentials secret, set from input config.
+	CredentialsSecretName string
+}
+
+// New creates a new TestParams struct with shared configuration values.
+// These values are typically constant across all tests in a suite:
+// - orgID: Atlas organization ID (from MCLI_ORG_ID env var)
+// - operatorNamespace: Namespace where operator is running (from OPERATOR_NAMESPACE env var)
+// - credentialsSecretName: Name of the credentials secret (e.g., DefaultGlobalCredentials)
+//
+// Per-test values (GroupID, GroupName, Namespace) should be set using WithGroupID, WithGroupName, and WithNamespace.
+func New(orgID, operatorNamespace, credentialsSecretName string) *TestParams {
+	return &TestParams{
+		OrgID:                 orgID,
+		OperatorNamespace:     operatorNamespace,
+		CredentialsSecretName: credentialsSecretName,
+	}
+}
+
+// WithGroupID returns a copy of the TestParams with GroupID set.
+// GroupID is assigned by Atlas after Group creation and is per-test.
+func (p *TestParams) WithGroupID(groupID string) *TestParams {
+	c := *p
+	c.GroupID = groupID
+	return &c
+}
+
+// WithGroupName returns a copy of the TestParams with GroupName set.
+// GroupName should contain a randomized portion for test isolation.
+func (p *TestParams) WithGroupName(groupName string) *TestParams {
+	c := *p
+	c.GroupName = groupName
+	return &c
+}
+
+// WithNamespace returns a copy of the TestParams with Namespace set.
+// Namespace is the Kubernetes namespace where test resources are created.
+func (p *TestParams) WithNamespace(namespace string) *TestParams {
+	c := *p
+	c.Namespace = namespace
+	return &c
+}
+
+// ApplyToGroup mutates a Group object with test parameters.
+func (p *TestParams) ApplyToGroup(group *nextapiv1.Group) {
+	group.SetName(p.GroupName)
+	group.SetNamespace(p.Namespace)
+	spec := &group.Spec
+	if spec.ConnectionSecretRef == nil {
+		spec.ConnectionSecretRef = &k8s.LocalReference{}
+	}
+	spec.ConnectionSecretRef.Name = p.CredentialsSecretName
+	if spec.V20250312 == nil {
+		spec.V20250312 = &nextapiv1.V20250312{}
+	}
+	if spec.V20250312.Entry == nil {
+		spec.V20250312.Entry = &nextapiv1.Entry{}
+	}
+	entry := spec.V20250312.Entry
+	entry.Name = p.GroupName
+	entry.OrgId = p.OrgID
+}