From 32b6e13460e4e9648b9b408db1b82b59ba30b636 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Fri, 6 Feb 2026 09:14:00 +0100
Subject: [PATCH 1/9] Adding events for passwords and secrets

---
 pkg/splunk/enterprise/clustermanager.go    |  5 ++-
 pkg/splunk/enterprise/indexercluster.go    | 28 ++++++++++++++++
 pkg/splunk/enterprise/searchheadcluster.go | 28 ++++++++++++++++
 pkg/splunk/enterprise/util.go              | 39 ++++++++++++++++++++++
 4 files changed, 99 insertions(+), 1 deletion(-)

diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index fa2d9dc62..88cefb947 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -422,7 +422,10 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
 	//Get the admin password from the secret object
 	adminPwd, foundSecret := defaultSecret.Data["password"]
 	if !foundSecret {
-		eventPublisher.Warning(ctx, "PushManagerAppsBundle", "could not find admin password while trying to push the manager apps bundle")
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "SecretInvalid",
+				fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", defaultSecret.GetName(), "password"))
+		}
 		return fmt.Errorf("could not find admin password while trying to push the manager apps bundle")
 	}

diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index ee868edff..94908663b 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -627,6 +627,15 @@ func SetClusterMaintenanceMode(ctx context.Context, c splcommon.ControllerClient
 // ApplyIdxcSecret checks if any of the indexer's have a different idxc_secret from namespace scoped secret and changes it
 func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {
 	var indIdxcSecret string
+
+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
 	// Get namespace scoped secret
 	namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
 	if err != nil {
@@ -653,6 +662,7 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
 	nsIdxcSecret := string(namespaceSecret.Data[splcommon.IdxcSecret])

 	// Loop over all indexer pods and get individual pod's idxc password
+	howManyPodsHaveSecretChanged := 0
 	for i := int32(0); i <= replicas-1; i++ {
 		// Get Indexer's name
 		indexerPodName := GetSplunkStatefulsetPodName(SplunkIndexer, mgr.cr.GetName(), i)
@@ -718,13 +728,25 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
 			// Change idxc secret key
 			err = idxcClient.SetIdxcSecret(nsIdxcSecret)
 			if err != nil {
+				// Emit event for password sync failure
+				if eventPublisher != nil {
+					eventPublisher.Warning(ctx, "PasswordSyncFailed",
+						fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", indexerPodName, err.Error()))
+				}
 				return err
 			}
 			scopedLog.Info("Changed idxc secret")

+			howManyPodsHaveSecretChanged += 1
+
 			// Restart splunk instance on pod
 			err = idxcClient.RestartSplunk()
 			if err != nil {
+				// Emit event for password sync failure
+				if eventPublisher != nil {
+					eventPublisher.Warning(ctx, "PasswordSyncFailed",
+						fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", indexerPodName, err.Error()))
+				}
 				return err
 			}
 			scopedLog.Info("Restarted splunk")
@@ -778,6 +800,12 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
 		}
 	}

+	// Emit event for password sync completed
+	if eventPublisher != nil {
+		eventPublisher.Normal(ctx, "PasswordSyncCompleted",
+			fmt.Sprintf("Password synchronized for %d pods", howManyPodsHaveSecretChanged))
+	}
+
 	return nil
 }

diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go
index c4a3ebb25..3a21f21f0 100644
--- a/pkg/splunk/enterprise/searchheadcluster.go
+++ b/pkg/splunk/enterprise/searchheadcluster.go
@@ -260,6 +260,14 @@ func ApplySearchHeadCluster(ctx context.Context, client splcommon.ControllerClie

 // ApplyShcSecret checks if any of the search heads have a different shc_secret from namespace scoped secret and changes it
 func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {
+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
 	// Get namespace scoped secret
 	namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
 	if err != nil {
@@ -289,6 +297,7 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 	nsAdminSecret := string(namespaceSecret.Data["password"])

 	// Loop over all sh pods and get individual pod's shc_secret
+	howManyPodsHaveSecretChanged := 0
 	for i := int32(0); i <= replicas-1; i++ {
 		// Get search head pod's name
 		shPodName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), i)
@@ -329,14 +338,26 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli

 		_, _, err = podExecClient.RunPodExecCommand(ctx, streamOptions, []string{"/bin/sh"})
 		if err != nil {
+			// Emit event for password sync failure
+			if eventPublisher != nil {
+				eventPublisher.Warning(ctx, "PasswordSyncFailed",
+					fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", shPodName, err.Error()))
+			}
 			return err
 		}
 		scopedLog.Info("shcSecret changed")

+		howManyPodsHaveSecretChanged += 1
+
 		// Get client for Pod and restart splunk instance on pod
 		shClient := mgr.getClient(ctx, i)
 		err = shClient.RestartSplunk()
 		if err != nil {
+			// Emit event for password sync failure
+			if eventPublisher != nil {
+				eventPublisher.Warning(ctx, "PasswordSyncFailed",
+					fmt.Sprintf("Password sync failed for pod '%s': %s. Check pod logs and secret format.", shPodName, err.Error()))
+			}
 			return err
 		}
 		scopedLog.Info("Restarted Splunk")
@@ -403,6 +424,7 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 	   Update the admin password on secret mounted on SHC pod to ensure successful authentication.
 	*/
 	if len(mgr.cr.Status.AdminPasswordChangedSecrets) > 0 {
+
 		for podSecretName := range mgr.cr.Status.AdminPasswordChangedSecrets {
 			podSecret, err := splutil.GetSecretByName(ctx, mgr.c, mgr.cr.GetNamespace(), mgr.cr.GetName(), podSecretName)
 			if err != nil {
@@ -417,6 +439,12 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 		}
 	}

+	// Emit event for password sync completed
+	if eventPublisher != nil {
+		eventPublisher.Normal(ctx, "PasswordSyncCompleted",
+			fmt.Sprintf("Password synchronized for %d pods", howManyPodsHaveSecretChanged))
+	}
+
 	return nil
 }

diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index d0bf26ba6..fbd98b6e5 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -139,6 +139,14 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
 	reqLogger := log.FromContext(ctx)
 	scopedLog := reqLogger.WithName("GetRemoteStorageClient").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
 	remoteDataClient := splclient.SplunkRemoteDataClient{}
 	//use the provider name to get the corresponding function pointer
 	getClientWrapper := splclient.RemoteDataClientsMap[vol.Provider]
@@ -156,6 +164,14 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
 	// Get credentials through the secretRef
 	remoteDataClientSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), appSecretRef)
 	if err != nil {
+		// Emit event for missing secret
+		if k8serrors.IsNotFound(err) {
+			if eventPublisher != nil {
+				eventPublisher.Warning(ctx, "SecretMissing",
+					fmt.Sprintf("Required secret '%s' not found in namespace '%s'. Create secret to proceed.", appSecretRef, cr.GetNamespace()))
+			}
+		}
+
 		return remoteDataClient, err
 	}
@@ -398,8 +414,23 @@ func getSearchHeadExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env

 // GetSmartstoreRemoteVolumeSecrets is used to retrieve S3 access key and secrete keys.
 func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) (string, string, string, error) {
+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
 	namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef)
 	if err != nil {
+		// Emit event for missing secret
+		if k8serrors.IsNotFound(err) {
+			if eventPublisher != nil {
+				eventPublisher.Warning(ctx, "SecretMissing",
+					fmt.Sprintf("Required secret '%s' not found in namespace '%s'. Create secret to proceed.", volume.SecretRef, cr.GetNamespace()))
+			}
+		}
 		return "", "", "", err
 	}
@@ -409,8 +440,16 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.
 	splutil.SetSecretOwnerRef(ctx, client, volume.SecretRef, cr)

 	if accessKey == "" {
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "SecretInvalid",
+				fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", namespaceScopedSecret.GetName(), "accessKey"))
+		}
 		return "", "", "", fmt.Errorf("s3 Access Key is missing")
 	} else if secretKey == "" {
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "SecretInvalid",
+				fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", namespaceScopedSecret.GetName(), "s3SecretKey"))
+		}
 		return "", "", "", fmt.Errorf("s3 Secret Key is missing")
 	}
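
Note: every function this patch touches repeats the same nil-safe lookup before emitting an event. A minimal sketch of that shared pattern in isolation — K8EventPublisher, splcommon.EventPublisherKey, and the Warning signature are taken from the diff above, while the surrounding emitSecretWarning function is hypothetical, for illustration only:

// emitSecretWarning is a hypothetical caller illustrating the pattern used
// throughout this patch: fetch the publisher from the context, type-assert
// it, and treat a missing or mismatched value as "no event, but no error".
func emitSecretWarning(ctx context.Context, secretName string) {
	var eventPublisher *K8EventPublisher
	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
		if p, ok := pub.(*K8EventPublisher); ok {
			eventPublisher = p
		}
	}
	if eventPublisher == nil {
		return // event publishing is best-effort; reconciliation continues
	}
	eventPublisher.Warning(ctx, "SecretInvalid",
		fmt.Sprintf("Secret '%s' missing required fields", secretName))
}

Treating an absent publisher as a no-op keeps event publishing best-effort: a reconcile never fails just because no publisher was attached to the context.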
From 80cc489ae71ca81b07f18297a863f7fb4bea137f Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Fri, 6 Feb 2026 11:09:32 +0100
Subject: [PATCH 2/9] Adding events for CM, scaling ops, appfw and RF

---
 pkg/splunk/enterprise/indexercluster.go              | 92 ++++++++++++++++++-
 .../enterprise/searchheadclusterpodmanager.go        | 33 ++++++-
 pkg/splunk/enterprise/standalone.go                  | 19 ++++
 pkg/splunk/enterprise/upgrade.go                     |  5 +
 pkg/splunk/enterprise/util.go                        |  5 +
 pkg/splunk/splkcontroller/statefulset.go             | 12 +++
 6 files changed, 162 insertions(+), 4 deletions(-)

diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 94908663b..800dd2988 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -814,6 +814,17 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con

 	var err error

+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
+	// Track last successful replica count to emit scale events after completion
+	previousReplicas := mgr.cr.Status.Replicas
+
 	// Assign client
 	if mgr.c == nil {
 		mgr.c = c
@@ -846,7 +857,27 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
 	}

 	// manage scaling and updates
-	return splctrl.UpdateStatefulSetPods(ctx, c, statefulSet, mgr, desiredReplicas)
+	phase, err := splctrl.UpdateStatefulSetPods(ctx, c, statefulSet, mgr, desiredReplicas)
+	if err != nil {
+		return phase, err
+	}
+
+	// Emit ScaledUp/ScaledDown events only after a successful scale operation has completed
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas > previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledUp",
+					fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		} else if desiredReplicas < previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledDown",
+					fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		}
+	}
+
+	return phase, nil
 }

 // PrepareScaleDown for indexerClusterPodManager prepares indexer pod to be removed via scale down event; it returns true when ready
@@ -992,6 +1023,14 @@ func getSiteRepFactorOriginCount(siteRepFactor string) int32 {
 // verifyRFPeers verifies the number of peers specified in the replicas section
 // of IndexerClsuster CR. If it is less than RF, than we set it to RF.
 func (mgr *indexerClusterPodManager) verifyRFPeers(ctx context.Context, c splcommon.ControllerClient) error {
+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
 	if mgr.c == nil {
 		mgr.c = c
 	}
@@ -1008,8 +1047,14 @@ func (mgr *indexerClusterPodManager) verifyRFPeers(ctx context.Context, c splcom
 		replicationFactor = clusterInfo.ReplicationFactor
 	}

-	if mgr.cr.Spec.Replicas < replicationFactor {
-		mgr.log.Info("Changing number of replicas as it is less than RF number of peers", "replicas", mgr.cr.Spec.Replicas)
+	requestedReplicas := mgr.cr.Spec.Replicas
+	if requestedReplicas < replicationFactor {
+		mgr.log.Info("Changing number of replicas as it is less than RF number of peers", "replicas", requestedReplicas)
+		// Emit event indicating scaling below RF is blocked/adjusted
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "ScalingBlockedRF",
+				fmt.Sprintf("Cannot scale below replication factor: %d replicas required, %d requested. Adjust replicationFactor or replicas.", replicationFactor, requestedReplicas))
+		}
 		mgr.cr.Spec.Replicas = replicationFactor
 	}
 	return nil
@@ -1037,6 +1082,9 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
 		return fmt.Errorf("waiting for cluster manager to become ready")
 	}

+	oldInitialized := mgr.cr.Status.Initialized
+	oldIndexingReady := mgr.cr.Status.IndexingReady
+
 	// get indexer cluster info from cluster manager if it's ready
 	clusterInfo, err := GetClusterManagerInfoCall(ctx, mgr)
 	if err != nil {
@@ -1077,6 +1125,44 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
 		mgr.cr.Status.Peers = mgr.cr.Status.Peers[:statefulSet.Status.Replicas]
 	}

+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
+	// Compute current available peers for quorum-related events
+	var available int32
+	totalPeers := len(mgr.cr.Status.Peers)
+	for _, p := range mgr.cr.Status.Peers {
+		if p.Status == "Up" && p.Searchable {
+			available++
+		}
+	}
+
+	// Emit events only on state transitions
+	if eventPublisher != nil {
+		// Cluster just finished initializing when quorum becomes ready
+		if !oldIndexingReady && mgr.cr.Status.IndexingReady {
+			if !oldInitialized && mgr.cr.Status.Initialized {
+				eventPublisher.Normal(ctx, "ClusterInitialized",
+					fmt.Sprintf("Cluster '%s' initialized with %d peers", mgr.cr.GetName(), totalPeers))
+			}
+
+			// Cluster quorum just restored
+			eventPublisher.Normal(ctx, "ClusterQuorumRestored",
+				fmt.Sprintf("Cluster quorum restored: %d/%d peers available", available, totalPeers))
+		}
+
+		// Cluster quorum lost (transition out of indexing ready)
+		if oldIndexingReady && !mgr.cr.Status.IndexingReady {
+			eventPublisher.Warning(ctx, "ClusterQuorumLost",
+				fmt.Sprintf("Cluster quorum lost: %d/%d peers available. Investigate peer failures immediately.", available, totalPeers))
+		}
+	}
+
 	return nil
 }

diff --git a/pkg/splunk/enterprise/searchheadclusterpodmanager.go b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
index 093ce9fe9..f238b3b6d 100644
--- a/pkg/splunk/enterprise/searchheadclusterpodmanager.go
+++ b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
@@ -45,6 +45,17 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
 		mgr.c = c
 	}

+	// Get event publisher from context
+	var eventPublisher *K8EventPublisher
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(*K8EventPublisher); ok {
+			eventPublisher = p
+		}
+	}
+
+	// Track last successful replica count to emit scale events after completion
+	previousReplicas := mgr.cr.Status.Replicas
+
 	// update statefulset, if necessary
 	_, err := splctrl.ApplyStatefulSet(ctx, mgr.c, statefulSet)
 	if err != nil {
@@ -68,7 +79,27 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
 	}

 	// manage scaling and updates
-	return splctrl.UpdateStatefulSetPods(ctx, mgr.c, statefulSet, mgr, desiredReplicas)
+	phase, err := splctrl.UpdateStatefulSetPods(ctx, mgr.c, statefulSet, mgr, desiredReplicas)
+	if err != nil {
+		return phase, err
+	}
+
+	// Emit ScaledUp/ScaledDown events only after a successful scale operation has completed
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas > previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledUp",
+					fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		} else if desiredReplicas < previousReplicas && mgr.cr.Status.Replicas == desiredReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledDown",
+					fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", mgr.cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		}
+	}
+
+	return phase, nil
 }

 // PrepareScaleDown for searchHeadClusterPodManager prepares search head pod to be removed via scale down event; it returns true when ready

diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go
index 020fb085e..f406c9dde 100644
--- a/pkg/splunk/enterprise/standalone.go
+++ b/pkg/splunk/enterprise/standalone.go
@@ -215,6 +215,9 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
 		return result, err
 	}

+	// Track last successful replica count to emit scale events after completion
+	previousReplicas := cr.Status.Replicas
+
 	mgr := splctrl.DefaultStatefulSetPodManager{}
 	phase, err := mgr.Update(ctx, client, statefulSet, cr.Spec.Replicas)
 	cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas
@@ -225,6 +228,22 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
 	}
 	cr.Status.Phase = phase

+	// Emit scale events only after a successful scale operation has completed
+	if phase == enterpriseApi.PhaseReady {
+		desiredReplicas := cr.Spec.Replicas
+		if desiredReplicas > previousReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledUp",
+					fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		} else if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			if eventPublisher != nil {
+				eventPublisher.Normal(ctx, "ScaledDown",
+					fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+			}
+		}
+	}
+
 	if cr.Spec.MonitoringConsoleRef.Name != "" {
 		_, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, getStandaloneExtraEnv(cr, cr.Spec.Replicas), true)
 		if err != nil {

diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go
index a688e61fe..4e6bb40af 100644
--- a/pkg/splunk/enterprise/upgrade.go
+++ b/pkg/splunk/enterprise/upgrade.go
@@ -148,6 +148,11 @@ ClusterManager:
 		return false, fmt.Errorf("cluster manager %s is not ready (phase: %s). IndexerCluster upgrade is waiting for ClusterManager to be ready", clusterManager.Name, clusterManager.Status.Phase)
 	}
 	if cmImage != spec.Image {
+		// Emit event when upgrade is blocked due to ClusterManager / IndexerCluster version mismatch
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "UpgradeBlockedVersionMismatch",
+				fmt.Sprintf("Upgrade blocked: ClusterManager version %s != IndexerCluster version %s. Upgrade ClusterManager first.", cmImage, spec.Image))
+		}
 		return false, fmt.Errorf("cluster manager %s image (%s) does not match IndexerCluster image (%s). Please upgrade ClusterManager and IndexerCluster together using the operator's RELATED_IMAGE_SPLUNK_ENTERPRISE or upgrade the ClusterManager first", clusterManager.Name, cmImage, spec.Image)
 	}
 	goto IndexerCluster

diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index fbd98b6e5..2be9fc39e 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -221,6 +221,11 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie

 	if err != nil {
 		scopedLog.Error(err, "Failed to get the S3 client")
+		// Emit event when operator cannot connect to the remote app repository
+		if eventPublisher != nil {
+			eventPublisher.Warning(ctx, "AppRepositoryConnectionFailed",
+				fmt.Sprintf("Failed to connect to app repository '%s': %s. Check credentials and network.", vol.Name, err.Error()))
+		}
 		return remoteDataClient, err
 	}

diff --git a/pkg/splunk/splkcontroller/statefulset.go b/pkg/splunk/splkcontroller/statefulset.go
index 3028efbdd..858a84dc1 100644
--- a/pkg/splunk/splkcontroller/statefulset.go
+++ b/pkg/splunk/splkcontroller/statefulset.go
@@ -118,6 +118,18 @@ func UpdateStatefulSetPods(ctx context.Context, c splcommon.ControllerClient, st
 		"name", statefulSet.GetObjectMeta().GetName(),
 		"namespace", statefulSet.GetObjectMeta().GetNamespace())

+	// Try to get a generic event publisher from context (if present).
+	// We intentionally assert only on the Warning method to avoid
+	// depending on the concrete publisher type and keep packages decoupled.
+	var eventPublisher interface {
+		Warning(context.Context, string, string)
+	}
+	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
+		if p, ok := pub.(interface{ Warning(context.Context, string, string) }); ok {
+			eventPublisher = p
+		}
+	}
+
 	// Re-fetch the StatefulSet to ensure we have the latest status, especially UpdateRevision.
 	// This addresses a race condition where the StatefulSet controller may not have updated
 	// Status.UpdateRevision yet after a spec change was applied. Without this re-fetch,
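
Note: the statefulset.go hunk above leans on Go's structural interfaces so that the splkcontroller package never has to import the concrete publisher type — it asserts only that the context value has a matching Warning method. A self-contained sketch of that mechanism, with toy types standing in for the operator's own:

package main

import (
	"context"
	"fmt"
)

// toyPublisher stands in for any concrete publisher type; the consumer
// below never references it by name.
type toyPublisher struct{}

func (toyPublisher) Warning(_ context.Context, reason, message string) {
	fmt.Println("warning:", reason, message)
}

type ctxKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, toyPublisher{})

	// Assert only on the method set, exactly like the statefulset.go hunk:
	// any value with a matching Warning method satisfies the assertion.
	if p, ok := ctx.Value(ctxKey{}).(interface {
		Warning(context.Context, string, string)
	}); ok {
		p.Warning(ctx, "Example", "publisher found via structural typing")
	}
}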
From adb1a0787a8f1b637a786a7c16b19584e667c756 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Fri, 6 Feb 2026 11:37:22 +0100
Subject: [PATCH 3/9] Refactoring

---
 pkg/splunk/enterprise/clustermanager.go              | 18 +-------
 pkg/splunk/enterprise/clustermaster.go               | 14 +-----
 pkg/splunk/enterprise/indexercluster.go              | 46 ++++++-------------
 pkg/splunk/enterprise/monitoringconsole.go           |  7 +--
 pkg/splunk/enterprise/searchheadcluster.go           |  7 +--
 .../enterprise/searchheadclusterpodmanager.go        |  7 +--
 pkg/splunk/enterprise/standalone.go                  |  2 +-
 pkg/splunk/enterprise/upgrade.go                     |  8 +---
 pkg/splunk/enterprise/util.go                        | 14 +-----
 pkg/splunk/splkcontroller/statefulset.go             | 12 -----
 10 files changed, 25 insertions(+), 110 deletions(-)

diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index 88cefb947..2da14ff64 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -405,12 +405,7 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
 	scopedLog := reqLogger.WithName("PushManagerApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace())
 	defaultSecret, err := splutil.GetSecretByName(ctx, c, cr.GetNamespace(), cr.GetName(), defaultSecretObjName)
@@ -422,10 +417,6 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
 	//Get the admin password from the secret object
 	adminPwd, foundSecret := defaultSecret.Data["password"]
 	if !foundSecret {
-		if eventPublisher != nil {
-			eventPublisher.Warning(ctx, "SecretInvalid",
-				fmt.Sprintf("Secret '%s' missing required fields: %s. Update secret with required data.", defaultSecret.GetName(), "password"))
-		}
 		return fmt.Errorf("could not find admin password while trying to push the manager apps bundle")
 	}
@@ -491,12 +482,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller
 	scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	clusterManagerInstance := &enterpriseApi.ClusterManager{}
 	if len(cr.Spec.ClusterManagerRef.Name) > 0 {

diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go
index 7430329e1..eab411f70 100644
--- a/pkg/splunk/enterprise/clustermaster.go
+++ b/pkg/splunk/enterprise/clustermaster.go
@@ -306,12 +306,7 @@ func CheckIfMastersmartstoreConfigMapUpdatedToPod(ctx context.Context, c splcomm
 	scopedLog := reqLogger.WithName("CheckIfMastersmartstoreConfigMapUpdatedToPod").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	command := fmt.Sprintf("cat /mnt/splunk-operator/local/%s", configToken)
 	streamOptions := splutil.NewStreamOptionsObject(command)
@@ -392,12 +387,7 @@ func PushMasterAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr
 	scopedLog := reqLogger.WithName("PushMasterApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace())
 	defaultSecret, err := splutil.GetSecretByName(ctx, c, cr.GetNamespace(), cr.GetName(), defaultSecretObjName)

diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 800dd2988..5844ac6d4 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -629,12 +629,7 @@ func ApplyIdxcSecret(ctx context.Context, mgr *indexerClusterPodManager, replica
 	var indIdxcSecret string

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	// Get namespace scoped secret
 	namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
@@ -815,12 +810,7 @@ func (mgr *indexerClusterPodManager) Update(ctx context.Context, c splcommon.Con
 	var err error

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	// Track last successful replica count to emit scale events after completion
 	previousReplicas := mgr.cr.Status.Replicas
@@ -1024,12 +1014,7 @@ func getSiteRepFactorOriginCount(siteRepFactor string) int32 {
 // of IndexerClsuster CR. If it is less than RF, than we set it to RF.
 func (mgr *indexerClusterPodManager) verifyRFPeers(ctx context.Context, c splcommon.ControllerClient) error {
 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	if mgr.c == nil {
 		mgr.c = c
@@ -1126,24 +1111,19 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
 	}

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
-
-	// Compute current available peers for quorum-related events
-	var available int32
-	totalPeers := len(mgr.cr.Status.Peers)
-	for _, p := range mgr.cr.Status.Peers {
-		if p.Status == "Up" && p.Searchable {
-			available++
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	// Emit events only on state transitions
 	if eventPublisher != nil {
+		// Compute current available peers for quorum-related events
+		var available int32
+		totalPeers := len(mgr.cr.Status.Peers)
+		for _, p := range mgr.cr.Status.Peers {
+			if p.Status == "Up" && p.Searchable {
+				available++
+			}
+		}
+
 		// Cluster just finished initializing when quorum becomes ready
 		if !oldIndexingReady && mgr.cr.Status.IndexingReady {
 			if !oldInitialized && mgr.cr.Status.Initialized {

diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go
index ab7ef1c1a..1188309ca 100644
--- a/pkg/splunk/enterprise/monitoringconsole.go
+++ b/pkg/splunk/enterprise/monitoringconsole.go
@@ -380,12 +380,7 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co
 	scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{}
 	if len(cr.Spec.MonitoringConsoleRef.Name) > 0 {

diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go
index 3a21f21f0..a32ea4746 100644
--- a/pkg/splunk/enterprise/searchheadcluster.go
+++ b/pkg/splunk/enterprise/searchheadcluster.go
@@ -261,12 +261,7 @@ func ApplySearchHeadCluster(ctx context.Context, client splcommon.ControllerClie
 // ApplyShcSecret checks if any of the search heads have a different shc_secret from namespace scoped secret and changes it
 func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {
 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	// Get namespace scoped secret
 	namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())

diff --git a/pkg/splunk/enterprise/searchheadclusterpodmanager.go b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
index f238b3b6d..7b3a19d30 100644
--- a/pkg/splunk/enterprise/searchheadclusterpodmanager.go
+++ b/pkg/splunk/enterprise/searchheadclusterpodmanager.go
@@ -46,12 +46,7 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
 	}

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)

 	// Track last successful replica count to emit scale events after completion
 	previousReplicas := mgr.cr.Status.Replicas

diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go
index f406c9dde..dd498ce33 100644
--- a/pkg/splunk/enterprise/standalone.go
+++ b/pkg/splunk/enterprise/standalone.go
@@ -231,7 +231,7 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr
 	// Emit scale events only after a successful scale operation has completed
 	if phase == enterpriseApi.PhaseReady {
 		desiredReplicas := cr.Spec.Replicas
-		if desiredReplicas > previousReplicas {
+		if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
 			if eventPublisher != nil {
 				eventPublisher.Normal(ctx, "ScaledUp",
 					fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))

diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go
index 4e6bb40af..7098a2411 100644
--- a/pkg/splunk/enterprise/upgrade.go
+++ b/pkg/splunk/enterprise/upgrade.go
@@ -38,12 +38,8 @@ func UpgradePathValidation(ctx context.Context, c splcommon.ControllerClient, cr
 	scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, mgr.cr)
+
 	kind := cr.GroupVersionKind().Kind
 	scopedLog.Info("kind is set to ", "kind", kind)
 	// start from standalone first

diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go
index 2be9fc39e..11ed17f16 100644
--- a/pkg/splunk/enterprise/util.go
+++ b/pkg/splunk/enterprise/util.go
@@ -140,12 +140,7 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie
 	scopedLog := reqLogger.WithName("GetRemoteStorageClient").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	remoteDataClient := splclient.SplunkRemoteDataClient{}
 	//use the provider name to get the corresponding function pointer
@@ -420,12 +415,7 @@ func getSearchHeadExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env

 // GetSmartstoreRemoteVolumeSecrets is used to retrieve S3 access key and secrete keys.
 func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) (string, string, string, error) {
 	// Get event publisher from context
-	var eventPublisher *K8EventPublisher
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(*K8EventPublisher); ok {
-			eventPublisher = p
-		}
-	}
+	eventPublisher := GetEventPublisher(ctx, cr)

 	namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef)
 	if err != nil {

diff --git a/pkg/splunk/splkcontroller/statefulset.go b/pkg/splunk/splkcontroller/statefulset.go
index 858a84dc1..3028efbdd 100644
--- a/pkg/splunk/splkcontroller/statefulset.go
+++ b/pkg/splunk/splkcontroller/statefulset.go
@@ -118,18 +118,6 @@ func UpdateStatefulSetPods(ctx context.Context, c splcommon.ControllerClient, st
 		"name", statefulSet.GetObjectMeta().GetName(),
 		"namespace", statefulSet.GetObjectMeta().GetNamespace())

-	// Try to get a generic event publisher from context (if present).
-	// We intentionally assert only on the Warning method to avoid
-	// depending on the concrete publisher type and keep packages decoupled.
-	var eventPublisher interface {
-		Warning(context.Context, string, string)
-	}
-	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
-		if p, ok := pub.(interface{ Warning(context.Context, string, string) }); ok {
-			eventPublisher = p
-		}
-	}
-
 	// Re-fetch the StatefulSet to ensure we have the latest status, especially UpdateRevision.
 	// This addresses a race condition where the StatefulSet controller may not have updated
 	// Status.UpdateRevision yet after a spec change was applied. Without this re-fetch,
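
Note: the refactor funnels every lookup through GetEventPublisher(ctx, cr), whose definition is not included in this series. Judging from the call sites, it presumably centralizes exactly the block it replaces; a hypothetical reconstruction (not the committed code — the cr parameter may also feed logging or event scoping not shown here):

// GetEventPublisher is a hypothetical reconstruction of the helper the
// refactor introduces: it centralizes the context lookup and returns nil
// when no publisher was attached, which is why callers keep their nil checks.
func GetEventPublisher(ctx context.Context, cr splcommon.MetaObject) *K8EventPublisher {
	if pub := ctx.Value(splcommon.EventPublisherKey); pub != nil {
		if p, ok := pub.(*K8EventPublisher); ok {
			return p
		}
	}
	return nil
}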
From 33abd0b78150be328c356735c2da4a267ca52205 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Fri, 6 Feb 2026 12:13:50 +0100
Subject: [PATCH 4/9] Fixing syntax and tests

---
 pkg/splunk/enterprise/indexercluster.go | 2 +-
 pkg/splunk/enterprise/upgrade.go        | 4 ++--
 pkg/splunk/enterprise/util_test.go      | 3 +++
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 5844ac6d4..bd1e90564 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -1123,7 +1123,7 @@ func (mgr *indexerClusterPodManager) updateStatus(ctx context.Context, statefulS
 				available++
 			}
 		}
-
+
 		// Cluster just finished initializing when quorum becomes ready
 		if !oldIndexingReady && mgr.cr.Status.IndexingReady {
 			if !oldInitialized && mgr.cr.Status.Initialized {

diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go
index 7098a2411..f113df436 100644
--- a/pkg/splunk/enterprise/upgrade.go
+++ b/pkg/splunk/enterprise/upgrade.go
@@ -38,8 +38,8 @@ func UpgradePathValidation(ctx context.Context, c splcommon.ControllerClient, cr
 	scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 	// Get event publisher from context
-	eventPublisher := GetEventPublisher(ctx, mgr.cr)
-
+	eventPublisher := GetEventPublisher(ctx, cr)
+
 	kind := cr.GroupVersionKind().Kind
 	scopedLog.Info("kind is set to ", "kind", kind)
 	// start from standalone first

diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index 565fd418e..30d71a679 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -2612,6 +2612,8 @@ func TestUpdateReconcileRequeueTime(t *testing.T) {
 }

 func TestUpdateCRStatus(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
 	sch := pkgruntime.NewScheme()
 	utilruntime.Must(clientgoscheme.AddToScheme(sch))
 	utilruntime.Must(corev1.AddToScheme(sch))
@@ -3229,6 +3231,7 @@ func TestGetLicenseMasterURL(t *testing.T) {
 	}
 }
 func TestGetCurrentImage(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
 	ctx := context.TODO()

 	current := enterpriseApi.ClusterManager{
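
Note: these tests gate licensing via os.Setenv, which leaks the variable into every test that runs afterwards in the same process. If the module's Go version is 1.17 or newer (an assumption; the go.mod is not shown in this series), t.Setenv would scope the variable per test and restore the previous value automatically. A sketch of that alternative — the test name is taken from the hunk above, the body elided:

func TestGetCurrentImage(t *testing.T) {
	// t.Setenv registers a cleanup that restores the previous value when
	// the test ends, unlike os.Setenv which leaks across tests.
	t.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
	// ... rest of the test unchanged
}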
From 6359a5628d05e05ca3702f57254401422eff5a96 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Fri, 6 Feb 2026 15:03:02 +0100
Subject: [PATCH 5/9] Tests

---
 pkg/splunk/enterprise/indexercluster_test.go    | 250 ++++++++++++++++++
 .../enterprise/searchheadcluster_test.go        |  81 ++++++
 2 files changed, 331 insertions(+)

diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index 92f562c5a..f97f96334 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -2020,3 +2020,253 @@ func TestImageUpdatedTo9(t *testing.T) {
 		t.Errorf("Should not have detected an upgrade from 8 to 9, there is no version")
 	}
 }
+
+func TestPasswordSyncCompleted(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+		WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+	client := builder.Build()
+	ctx := context.TODO()
+
+	// Create a mock event recorder to capture events
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+
+	cm := enterpriseApi.ClusterManager{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "ClusterManager",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "cm",
+			Namespace: "test",
+		},
+	}
+	cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+
+	err := client.Create(ctx, &cm)
+	if err != nil {
+		t.Fatalf("Failed to create ClusterManager: %v", err)
+	}
+
+	idxc := enterpriseApi.IndexerCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "IndexerCluster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "idxc",
+			Namespace: cm.GetNamespace(),
+		},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				ClusterManagerRef: corev1.ObjectReference{
+					Name: cm.GetName(),
+				},
+			},
+		},
+	}
+	idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+	err = client.Create(ctx, &idxc)
+	if err != nil {
+		t.Fatalf("Failed to create IndexerCluster: %v", err)
+	}
+
+	// Create namespace scoped secret so ApplyIdxcSecret has something to work with
+	nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, client, cm.GetNamespace())
+	if err != nil {
+		t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+	}
+
+	// Set CR status resource version to a stale value so ApplyIdxcSecret does not early-return
+	idxc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+
+	// Initialize a minimal pod manager for ApplyIdxcSecret
+	mgr := &indexerClusterPodManager{
+		c:   client,
+		log: logt.WithName("TestPasswordSyncCompleted"),
+		cr:  &idxc,
+	}
+
+	// Use a mock PodExec client; replicas will be 0 so it won't be exercised
+	var mockPodExecClient *spltest.MockPodExecClient = &spltest.MockPodExecClient{}
+
+	// Add event publisher to context so ApplyIdxcSecret can emit events
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Call ApplyIdxcSecret; with 0 replicas it will complete without touching pods,
+	// but still emit the PasswordSyncCompleted event
+	err = ApplyIdxcSecret(ctx, mgr, 0, mockPodExecClient)
+	if err != nil {
+		t.Errorf("Couldn't apply idxc secret %s", err.Error())
+	}
+
+	// Check that PasswordSyncCompleted event was published
+	foundEvent := false
+	for _, event := range recorder.events {
+		if event.reason == "PasswordSyncCompleted" {
+			foundEvent = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type, got %s", event.eventType)
+			}
+			break
+		}
+	}
+	if !foundEvent {
+		t.Errorf("Expected PasswordSyncCompleted event to be published")
+	}
+}
+
+func TestClusterQuorumRestored(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+		WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+	client := builder.Build()
+	ctx := context.TODO()
+
+	// Create a mock event recorder to capture events
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+
+	cm := enterpriseApi.ClusterManager{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "ClusterManager",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "manager1",
+			Namespace: "test",
+		},
+	}
+	cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+
+	err := client.Create(ctx, &cm)
+	if err != nil {
+		t.Fatalf("Failed to create ClusterManager: %v", err)
+	}
+
+	idxc := enterpriseApi.IndexerCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "IndexerCluster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "idxc",
+			Namespace: cm.GetNamespace(),
+		},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				ClusterManagerRef: corev1.ObjectReference{
+					Name: cm.GetName(),
+				},
+			},
+		},
+	}
+	idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+	err = client.Create(ctx, &idxc)
+	if err != nil {
+		t.Fatalf("Failed to create IndexerCluster: %v", err)
+	}
+
+	// Build mock HTTP handlers for a healthy cluster manager info/peers response
+	mockHandlers := []spltest.MockHTTPHandler{
+		{
+			Method: "GET",
+			URL:    "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json",
+			Status: 200,
+			Err:    nil,
+			Body:   splcommon.TestIndexerClusterPodManagerInfo,
+		},
+		{
+			Method: "GET",
+			URL:    "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json",
+			Status: 200,
+			Err:    nil,
+			Body:   splcommon.TestIndexerClusterPodManagerPeer,
+		},
+	}
+
+	// Create mock Splunk client and indexerClusterPodManager using existing helper
+	mockSplunkClient := &spltest.MockHTTPClient{}
+	mockSplunkClient.AddHandlers(mockHandlers...)
+
+	mgr := getIndexerClusterPodManager("TestClusterQuorumRestored", mockHandlers, mockSplunkClient, 3)
+	replicas := int32(3)
+	ss := &appsv1.StatefulSet{
+		Status: appsv1.StatefulSetStatus{
+			Replicas:      replicas,
+			ReadyReplicas: replicas,
+		},
+	}
+
+	// Wire a mock k8s client and event publisher into context
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Use a mock k8s client as in other updateStatus tests
+	c := spltest.NewMockClient()
+	mgr.c = c
+
+	// Ensure initial status is not indexing ready so we see a transition
+	mgr.cr.Status.IndexingReady = false
+
+	// Call updateStatus, which should transition to indexing ready and emit the event
+	err = mgr.updateStatus(ctx, ss)
+	if err != nil {
+		t.Fatalf("updateStatus returned unexpected error: %v", err)
+	}
+
+	// Check that ClusterQuorumRestored event was published
+	foundEvent := false
+	for _, event := range recorder.events {
+		if event.reason == "ClusterQuorumRestored" {
+			foundEvent = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type, got %s", event.eventType)
+			}
+			break
+		}
+	}
+	if !foundEvent {
+		t.Errorf("Expected ClusterQuorumRestored event to be published")
+	}
+}
+
+// mockEvent stores event details for testing
+type mockEvent struct {
+	eventType string
+	reason    string
+	message   string
+}
+
+// mockEventRecorder implements record.EventRecorder for testing
+type mockEventRecorder struct {
+	events []mockEvent
+}
+
+func (m *mockEventRecorder) Event(object pkgruntime.Object, eventType, reason, message string) {
+	m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: message})
+}
+
+func (m *mockEventRecorder) Eventf(object pkgruntime.Object, eventType, reason, messageFmt string, args ...interface{}) {
+	m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: fmt.Sprintf(messageFmt, args...)})
+}
+
+func (m *mockEventRecorder) AnnotatedEventf(object pkgruntime.Object, annotations map[string]string, eventType, reason, messageFmt string, args ...interface{}) {
+	m.events = append(m.events, mockEvent{eventType: eventType, reason: reason, message: fmt.Sprintf(messageFmt, args...)})
+}

diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go
index 569d0be8a..0121f7057 100644
--- a/pkg/splunk/enterprise/searchheadcluster_test.go
+++ b/pkg/splunk/enterprise/searchheadcluster_test.go
@@ -662,6 +662,87 @@ func TestApplyShcSecret(t *testing.T) {
 	}
 }

+func TestShcPasswordSyncCompleted(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
+
+	client := builder.Build()
+	ctx := context.TODO()
+
+	// Create a mock event recorder to capture events
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+
+	shc := enterpriseApi.SearchHeadCluster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "SearchHeadCluster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "shc",
+			Namespace: "test",
+		},
+	}
+	shc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("SearchHeadCluster"))
+
+	err := client.Create(ctx, &shc)
+	if err != nil {
+		t.Fatalf("Failed to create SearchHeadCluster: %v", err)
+	}
+
+	// Create namespace scoped secret so ApplyShcSecret has something to work with
+	nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, client, shc.GetNamespace())
+	if err != nil {
+		t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+	}
+
+	// Set CR status resource version to a stale value so ApplyShcSecret does not early-return
+	shc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+	shc.Status.AdminPasswordChangedSecrets = make(map[string]bool)
+
+	// Initialize a minimal pod manager for ApplyShcSecret
+	mgr := &searchHeadClusterPodManager{
+		c:   client,
+		log: logt.WithName("TestShcPasswordSyncCompleted"),
+		cr:  &shc,
+	}
+
+	// Use a mock PodExec client; replicas will be 0 so it won't be exercised
+	var mockPodExecClient *spltest.MockPodExecClient = &spltest.MockPodExecClient{}
+
+	// Add event publisher to context so ApplyShcSecret can emit events
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Call ApplyShcSecret; with 0 replicas it will complete without touching pods,
+	// but still emit the PasswordSyncCompleted event
+	err = ApplyShcSecret(ctx, mgr, 0, mockPodExecClient)
+	if err != nil {
+		t.Errorf("Couldn't apply shc secret %s", err.Error())
+	}
+
+	// Check that PasswordSyncCompleted event was published
+	foundEvent := false
+	for _, event := range recorder.events {
+		if event.reason == "PasswordSyncCompleted" {
+			foundEvent = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type, got %s", event.eventType)
+			}
+			break
+		}
+	}
+	if !foundEvent {
+		t.Errorf("Expected PasswordSyncCompleted event to be published")
+	}
+}
+
 func TestGetSearchHeadStatefulSet(t *testing.T) {
 	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
 	ctx := context.TODO()
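
Note: mockEventRecorder mirrors the three methods of client-go's record.EventRecorder. Assuming the unexported recorder field of K8EventPublisher is typed as that interface (an assumption — the field's declaration is not shown in this series), a compile-time assertion would catch future signature drift in the mock:

import "k8s.io/client-go/tools/record"

// Compile-time check that mockEventRecorder satisfies record.EventRecorder;
// the build fails if the interface gains methods or a signature changes.
var _ record.EventRecorder = (*mockEventRecorder)(nil)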
From 611164427d05e05ca3702f57254401422eff5a96 Mon Sep 17 00:00:00 2001
From: Kasia Koziol
Date: Mon, 9 Feb 2026 09:57:59 +0100
Subject: [PATCH 6/9] Tests

---
 pkg/splunk/enterprise/indexercluster_test.go | 31 +++++++++++++++-----
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index f97f96334..259fc7bec 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -2125,7 +2125,7 @@ func TestPasswordSyncCompleted(t *testing.T) {
 	}
 }

-func TestClusterQuorumRestored(t *testing.T) {
+func TestClusterQuorumRestoredClusterInitialized(t *testing.T) {
 	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")

 	sch := pkgruntime.NewScheme()
@@ -2206,7 +2206,7 @@ func TestClusterQuorumRestoredClusterInitialized(t *testing.T) {
 	mockSplunkClient := &spltest.MockHTTPClient{}
 	mockSplunkClient.AddHandlers(mockHandlers...)

-	mgr := getIndexerClusterPodManager("TestClusterQuorumRestored", mockHandlers, mockSplunkClient, 3)
+	mgr := getIndexerClusterPodManager("TestClusterQuorumRestoredClusterInitialized", mockHandlers, mockSplunkClient, 3)
 	replicas := int32(3)
 	ss := &appsv1.StatefulSet{
 		Status: appsv1.StatefulSetStatus{
@@ -2231,18 +2231,33 @@ func TestClusterQuorumRestoredClusterInitialized(t *testing.T) {
 		t.Fatalf("updateStatus returned unexpected error: %v", err)
 	}

-	// Check that ClusterQuorumRestored event was published
-	foundEvent := false
+	// Check that both ClusterInitialized and ClusterQuorumRestored events were published
+	clusterInitialized := false
+	quorumRestored := false
 	for _, event := range recorder.events {
+		if event.reason == "ClusterInitialized" {
+			clusterInitialized = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ClusterInitialized, got %s", event.eventType)
+			}
+			if quorumRestored {
+				break
+			}
+		}
 		if event.reason == "ClusterQuorumRestored" {
-			foundEvent = true
+			quorumRestored = true
 			if event.eventType != corev1.EventTypeNormal {
-				t.Errorf("Expected Normal event type, got %s", event.eventType)
+				t.Errorf("Expected Normal event type for ClusterQuorumRestored, got %s", event.eventType)
+			}
+			if clusterInitialized {
+				break
 			}
-			break
 		}
 	}
-	if !foundEvent {
+	if !clusterInitialized {
+		t.Errorf("Expected ClusterInitialized event to be published")
+	}
+	if !quorumRestored {
 		t.Errorf("Expected ClusterQuorumRestored event to be published")
 	}
 }
+ WithStatusSubresource(&enterpriseApi.IndexerCluster{}) + + client := builder.Build() + ctx := context.TODO() + + recorder := &mockEventRecorder{events: []mockEvent{}} + eventPublisher := &K8EventPublisher{recorder: recorder} + + cm := enterpriseApi.ClusterManager{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, + ObjectMeta: metav1.ObjectMeta{Name: "manager1", Namespace: "test"}, + } + cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager")) + if err := client.Create(ctx, &cm); err != nil { + t.Fatalf("Failed to create ClusterManager: %v", err) + } + + idxc := enterpriseApi.IndexerCluster{ + TypeMeta: metav1.TypeMeta{Kind: "IndexerCluster"}, + ObjectMeta: metav1.ObjectMeta{Name: "idxc", Namespace: cm.GetNamespace()}, + Spec: enterpriseApi.IndexerClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + ClusterManagerRef: corev1.ObjectReference{Name: cm.GetName()}, + }, + }, + } + idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster")) + if err := client.Create(ctx, &idxc); err != nil { + t.Fatalf("Failed to create IndexerCluster: %v", err) + } + + // First call: set initial state to indexing ready using healthy cluster response + mockHandlers := []spltest.MockHTTPHandler{ + { + Method: "GET", + URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json", + Status: 200, + Body: splcommon.TestIndexerClusterPodManagerInfo, + }, + { + Method: "GET", + URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json", + Status: 200, + Body: splcommon.TestIndexerClusterPodManagerPeer, + }, + } + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandlers(mockHandlers...) 
+ + mgr := getIndexerClusterPodManager("TestClusterQuorumLostEvent", mockHandlers, mockSplunkClient, 3) + replicas := int32(3) + ss := &appsv1.StatefulSet{ + Status: appsv1.StatefulSetStatus{Replicas: replicas, ReadyReplicas: replicas}, + } + + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + c := spltest.NewMockClient() + mgr.c = c + + mgr.cr.Status.IndexingReady = false + mgr.cr.Status.Initialized = false + err := mgr.updateStatus(ctx, ss) + if err != nil { + t.Fatalf("First updateStatus returned unexpected error: %v", err) + } + if !mgr.cr.Status.IndexingReady { + t.Fatal("Expected IndexingReady to be true after first updateStatus") + } + + // Reset recorder and prepare second call with indexing_ready=false + recorder.events = []mockEvent{} + quorumLostInfo := `{"entry":[{"content":{"initialized_flag":true,"indexing_ready_flag":false,"service_ready_flag":true,"maintenance_mode":false,"rolling_restart_flag":false,"label":"splunk-manager1-cluster-manager-0","active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7571-1583870198.bundle","checksum":"ABC123","timestamp":1583870198},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7571-1583870198.bundle","checksum":"ABC123","timestamp":1583870198},"multisite":"false","replication_factor":3,"site_replication_factor":"origin:2,total:3"}}]}` + quorumLostHandlers := []spltest.MockHTTPHandler{ + {Method: "GET", URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/info?count=0&output_mode=json", Status: 200, Body: quorumLostInfo}, + {Method: "GET", URL: "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/manager/peers?count=0&output_mode=json", Status: 200, Body: splcommon.TestIndexerClusterPodManagerPeer}, + } + mockSplunkClient2 := &spltest.MockHTTPClient{} + mockSplunkClient2.AddHandlers(quorumLostHandlers...) 
+	mgr.newSplunkClient = func(managementURI, username, password string) *splclient.SplunkClient {
+		sc := splclient.NewSplunkClient(managementURI, username, password)
+		sc.Client = mockSplunkClient2
+		return sc
+	}
+
+	err = mgr.updateStatus(ctx, ss)
+	if err != nil {
+		t.Fatalf("Second updateStatus returned unexpected error: %v", err)
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "ClusterQuorumLost" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for ClusterQuorumLost, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "quorum") {
+				t.Errorf("Expected event message to mention quorum, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ClusterQuorumLost event to be published")
+	}
+}
+
+func TestScalingBlockedRFEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	ctx := context.TODO()
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Use the same fixture and URL as TestVerifyRFPeers
+	mockHandlers := []spltest.MockHTTPHandler{
+		{
+			Method: "GET",
+			URL:    "https://splunk-manager1-cluster-manager-service.test.svc.cluster.local:8089/services/cluster/config?count=0&output_mode=json",
+			Status: 200,
+			Body:   loadFixture(t, "service_stack1_indexer_service.json"),
+		},
+	}
+	mockSplunkClient := &spltest.MockHTTPClient{}
+	mockSplunkClient.AddHandlers(mockHandlers...)
+
+	// replicas=1, which is less than RF=3 in the fixture
+	mgr := getIndexerClusterPodManager("TestScalingBlockedRFEvent", mockHandlers, mockSplunkClient, 1)
+
+	// Use spltest.NewMockClient, which handles the Get call for the CM pod
+	c := spltest.NewMockClient()
+	err := mgr.verifyRFPeers(ctx, c)
+	if err != nil {
+		t.Fatalf("verifyRFPeers returned unexpected error: %v", err)
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "ScalingBlockedRF" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for ScalingBlockedRF, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "replication factor") {
+				t.Errorf("Expected event message to mention replication factor, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScalingBlockedRF event to be published")
+	}
+	if mgr.cr.Spec.Replicas == 1 {
+		t.Errorf("Expected replicas to be adjusted from 1 to replication factor")
+	}
+}
+
+func TestIdxcScaledUpScaledDownEvent(t *testing.T) {
+	ctx := context.TODO()
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	crName := "test-idxc"
+	cr := &enterpriseApi.IndexerCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+	}
+
+	// Simulate ScaledUp: previousReplicas=1, desiredReplicas=3, phase=PhaseReady, Status.Replicas=3
+	previousReplicas := int32(1)
+	desiredReplicas := int32(3)
+	cr.Status.Replicas = desiredReplicas
+	phase := enterpriseApi.PhaseReady
+
+	// Replicate the production conditional from indexerClusterPodManager.Update()
+	ep := GetEventPublisher(ctx, cr)
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledUp",
+				fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledUp" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledUp, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			if !strings.Contains(event.message, "1") || !strings.Contains(event.message, "3") {
+				t.Errorf("Expected event message to contain replica counts, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledUp event to be published")
+	}
+
+	// Simulate ScaledDown: previousReplicas=3, desiredReplicas=1, phase=PhaseReady, Status.Replicas=1
+	recorder.events = []mockEvent{}
+	previousReplicas = int32(3)
+	desiredReplicas = int32(1)
+	cr.Status.Replicas = desiredReplicas
+
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found = false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledDown" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledDown, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledDown event to be published")
+	}
+
+	// Negative: no event when phase is not PhaseReady
+	recorder.events = []mockEvent{}
+	phase = enterpriseApi.PhasePending
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+	if len(recorder.events) != 0 {
+		t.Errorf("Expected no events when phase is not PhaseReady, got %d events", len(recorder.events))
+	}
+
+	// Negative: no event when replicas haven't converged
+	recorder.events = []mockEvent{}
+	phase = enterpriseApi.PhaseReady
+	cr.Status.Replicas = int32(2) // not yet at desiredReplicas
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+	if len(recorder.events) != 0 {
+		t.Errorf("Expected no events when replicas haven't converged, got %d events", len(recorder.events))
+	}
+}
+
+func TestIdxcPasswordSyncFailedEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+		WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+	c := builder.Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Create the namespace-scoped secret
+	nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test")
+	if err != nil {
+		t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+	}
+
+	idxc := enterpriseApi.IndexerCluster{
+		TypeMeta:   metav1.TypeMeta{Kind: "IndexerCluster"},
+		ObjectMeta: metav1.ObjectMeta{Name: "idxc", Namespace: "test"},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				ClusterManagerRef: corev1.ObjectReference{Name: "cm"},
+			},
+		},
+	}
+	idxc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+	// Set a stale resource version so ApplyIdxcSecret doesn't early-return
+	idxc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+	// Pre-set MaintenanceMode to skip the maintenance mode setup path
+	idxc.Status.MaintenanceMode = true
+	idxc.Status.IdxcPasswordChangedSecrets = make(map[string]bool)
+
+	// Create the indexer pod with a secret volume mount
+	podSecretName := "splunk-idxc-indexer-secret-v1"
+	indexerPodName := "splunk-idxc-indexer-0"
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: indexerPodName, Namespace: "test"},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:latest"}},
+			Volumes: []corev1.Volume{
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{SecretName: podSecretName},
+					},
+				},
+			},
+		},
+	}
+	if err := c.Create(ctx, pod); err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	// Create the pod's secret with an idxc_secret that DIFFERS from the namespace secret
+	podSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: podSecretName, Namespace: "test"},
+		Data: map[string][]byte{
+			"password":    []byte("admin-password"),
+			"idxc_secret": []byte("old-idxc-secret"),
+		},
+	}
+	if err := c.Create(ctx, podSecret); err != nil {
+		t.Fatalf("Failed to create pod secret: %v", err)
+	}
+
+	// Create a mock HTTP client that returns an error on the SetIdxcSecret POST
+	mockSplunkClient := &spltest.MockHTTPClient{}
+	mockSplunkClient.AddHandlers(spltest.MockHTTPHandler{
+		Method: "POST",
+		URL:    fmt.Sprintf("https://splunk-idxc-indexer-0.splunk-idxc-indexer-headless.test.svc.cluster.local:8089/services/cluster/config/config?secret=%s", string(nsSecret.Data["idxc_secret"])),
+		Status: 500,
+		Err:    fmt.Errorf("mock SetIdxcSecret failure"),
+	})
+
+	// Wire the pod manager to the failing mock client through the newSplunkClient factory seam
+	mgr := &indexerClusterPodManager{
+		c:   c,
+		log: logt.WithName("TestIdxcPasswordSyncFailedEvent"),
+		cr:  &idxc,
+		newSplunkClient: func(managementURI, username, password string) *splclient.SplunkClient {
+			sc := splclient.NewSplunkClient(managementURI, username, password)
+			sc.Client = mockSplunkClient
+			return sc
+		},
+	}
+
+	mockPodExecClient := &spltest.MockPodExecClient{}
+
+	// Call ApplyIdxcSecret — should fail at SetIdxcSecret and emit PasswordSyncFailed
+	err = ApplyIdxcSecret(ctx, mgr, 1, mockPodExecClient)
+	if err == nil {
+		t.Errorf("Expected error from ApplyIdxcSecret when SetIdxcSecret fails")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "PasswordSyncFailed" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for PasswordSyncFailed, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, indexerPodName) {
+				t.Errorf("Expected event message to contain pod name '%s', got: %s", indexerPodName, event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected PasswordSyncFailed event to be published")
+	}
+}
+
 // mockEvent stores event details for testing
 type mockEvent struct {
 	eventType string

From 1a01abdd2448b7d11baa5496bd2dc096d5439efd Mon Sep 17 00:00:00 2001
From: "igor.grzankowski"
Date: Mon, 9 Feb 2026 14:31:32 +0100
Subject: [PATCH 8/9] util test

---
 pkg/splunk/enterprise/util_test.go | 249 +++++++++++++++++++++++++++++
 1 file changed, 249 insertions(+)

diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index 30d71a679..67e708895 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -3281,3 +3281,252 @@ func TestGetCurrentImage(t *testing.T) {
 	}
 
 }
+
+func TestSecretMissingEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	client := fake.NewClientBuilder().WithScheme(sch).Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	cr := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+	}
+
+	volume := enterpriseApi.VolumeSpec{
+		Name:      "test-vol",
+		SecretRef: "nonexistent-secret",
+	}
+
+	_, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+	if err == nil {
+		t.Errorf("Expected error when secret does not exist")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "SecretMissing" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for SecretMissing, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "nonexistent-secret") {
+				t.Errorf("Expected event message to contain secret name 'nonexistent-secret', got: %s", event.message)
+			}
+			if !strings.Contains(event.message, "test") {
+				t.Errorf("Expected event message to contain namespace 'test', got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected SecretMissing event to be published")
+	}
+}
+
+func TestSecretInvalidEmptyAccessKeyEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	client := fake.NewClientBuilder().WithScheme(sch).Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	cr := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+	}
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: "test"},
+		Data: map[string][]byte{
+			"s3_access_key": {},
+			"s3_secret_key": []byte("some-secret-key"),
+		},
+	}
+	if err := client.Create(ctx, secret); err != nil {
+		t.Fatalf("Failed to create secret: %v", err)
+	}
+
+	volume := enterpriseApi.VolumeSpec{
+		Name:      "test-vol",
+		SecretRef: "test-secret",
+	}
+
+	_, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+	if err == nil {
+		t.Errorf("Expected error when access key is empty")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "SecretInvalid" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for SecretInvalid, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "test-secret") {
+				t.Errorf("Expected event message to contain secret name 'test-secret', got: %s", event.message)
+			}
+			if !strings.Contains(event.message, "accessKey") {
+				t.Errorf("Expected event message to mention missing field 'accessKey', got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected SecretInvalid event to be published for empty access key")
+	}
+}
+
+func TestSecretInvalidEmptySecretKeyEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	client := fake.NewClientBuilder().WithScheme(sch).Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	cr := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+	}
+
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-secret-sk", Namespace: "test"},
+		Data: map[string][]byte{
+			"s3_access_key": []byte("some-access-key"),
+			"s3_secret_key": {},
+		},
+	}
+	if err := client.Create(ctx, secret); err != nil {
+		t.Fatalf("Failed to create secret: %v", err)
+	}
+
+	volume := enterpriseApi.VolumeSpec{
+		Name:      "test-vol",
+		SecretRef: "test-secret-sk",
+	}
+
+	_, _, _, err := GetSmartstoreRemoteVolumeSecrets(ctx, volume, client, cr, &enterpriseApi.SmartStoreSpec{})
+	if err == nil {
+		t.Errorf("Expected error when secret key is empty")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "SecretInvalid" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for SecretInvalid, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "test-secret-sk") {
+				t.Errorf("Expected event message to contain secret name 'test-secret-sk', got: %s", event.message)
+			}
+			if !strings.Contains(event.message, "s3SecretKey") {
+				t.Errorf("Expected event message to mention missing field 's3SecretKey', got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected SecretInvalid event to be published for empty secret key")
+	}
+}
+
+func TestAppRepositoryConnectionFailedEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	client := fake.NewClientBuilder().WithScheme(sch).Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	cr := &enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+	}
+
+	// Create a secret with valid credentials so GetRemoteStorageClient reaches the getClient call
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-s3-secret", Namespace: "test"},
+		Data: map[string][]byte{
+			"s3_access_key": []byte("AKIAIOSFODNN7EXAMPLE"),
+			"s3_secret_key": []byte("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"),
+		},
+	}
+	if err := client.Create(ctx, secret); err != nil {
+		t.Fatalf("Failed to create secret: %v", err)
+	}
+
+	// Register a mock provider that always returns an error from getClient
+	mockProvider := "mock-failing-provider"
+	splclient.RemoteDataClientsMap[mockProvider] = splclient.GetRemoteDataClientWrapper{
+		GetRemoteDataClient: func(ctx context.Context, bucket, accessKeyID, secretAccessKey, prefix, startAfter, region, endpoint string, fn splclient.GetInitFunc) (splclient.RemoteDataClient, error) {
+			return nil, fmt.Errorf("mock connection timeout")
+		},
+		GetInitFunc: func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} {
+			return nil
+		},
+	}
+	defer delete(splclient.RemoteDataClientsMap, mockProvider)
+
+	vol := &enterpriseApi.VolumeSpec{
+		Name:      "test-vol",
+		Provider:  mockProvider,
+		Path:      "test-bucket/apps",
+		SecretRef: "test-s3-secret",
+	}
+
+	// Call GetRemoteStorageClient — should fail at getClient and emit AppRepositoryConnectionFailed
+	_, err := GetRemoteStorageClient(ctx, client, cr, &enterpriseApi.AppFrameworkSpec{}, vol, "apps", nil)
+	if err == nil {
+		t.Errorf("Expected error from GetRemoteStorageClient when getClient fails")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "AppRepositoryConnectionFailed" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for AppRepositoryConnectionFailed, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "test-vol") {
+				t.Errorf("Expected event message to contain volume name 'test-vol', got: %s", event.message)
+			}
+			if !strings.Contains(event.message, "mock connection timeout") {
+				t.Errorf("Expected event message to contain error details, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected AppRepositoryConnectionFailed event to be published")
+	}
+}

From 6618c016f90bdf36758275f89d531ecb4ee98d12 Mon Sep 17 00:00:00 2001
From: "igor.grzankowski"
Date: Tue, 10 Feb 2026 09:57:47 +0100
Subject: [PATCH 9/9] shc/upgrade tests

---
 .../enterprise/searchheadcluster_test.go      | 201 ++++++++++++++++++
 pkg/splunk/enterprise/upgrade_test.go         |  95 +++++++++
 2 files changed, 296 insertions(+)

diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go
index 0121f7057..3d11a539e 100644
--- a/pkg/splunk/enterprise/searchheadcluster_test.go
+++ b/pkg/splunk/enterprise/searchheadcluster_test.go
@@ -735,6 +735,9 @@ func TestShcPasswordSyncCompleted(t *testing.T) {
 			if event.eventType != corev1.EventTypeNormal {
 				t.Errorf("Expected Normal event type, got %s", event.eventType)
 			}
+			if !strings.Contains(event.message, "Password synchronized") {
+				t.Errorf("Expected event message to contain 'Password synchronized', got: %s", event.message)
+			}
 			break
 		}
 	}
@@ -2155,3 +2158,201 @@ func TestSetDeployerConfig(t *testing.T) {
 		t.Errorf("Failed to set deployer resources properly, requests are off")
 	}
 }
+
+func TestShcPasswordSyncFailedEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.SearchHeadCluster{})
+
+	c := builder.Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	// Create the namespace-scoped secret
+	nsSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, c, "test")
+	if err != nil {
+		t.Fatalf("Failed to apply namespace scoped secret: %v", err)
+	}
+
+	shc := enterpriseApi.SearchHeadCluster{
+		TypeMeta:   metav1.TypeMeta{Kind: "SearchHeadCluster"},
+		ObjectMeta: metav1.ObjectMeta{Name: "shc", Namespace: "test"},
+	}
+	shc.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("SearchHeadCluster"))
+	// Set a stale resource version so ApplyShcSecret doesn't early-return
+	shc.Status.NamespaceSecretResourceVersion = nsSecret.ResourceVersion + "-old"
+	shc.Status.AdminPasswordChangedSecrets = make(map[string]bool)
+
+	// Create the search head pod with a secret volume mount
+	podSecretName := "splunk-shc-search-head-secret-v1"
+	shPodName := "splunk-shc-search-head-0"
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{Name: shPodName, Namespace: "test"},
+		Spec: corev1.PodSpec{
+			Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:latest"}},
+			Volumes: []corev1.Volume{
+				{
+					Name: "mnt-splunk-secrets",
+					VolumeSource: corev1.VolumeSource{
+						Secret: &corev1.SecretVolumeSource{SecretName: podSecretName},
+					},
+				},
+			},
+		},
+	}
+	if err := c.Create(ctx, pod); err != nil {
+		t.Fatalf("Failed to create pod: %v", err)
+	}
+
+	// Create the pod's secret with an shc_secret that DIFFERS from the namespace secret
+	podSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: podSecretName, Namespace: "test"},
+		Data: map[string][]byte{
+			"password":   []byte("admin-password"),
+			"shc_secret": []byte("old-shc-secret"),
+		},
+	}
+	if err := c.Create(ctx, podSecret); err != nil {
+		t.Fatalf("Failed to create pod secret: %v", err)
+	}
+
+	mgr := &searchHeadClusterPodManager{
+		c:   c,
+		log: logt.WithName("TestShcPasswordSyncFailedEvent"),
+		cr:  &shc,
+	}
+
+	// Configure the mock pod exec client to return an error on the shcluster-config command
+	mockPodExecClient := &spltest.MockPodExecClient{}
+	mockPodExecClient.AddMockPodExecReturnContext(ctx, "shcluster-config", &spltest.MockPodExecReturnContext{
+		StdOut: "",
+		StdErr: "connection refused",
+		Err:    fmt.Errorf("connection refused"),
+	})
+
+	// Call ApplyShcSecret — should fail at RunPodExecCommand and emit PasswordSyncFailed
+	err = ApplyShcSecret(ctx, mgr, 1, mockPodExecClient)
+	if err == nil {
+		t.Errorf("Expected error from ApplyShcSecret when pod exec fails")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "PasswordSyncFailed" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for PasswordSyncFailed, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, shPodName) {
+				t.Errorf("Expected event message to contain pod name '%s', got: %s", shPodName, event.message)
+			}
+			if !strings.Contains(event.message, "connection refused") {
+				t.Errorf("Expected event message to contain error details, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected PasswordSyncFailed event to be published")
+	}
+}
+
+func TestShcScaledUpScaledDownEvent(t *testing.T) {
+	ctx := context.TODO()
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	crName := "test-shc"
+	cr := &enterpriseApi.SearchHeadCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: crName, Namespace: "test"},
+	}
+
+	// Simulate ScaledUp: previousReplicas=3, desiredReplicas=5, phase=PhaseReady, Status.Replicas=5
+	previousReplicas := int32(3)
+	desiredReplicas := int32(5)
+	cr.Status.Replicas = desiredReplicas
+	phase := enterpriseApi.PhaseReady
+
+	// Replicate the production conditional from searchHeadClusterPodManager.Update()
+	ep := GetEventPublisher(ctx, cr)
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas > previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledUp",
+				fmt.Sprintf("Successfully scaled %s up from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledUp" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledUp, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			if !strings.Contains(event.message, "3") || !strings.Contains(event.message, "5") {
+				t.Errorf("Expected event message to contain replica counts, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledUp event to be published")
+	}
+
+	// Simulate ScaledDown: previousReplicas=5, desiredReplicas=3, phase=PhaseReady, Status.Replicas=3
+	recorder.events = []mockEvent{}
+	previousReplicas = int32(5)
+	desiredReplicas = int32(3)
+	cr.Status.Replicas = desiredReplicas
+
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+
+	found = false
+	for _, event := range recorder.events {
+		if event.reason == "ScaledDown" {
+			found = true
+			if event.eventType != corev1.EventTypeNormal {
+				t.Errorf("Expected Normal event type for ScaledDown, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, crName) {
+				t.Errorf("Expected event message to contain CR name '%s', got: %s", crName, event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected ScaledDown event to be published")
+	}
+
+	// Negative: no event when phase is not PhaseReady
+	recorder.events = []mockEvent{}
+	phase = enterpriseApi.PhasePending
+	if phase == enterpriseApi.PhaseReady {
+		if desiredReplicas < previousReplicas && cr.Status.Replicas == desiredReplicas {
+			ep.Normal(ctx, "ScaledDown",
+				fmt.Sprintf("Successfully scaled %s down from %d to %d replicas", cr.GetName(), previousReplicas, desiredReplicas))
+		}
+	}
+	if len(recorder.events) != 0 {
+		t.Errorf("Expected no events when phase is not PhaseReady, got %d events", len(recorder.events))
+	}
+}
diff --git a/pkg/splunk/enterprise/upgrade_test.go b/pkg/splunk/enterprise/upgrade_test.go
index b501527fd..f1843f0ae 100644
--- a/pkg/splunk/enterprise/upgrade_test.go
+++ b/pkg/splunk/enterprise/upgrade_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"runtime/debug"
+	"strings"
 	"testing"
 
 	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
@@ -625,6 +626,100 @@ func TestUpgradePathValidation(t *testing.T) {
 
 }
 
+func TestUpgradeBlockedVersionMismatchEvent(t *testing.T) {
+	os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com")
+
+	sch := pkgruntime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(sch))
+	utilruntime.Must(corev1.AddToScheme(sch))
+	utilruntime.Must(enterpriseApi.AddToScheme(sch))
+
+	builder := fake.NewClientBuilder().
+		WithScheme(sch).
+		WithStatusSubresource(&enterpriseApi.ClusterManager{}).
+		WithStatusSubresource(&enterpriseApi.IndexerCluster{})
+
+	client := builder.Build()
+	ctx := context.TODO()
+
+	recorder := &mockEventRecorder{events: []mockEvent{}}
+	eventPublisher := &K8EventPublisher{recorder: recorder}
+
+	// Create a ClusterManager with the old image, phase Ready
+	cm := enterpriseApi.ClusterManager{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-cm", Namespace: "test"},
+		Spec: enterpriseApi.ClusterManagerSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec: enterpriseApi.Spec{Image: "splunk/splunk:old"},
+			},
+		},
+	}
+	cm.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("ClusterManager"))
+	if err := client.Create(ctx, &cm); err != nil {
+		t.Fatalf("Failed to create ClusterManager: %v", err)
+	}
+	cm.Status.Phase = enterpriseApi.PhaseReady
+	if err := client.Status().Update(ctx, &cm); err != nil {
+		t.Fatalf("Failed to update ClusterManager status: %v", err)
+	}
+
+	// Create the CM StatefulSet with the old image
+	cmSS := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{Name: "splunk-test-cm-cluster-manager", Namespace: "test"},
+		Spec: appsv1.StatefulSetSpec{
+			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "test"}},
+				Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "splunk", Image: "splunk/splunk:old"}}},
+			},
+		},
+	}
+	if err := client.Create(ctx, cmSS); err != nil {
+		t.Fatalf("Failed to create CM StatefulSet: %v", err)
+	}
+
+	// IndexerCluster CR with the NEW image (mismatch with the CM)
+	idx := enterpriseApi.IndexerCluster{
+		ObjectMeta: metav1.ObjectMeta{Name: "test-idx", Namespace: "test"},
+		Spec: enterpriseApi.IndexerClusterSpec{
+			CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{
+				Spec:              enterpriseApi.Spec{Image: "splunk/splunk:new"},
+				ClusterManagerRef: corev1.ObjectReference{Name: "test-cm"},
+			},
+		},
+	}
+	idx.SetGroupVersionKind(enterpriseApi.GroupVersion.WithKind("IndexerCluster"))
+
+	ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher)
+
+	mgr := &indexerClusterPodManager{}
+	continueReconcile, err := UpgradePathValidation(ctx, client, &idx, idx.Spec.CommonSplunkSpec, mgr)
+
+	if continueReconcile {
+		t.Errorf("Expected continueReconcile to be false when CM image mismatches IDX image")
+	}
+	if err == nil {
+		t.Errorf("Expected error when CM image mismatches IDX image")
+	}
+
+	found := false
+	for _, event := range recorder.events {
+		if event.reason == "UpgradeBlockedVersionMismatch" {
+			found = true
+			if event.eventType != corev1.EventTypeWarning {
+				t.Errorf("Expected Warning event type for UpgradeBlockedVersionMismatch, got %s", event.eventType)
+			}
+			if !strings.Contains(event.message, "ClusterManager") {
+				t.Errorf("Expected event message to mention ClusterManager, got: %s", event.message)
+			}
+			break
+		}
+	}
+	if !found {
+		t.Errorf("Expected UpgradeBlockedVersionMismatch event to be published")
+	}
+}
+
 func createPods(t *testing.T, ctx context.Context, client common.ControllerClient, crtype, name, namespace, image string) {
 	stpod := &corev1.Pod{}
 	namespacesName := types.NamespacedName{