diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index 8ccaf2e65..f3a9e38f5 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index bdd7fe563..9003cb439 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index d108005e7..0f473836e 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index 681491b61..1718b316b 
100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index 356812323..8e0d6aa3d 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index ebbea6176..3ddeaa82d 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 6c79f58a9..7e8af7d45 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -190,6 +190,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} 
CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index c47d72ab7..bb99d1742 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -191,6 +191,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml index da4719183..a73d194c5 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -88,6 +88,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 6e83bcc63..d5e58c914 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -65,6 +65,8 @@ jobs: HELM_REPO_PATH: "../../../../helm-chart" INSTALL_OPERATOR: "true" TEST_VPC_ENDPOINT_URL: ${{ secrets.TEST_VPC_ENDPOINT_URL }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ 
secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - uses: chrisdickinson/setup-yq@3d931309f27270ebbafd53f2daee773a82ea1822 - name: Checking YQ installation diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index e5b12b5dc..c09b6c305 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -84,6 +84,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index b76b3d515..c042347aa 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -45,6 +45,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: ${{ github.event.inputs.CLUSTER_WIDE }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index b32dcee92..9153bd950 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -40,6 +40,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "false" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + 
AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 4bc4c199c..41fbf3d74 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -81,6 +81,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/PROJECT b/PROJECT index 983f3418b..e87979069 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,16 @@ resources: controller: true domain: splunk.com group: enterprise - kind: BusConfiguration + kind: Queue + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: ObjectStorage path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/busconfiguration_types.go b/api/v4/busconfiguration_types.go deleted file mode 100644 index a4b76a00b..000000000 --- a/api/v4/busconfiguration_types.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v4 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -const ( - // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers - // an immediate requeue) - BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" -) - -// BusConfigurationSpec defines the desired state of BusConfiguration -type BusConfigurationSpec struct { - Type string `json:"type"` - - SQS SQSSpec `json:"sqs"` -} - -type SQSSpec struct { - QueueName string `json:"queueName"` - - AuthRegion string `json:"authRegion"` - - Endpoint string `json:"endpoint"` - - LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` - - LargeMessageStorePath string `json:"largeMessageStorePath"` - - DeadLetterQueueName string `json:"deadLetterQueueName"` -} - -// BusConfigurationStatus defines the observed state of BusConfiguration. 
-type BusConfigurationStatus struct { - // Phase of the bus configuration - Phase Phase `json:"phase"` - - // Resource revision tracker - ResourceRevMap map[string]string `json:"resourceRevMap"` - - // Auxillary message describing CR status - Message string `json:"message"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status - -// BusConfiguration is the Schema for a Splunk Enterprise bus configuration -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" -// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" -// +kubebuilder:storageversion - -// BusConfiguration is the Schema for the busconfigurations API -type BusConfiguration struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - - Spec BusConfigurationSpec `json:"spec"` - Status BusConfigurationStatus `json:"status,omitempty,omitzero"` -} - -// DeepCopyObject implements runtime.Object -func (in *BusConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// +kubebuilder:object:root=true - -// BusConfigurationList contains a list of BusConfiguration -type BusConfigurationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []BusConfiguration `json:"items"` -} - -func init() { - SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) -} - -// NewEvent creates a new event associated 
with the object and ready -// to be published to Kubernetes API -func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "BusConfiguration", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-busconfiguration-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/busconfiguration-controller", - } -} diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 493aeb0f3..5035c3cff 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,12 +34,20 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` + // +optional + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` + + // +optional + // Object Storage reference + ObjectStorageRef corev1.ObjectReference 
`json:"objectStorageRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -115,8 +123,11 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Credential secret version to track changes to the secret and trigger rolling restart of indexer cluster peers when the secret is updated + CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"` + + // Service account to track changes to the service account and trigger rolling restart of indexer cluster peers when the service account is updated + ServiceAccount string `json:"serviceAccount,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 364625e97..0dc3ce844 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -28,19 +28,28 @@ const ( IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IngestorClusterSpec defines the spec of Ingestor Cluster type IngestorClusterSpec struct { // Common Splunk spec CommonSplunkSpec `json:",inline"` // Number of ingestor pods + // +kubebuilder:validation:Minimum=3 + // +kubebuilder:default=3 Replicas int32 `json:"replicas"` // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` + // 
+kubebuilder:validation:Required + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` + + // +kubebuilder:validation:Required + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -69,8 +78,11 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Credential secret version to track changes to the secret and trigger rolling restart of indexer cluster peers when the secret is updated + CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"` + + // Service account to track changes to the service account and trigger rolling restart of indexer cluster peers when the service account is updated + ServiceAccount string `json:"serviceAccount,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go new file mode 100644 index 000000000..b041ef466 --- /dev/null +++ b/api/v4/objectstorage_types.go @@ -0,0 +1,110 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // ObjectStoragePausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" +) + +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.s3 == oldSelf.s3",message="s3 is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" +// ObjectStorageSpec defines the desired state of ObjectStorage +type ObjectStorageSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=s3 + // Provider of queue resources + Provider string `json:"provider"` + + // +kubebuilder:validation:Required + // s3 specific inputs + S3 S3Spec `json:"s3"` +} + +type S3Spec struct { + // +optional + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` + // S3-compatible Service endpoint + Endpoint string `json:"endpoint"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$` + // S3 bucket path + Path string `json:"path"` +} + +// ObjectStorageStatus defines the observed state of ObjectStorage. 
+type ObjectStorageStatus struct { + // Phase of the object storage + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxiliary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ObjectStorage is the Schema for a Splunk Enterprise object storage +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// NOTE(review): scale subresource marker removed — ObjectStorage has no spec.replicas/status.replicas/status.selector fields +// +kubebuilder:resource:path=objectstorages,scope=Namespaced,shortName=os +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of object storage" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of object storage resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status" +// +kubebuilder:storageversion + +// ObjectStorage is the Schema for the objectstorages API +type ObjectStorage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec ObjectStorageSpec `json:"spec"` + Status ObjectStorageStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *ObjectStorage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// ObjectStorageList contains a list of ObjectStorage +type ObjectStorageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ObjectStorage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) +} diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go new file mode 100644 index
000000000..2139f43dd --- /dev/null +++ b/api/v4/queue_types.go @@ -0,0 +1,127 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // QueuePausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" +) + +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.name == oldSelf.sqs.name",message="sqs.name is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.authRegion == oldSelf.sqs.authRegion",message="sqs.authRegion is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.dlq == oldSelf.sqs.dlq",message="sqs.dlq is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.endpoint == oldSelf.sqs.endpoint",message="sqs.endpoint is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" +// QueueSpec defines the desired state of Queue +type QueueSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=sqs + // Provider of queue resources + Provider string `json:"provider"` + + // +kubebuilder:validation:Required 
+ // sqs specific inputs + SQS SQSSpec `json:"sqs"` +} + +type SQSSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the queue + Name string `json:"name"` + + // +optional + // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` + // Auth Region of the resources + AuthRegion string `json:"authRegion"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the dead letter queue resource + DLQ string `json:"dlq"` + + // +optional + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` + // Amazon SQS Service endpoint + Endpoint string `json:"endpoint"` + + // +optional + // List of remote storage volumes + VolList []VolumeSpec `json:"volumes,omitempty"` +} + +// QueueStatus defines the observed state of Queue +type QueueStatus struct { + // Phase of the queue + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxiliary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// Queue is the Schema for a Splunk Enterprise queue +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// NOTE(review): scale subresource marker removed — Queue has no spec.replicas/status.replicas/status.selector fields +// +kubebuilder:resource:path=queues,scope=Namespaced,shortName=queue +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of queue" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of queue resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxiliary message describing CR status" +// +kubebuilder:storageversion + +// Queue is the Schema for the queues API +type Queue struct { + metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// QueueList contains a list of Queue +type QueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Queue `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Queue{}, &QueueList{}) +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index fa23c996a..c7759fa58 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -180,95 +180,6 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. -func (in *BusConfiguration) DeepCopy() *BusConfiguration { - if in == nil { - return nil - } - out := new(BusConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]BusConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. -func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { - if in == nil { - return nil - } - out := new(BusConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BusConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { - *out = *in - out.SQS = in.SQS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. -func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { - if in == nil { - return nil - } - out := new(BusConfigurationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { - *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. 
-func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { - if in == nil { - return nil - } - out := new(BusConfigurationStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -600,7 +511,8 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusConfigurationRef = in.BusConfigurationRef + out.QueueRef = in.QueueRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -633,7 +545,6 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - out.BusConfiguration = in.BusConfiguration } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -702,7 +613,8 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusConfigurationRef = in.BusConfigurationRef + out.QueueRef = in.QueueRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. @@ -726,7 +638,6 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - out.BusConfiguration = in.BusConfiguration } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. 
@@ -931,6 +842,95 @@ func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorage) DeepCopyInto(out *ObjectStorage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorage. +func (in *ObjectStorage) DeepCopy() *ObjectStorage { + if in == nil { + return nil + } + out := new(ObjectStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageList) DeepCopyInto(out *ObjectStorageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObjectStorage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageList. +func (in *ObjectStorageList) DeepCopy() *ObjectStorageList { + if in == nil { + return nil + } + out := new(ObjectStorageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectStorageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { + *out = *in + out.S3 = in.S3 +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. 
+func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageStatus) DeepCopyInto(out *ObjectStorageStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageStatus. +func (in *ObjectStorageStatus) DeepCopy() *ObjectStorageStatus { + if in == nil { + return nil + } + out := new(ObjectStorageStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PhaseInfo) DeepCopyInto(out *PhaseInfo) { *out = *in @@ -977,9 +977,118 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + in.SQS.DeepCopyInto(&out.SQS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. +func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3Spec) DeepCopyInto(out *S3Spec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. +func (in *S3Spec) DeepCopy() *S3Spec { + if in == nil { + return nil + } + out := new(S3Spec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec. diff --git a/cmd/main.go b/cmd/main.go index 1984474fa..a037f87b1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,13 +230,6 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusConfigurationReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") - os.Exit(1) - } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index d66e057fb..819c2f971 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,49 +5165,6 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busConfigurationRef: - description: Bus configuration reference - properties: - apiVersion: - description: API version of the referent. 
- type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -5647,6 +5604,92 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -8285,6 +8328,13 @@ spec: type: object type: array type: object + x-kubernetes-validations: + - message: queueRef and objectStorageRef must both be set or both be empty + rule: has(self.queueRef) == has(self.objectStorageRef) + - message: queueRef is immutable once created + rule: '!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef' + - message: objectStorageRef is immutable once created + rule: '!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef' status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8294,27 +8344,6 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - busConfiguration: - description: Bus configuration - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - endpoint: - type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: - type: string - type: object - type: - type: string - type: object clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8337,6 +8366,11 @@ spec: - Terminating - Error type: string + credentialSecretVersion: + description: Credential secret version to track changes to the secret + and trigger rolling restart of indexer cluster peers when the secret + is updated + type: string indexer_secret_changed_flag: description: Indicates when the idxc_secret has been changed for a peer @@ -8414,6 +8448,11 @@ spec: description: Indicates whether the manager is ready to 
begin servicing, based on whether it is initialized. type: boolean + serviceAccount: + description: Service account to track changes to the service account + and trigger rolling restart of indexer cluster peers when the service + account is updated + type: string type: object type: object served: true diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 82f1f868a..5f0e3dd88 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,49 +1141,6 @@ spec: type: object type: array type: object - busConfigurationRef: - description: Bus configuration reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1623,6 +1580,92 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -1656,8 +1699,10 @@ spec: type: integer type: object replicas: + default: 3 description: Number of ingestor pods format: int32 + minimum: 3 type: integer resources: description: resource requirements for the pod containers @@ -4259,7 +4304,15 @@ spec: - name type: object type: array + required: + - objectStorageRef + - queueRef type: object + x-kubernetes-validations: + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -4545,27 +4598,11 @@ spec: description: App Framework version info for future use type: integer type: object - busConfiguration: - description: Bus configuration - properties: - sqs: - properties: - authRegion: - type: string - deadLetterQueueName: - type: string - endpoint: - type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: - type: string - type: object - type: - type: string - type: object + credentialSecretVersion: + description: Credential secret version to track changes to the secret + and trigger rolling restart of indexer cluster peers when the secret + is updated + type: string message: description: Auxillary message describing CR status type: 
string @@ -4596,6 +4633,11 @@ spec: selector: description: Selector for pods used by HorizontalPodAutoscaler type: string + serviceAccount: + description: Service account to track changes to the service account + and trigger rolling restart of indexer cluster peers when the service + account is updated + type: string telAppInstalled: description: Telemetry App installation flag type: boolean diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml similarity index 63% rename from config/crd/bases/enterprise.splunk.com_busconfigurations.yaml rename to config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 9f80cdbea..015d5797d 100644 --- a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: busconfigurations.enterprise.splunk.com + name: objectstorages.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: BusConfiguration - listKind: BusConfigurationList - plural: busconfigurations + kind: ObjectStorage + listKind: ObjectStorageList + plural: objectstorages shortNames: - - bus - singular: busconfiguration + - os + singular: objectstorage scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus configuration + - description: Status of object storage jsonPath: .status.phase name: Phase type: string - - description: Age of bus configuration resource + - description: Age of object storage resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: BusConfiguration is the Schema for the busconfigurations API + description: ObjectStorage is the Schema for the objectstorages API properties: apiVersion: description: |- @@ -52,34 +52,46 @@ spec: metadata: type: 
object spec: - description: BusConfigurationSpec defines the desired state of BusConfiguration + description: ObjectStorageSpec defines the desired state of ObjectStorage properties: - sqs: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs properties: - authRegion: - type: string - deadLetterQueueName: - type: string endpoint: + description: S3-compatible Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: + path: + description: S3 bucket path + pattern: ^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider + - s3 type: object + x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) status: - description: BusConfigurationStatus defines the observed state of BusConfiguration. + description: ObjectStorageStatus defines the observed state of ObjectStorage. 
properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus configuration + description: Phase of the object storage enum: - Pending - Ready diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml new file mode 100644 index 000000000..e10ee536a --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -0,0 +1,166 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: queues.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Queue + listKind: QueueList + plural: queues + shortNames: + - queue + singular: queue + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of queue + jsonPath: .status.phase + name: Phase + type: string + - description: Age of queue resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: Queue is the Schema for the queues API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: QueueSpec defines the desired state of Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https?://[^\s/$.?#].[^\s]*$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where apps + reside. Used for aws, if provided. Not used for minio + and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3, + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' 
+ type: string + type: object + type: array + required: + - dlq + - name + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs.name is immutable once created + rule: self.sqs.name == oldSelf.sqs.name + - message: sqs.authRegion is immutable once created + rule: self.sqs.authRegion == oldSelf.sqs.authRegion + - message: sqs.dlq is immutable once created + rule: self.sqs.dlq == oldSelf.sqs.dlq + - message: sqs.endpoint is immutable once created + rule: self.sqs.endpoint == oldSelf.sqs.endpoint + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + status: + description: QueueStatus defines the observed state of Queue + properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the queue + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 679c1dc72..0304146cd 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,8 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_busconfigurations.yaml +- bases/enterprise.splunk.com_queues.yaml +- bases/enterprise.splunk.com_objectstorages.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git 
a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/objectstorage_editor_role.yaml similarity index 88% rename from config/rbac/busconfiguration_editor_role.yaml rename to config/rbac/objectstorage_editor_role.yaml index fde8687f7..70323227f 100644 --- a/config/rbac/busconfiguration_editor_role.yaml +++ b/config/rbac/objectstorage_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-editor-role + name: objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/objectstorage_viewer_role.yaml similarity index 87% rename from config/rbac/busconfiguration_viewer_role.yaml rename to config/rbac/objectstorage_viewer_role.yaml index 6230863a9..9764699bc 100644 --- a/config/rbac/busconfiguration_viewer_role.yaml +++ b/config/rbac/objectstorage_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-viewer-role + name: objectstorage-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get diff --git a/config/rbac/queue_editor_role.yaml b/config/rbac/queue_editor_role.yaml new file mode 100644 index 000000000..bf7e4d890 --- /dev/null +++ b/config/rbac/queue_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. 
+# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: queue-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get diff --git a/config/rbac/queue_viewer_role.yaml b/config/rbac/queue_viewer_role.yaml new file mode 100644 index 000000000..b186c8650 --- /dev/null +++ b/config/rbac/queue_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: queue-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 78231b303..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,7 +47,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations - clustermanagers - clustermasters - indexerclusters @@ -55,6 +54,8 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - objectstorages + - queues - searchheadclusters - standalones verbs: @@ -68,7 +69,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers @@ -76,6 +76,8 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -83,7 +85,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status - clustermanagers/status - clustermasters/status - indexerclusters/status @@ -91,6 +92,8 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_objectstorage.yaml similarity index 72% rename from config/samples/enterprise_v4_busconfiguration.yaml rename to config/samples/enterprise_v4_objectstorage.yaml index 0cc1aed31..b693a14e0 100644 --- a/config/samples/enterprise_v4_busconfiguration.yaml +++ b/config/samples/enterprise_v4_objectstorage.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 
-kind: BusConfiguration +kind: ObjectStorage metadata: - name: busconfiguration-sample + name: objectstorage-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/enterprise_v4_queue.yaml b/config/samples/enterprise_v4_queue.yaml new file mode 100644 index 000000000..374d4adb2 --- /dev/null +++ b/config/samples/enterprise_v4_queue.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: queue-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 88c71025d..34c05ab05 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,5 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_busconfiguration.yaml +- enterprise_v4_queue.yaml +- enterprise_v4_objectstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 6461d4488..cd29cde79 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,9 +18,11 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. 
- [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) + - [Queue Resource Spec Parameters](#queue-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) + - [ObjectStorage Resource Spec Parameters](#objectstorage-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -279,6 +281,41 @@ spec: cpu: "4" ``` +## Queue Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: queue +spec: + replicas: 3 + provider: sqs + sqs: + name: sqs-test + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +``` + +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | + +SQS message queue inputs can be found in the table below. 
+ +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| name | string | [Required] Name of the queue | +| authRegion | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on authRegion] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | + +Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - ```yaml @@ -338,10 +375,12 @@ metadata: name: ic spec: replicas: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` -Note: `busConfigurationRef` is required field in case of IngestorCluster resource since it will be used to connect the IngestorCluster to BusConfiguration resource. +Note: `queueRef` and `objectStorageRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and ObjectStorage resources. 
In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -351,6 +390,36 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | +## ObjectStorage Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + path: ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com +``` + +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | + +S3 object storage inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## MonitoringConsole Resource Spec Parameters ```yaml @@ -462,10 +531,12 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | Customer Resource Definition | Annotation | | ----------- | --------- | +| queue.enterprise.splunk.com | "queue.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | +| objectstorage.enterprise.splunk.com | "objectstorage.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index dd53922ff..0dfdc3eb0 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -1,67 +1,108 @@ +--- +title: Index and Ingestion Separation +parent: Deploy & Configure +nav_order: 6 +--- + # Background Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service. This separation enables: - Independent scaling: Match resource allocation to ingestion or indexing workload. -- Data durability: Off‑load buffer management and retry logic to a durable message bus. +- Data durability: Off‑load buffer management and retry logic to a durable message queue. - Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency. # Important Note > [!WARNING] -> **As of now, only brand new deployments are supported for Index and Ingestion Separation. 
No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.** +> **For customers deploying SmartBus on CMP, the Splunk Operator for Kubernetes (SOK) manages the configuration and lifecycle of the ingestor tier. The following SOK guide provides implementation details for setting up ingestion separation and integrating with existing indexers. This reference is primarily intended for CMP users leveraging SOK-managed ingestors.** # Document Variables - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# BusConfiguration +# Queue -BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. +Queue is introduced to store message queue information to be shared among IngestorCluster and IndexerCluster. ## Spec -BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| type | string | Type of message bus (Only sqs_smartbus as of now) | -| sqs | SQS | SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. +SQS message queue inputs can be found in the table below. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| queueName | string | Name of the SQS queue | -| authRegion | string | Region where the SQS queue is located | -| endpoint | string | AWS SQS endpoint -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | -| largeMessageStorePath | string | S3 path for Large Message Store | -| deadLetterQueueName | string | Name of the SQS dead letter queue | +| name | string | [Required] Name of the queue | +| authRegion | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on authRegion] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | +| volumes | []VolumeSpec | [Optional] List of remote storage volumes used to mount the credentials for queue and bucket access (must contain s3_access_key and s3_secret_key) | -Change of any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions. +**SOK doesn't support update of any of the Queue inputs except from the volumes which allow the change of secrets.** ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Queue metadata: - name: bus-config + name: queue spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test + name: sqs-test authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret +``` + +# ObjectStorage + +ObjectStorage is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. + +## Spec + +ObjectStorage inputs can be found in the table below. 
As of now, only S3 provider of object storage is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | + +S3 object storage inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on authRegion] S3-compatible service endpoint + +**SOK doesn't support update of any of the ObjectStorage inputs.** + +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + path: ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com ``` # IngestorCluster -IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. +IngestorCluster is introduced for high‑throughput data ingestion into a durable message queue. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message queue. 
## Spec @@ -70,13 +111,18 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| queueRef | corev1.ObjectReference | Message queue reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | + +**SOK doesn't support update of queueRef and objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires Ingestor Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. 
Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -89,13 +135,15 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # IndexerCluster -IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the bus (inputs.conf) and index them. +IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the queue (inputs.conf) and index them. 
## Spec @@ -104,13 +152,18 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| queueRef | corev1.ObjectReference | Message queue reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | + +**SOK doesn't support update of queueRef and objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. 
Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -135,8 +188,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # Common Spec @@ -145,24 +200,35 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -An IngestorCluster template has been added to the splunk/splunk-enterprise Helm chart. The IndexerCluster template has also been enhanced to support new inputs. +Queue, ObjectStorage and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Queue, ObjectStorage, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
``` -busConfiguration:: +queue: enabled: true - name: bus-config - type: sqs_smartbus + name: queue + provider: sqs sqs: - queueName: sqs-test + name: sqs-test authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret +``` + +``` +objectStorage: + enabled: true + name: os + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: ingestion/smartbus-test ``` ``` @@ -171,8 +237,10 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -189,8 +257,10 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` # Service Account @@ -376,6 +446,14 @@ In the following example, the dashboard presents ingestion and indexing data in - [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) +# App Installation for Ingestor Cluster Instances + +Application installation is supported for Ingestor Cluster instances. However, as of now, applications are installed using local scope and if any application requires Splunk restart, there is no automated way to detect it and trigger automatically via Splunk Operator. + +Therefore, to be able to enforce Splunk restart for each of the Ingestor Cluster pods, it is recommended to add/update IngestorCluster CR annotations/labels and apply the new configuration which will trigger the rolling restart of Splunk pods for Ingestor Cluster. + +We are under the investigation on how to make it fully automated. 
What is more, ideally, update of annotations and labels should not trigger pod restart at all and we are investigating on how to fix this behaviour eventually. + # Example 1. Install CRDs and Splunk Operator for Kubernetes. @@ -492,45 +570,43 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install BusConfiguration resource. +3. Install Queue resource. ``` -$ cat bus.yaml +$ cat queue.yaml apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Queue metadata: - name: bus + name: queue finalizers: - enterprise.splunk.com/delete-pvc spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test + name: sqs-test authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test ``` ``` -$ kubectl apply -f bus.yaml +$ kubectl apply -f queue.yaml ``` ``` -$ kubectl get busconfiguration +$ kubectl get queue NAME PHASE AGE MESSAGE -bus Ready 20s +queue Ready 20s ``` ``` -kubectl describe busconfiguration -Name: bus +kubectl describe queue +Name: queue Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: BusConfiguration +Kind: Queue Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -540,13 +616,65 @@ Metadata: UID: 12345678-1234-5678-1234-012345678911 Spec: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test + Auth Region: us-west-2 + DLQ: sqs-dlq-test Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Name: sqs-test + Provider: sqs +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +4. Install ObjectStorage resource. 
+ +``` +$ cat os.yaml +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: ingestion/smartbus-test +``` + +``` +$ kubectl apply -f os.yaml +``` + +``` +$ kubectl get os +NAME PHASE AGE MESSAGE +os Ready 20s +``` + +``` +kubectl describe os +Name: os +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: ObjectStorage +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: ingestion/smartbus-test + Provider: s3 Status: Message: Phase: Ready @@ -554,7 +682,7 @@ Status: Events: ``` -4. Install IngestorCluster resource. +5. Install IngestorCluster resource. ``` $ cat ingestor.yaml @@ -568,8 +696,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -598,10 +728,13 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Configuration Ref: - Name: bus-config + Queue Ref: + Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + Object Storage Ref: + Name: os + Namespace: default Replicas: 3 Service Account: ingestor-sa Status: @@ -616,15 +749,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus Configuration: - Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Credential Secret Version: 33744270 
Message: Phase: Ready Ready Replicas: 3 @@ -678,7 +803,7 @@ remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus ``` -5. Install IndexerCluster resource. +6. Install IndexerCluster resource. ``` $ cat idxc.yaml @@ -704,8 +829,10 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os ``` ``` @@ -777,7 +904,7 @@ disabled = false disabled = true ``` -6. Install Horizontal Pod Autoscaler for IngestorCluster. +7. Install Horizontal Pod Autoscaler for IngestorCluster. ``` $ cat hpa-ing.yaml @@ -860,7 +987,7 @@ NAME REFERENCE TARGETS MINPODS MAXPODS REPLICA ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s ``` -7. Generate fake load. +8. Generate fake load. - HEC_TOKEN: HEC token for making fake calls diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml deleted file mode 100644 index 2a746968e..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.busConfiguration }} -{{- if .Values.busConfiguration.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration -metadata: - name: {{ .Values.busConfiguration.name }} - namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} - {{- with .Values.busConfiguration.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.busConfiguration.additionalAnnotations }} - annotations: -{{ toYaml . 
| nindent 4 }} - {{- end }} -spec: - type: {{ .Values.busConfiguration.type | quote }} - {{- with .Values.busConfiguration.sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 77c24d500..e5541e017 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,15 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busConfigurationRef }} - busConfigurationRef: + {{- with $.Values.indexerCluster.queueRef }} + queueRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} + {{- end }} + {{- end }} + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index fd72da310..e5ab1258c 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,18 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busConfigurationRef }} - busConfigurationRef: - name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} - {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} + {{- with $.Values.ingestorCluster.queueRef }} + queueRef: + name: {{ $.Values.ingestorCluster.queueRef.name }} + {{- if $.Values.ingestorCluster.queueRef.namespace }} + namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} + {{- end }} + {{- end }} + {{- with $.Values.ingestorCluster.objectStorageRef }} + objectStorageRef: + name: {{ $.Values.ingestorCluster.objectStorageRef.name }} + {{- if $.Values.ingestorCluster.objectStorageRef.namespace }} + namespace: {{ $.Values.ingestorCluster.objectStorageRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml new file mode 100644 index 000000000..033aed904 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -0,0 +1,28 @@ +{{- if .Values.objectStorage }} +{{- if .Values.objectStorage.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: {{ .Values.objectStorage.name }} + namespace: {{ default .Release.Namespace .Values.objectStorage.namespaceOverride }} + {{- with .Values.objectStorage.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.objectStorage.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.objectStorage.provider | quote }} + {{- with .Values.objectStorage.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml new file mode 100644 index 000000000..06a3c5dbd --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -0,0 +1,38 @@ +{{- if .Values.queue }} +{{- if .Values.queue.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: {{ .Values.queue.name }} + namespace: {{ default .Release.Namespace .Values.queue.namespaceOverride }} + {{- with .Values.queue.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.queue.additionalAnnotations }} + annotations: +{{ toYaml . | nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.queue.provider | quote }} + {{- with .Values.queue.sqs }} + sqs: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .dlq }} + dlq: {{ .dlq | quote }} + {{- end }} + {{- if .name }} + name: {{ .name | quote }} + {{- end }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} + {{- end }} + {{- if .volumes }} + volumes: + {{ toYaml . 
| indent 4 }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index e49073398..6643728fa 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,9 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busConfigurationRef: {} + queueRef: {} + + objectStorageRef: {} searchHeadCluster: @@ -899,4 +901,6 @@ ingestorCluster: affinity: {} - busConfigurationRef: {} \ No newline at end of file + queueRef: {} + + objectStorageRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml index 2b5d51ec9..a952b174c 100644 --- a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml +++ b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml @@ -222,6 +222,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: @@ -300,6 +326,58 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues 
+ verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml similarity index 78% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml index 1475add32..d90f7673b 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml similarity index 76% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml index 500b1d100..ec9358b8d 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-objectstorage-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - objectstorages verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - objectstorages/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml new file mode 100644 index 000000000..6c04be75b --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml new file mode 100644 index 000000000..2c81b98fd --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 4eab5275e..77be54727 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - queues verbs: - create - delete @@ -263,13 +263,39 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - queues/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - queues/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status verbs: - get - patch diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/busconfiguration_controller.go deleted file mode 100644 index c8519c017..000000000 --- a/internal/controller/busconfiguration_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "time" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/common" - metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" - enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// BusConfigurationReconciler reconciles a BusConfiguration object -type BusConfigurationReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// TODO(user): Modify the Reconcile function to compare the state specified by -// the BusConfiguration object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") - - reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) - - // Fetch the BusConfiguration - instance := &enterpriseApi.BusConfiguration{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after - // reconcile request. Owned objects are automatically - // garbage collected. For additional cleanup logic use - // finalizers. Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") - } - - // If the reconciliation is paused, requeue - annotations := instance.GetAnnotations() - if annotations != nil { - if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { - return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil - } - } - - reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - - result, err := ApplyBusConfiguration(ctx, r.Client, instance) - if result.Requeue && result.RequeueAfter != 0 { - reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) - } - - return result, err -} - -var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return enterprise.ApplyBusConfiguration(ctx, client, instance) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.BusConfiguration{}). - WithEventFilter(predicate.Or( - common.GenerationChangedPredicate(), - common.AnnotationChangedPredicate(), - common.LabelChangedPredicate(), - common.SecretChangedPredicate(), - common.ConfigMapChangedPredicate(), - common.StatefulsetChangedPredicate(), - common.PodChangedPredicate(), - common.CrdChangedPredicate(), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: enterpriseApi.TotalWorker, - }). - Complete(r) -} diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/busconfiguration_controller_test.go deleted file mode 100644 index e08154211..000000000 --- a/internal/controller/busconfiguration_controller_test.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/testutils" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("BusConfiguration Controller", func() { - BeforeEach(func() { - time.Sleep(2 * time.Second) - }) - - AfterEach(func() { - - }) - - Context("BusConfiguration Management", func() { - - It("Get BusConfiguration custom resource should fail", func() { - namespace := "ns-splunk-bus-1" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - _, err := GetBusConfiguration("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) - - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create BusConfiguration custom resource with annotations should pause", func() { - namespace := "ns-splunk-bus-2" - annotations := make(map[string]string) - 
annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) - annotations = map[string]string{} - icSpec.Annotations = annotations - icSpec.Status.Phase = "Ready" - UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create BusConfiguration custom resource should succeeded", func() { - namespace := "ns-splunk-bus-3" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - annotations := make(map[string]string) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Cover Unused methods", func() { - namespace := "ns-splunk-bus-4" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - ctx := context.TODO() - builder := 
fake.NewClientBuilder() - c := builder.Build() - instance := BusConfigurationReconciler{ - Client: c, - Scheme: scheme.Scheme, - } - request := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "test", - Namespace: namespace, - }, - } - _, err := instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - bcSpec := testutils.NewBusConfiguration("test", namespace, "image") - Expect(c.Create(ctx, bcSpec)).Should(Succeed()) - - annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - annotations = map[string]string{} - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - bcSpec.DeletionTimestamp = &metav1.Time{} - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { - By("Expecting BusConfiguration custom resource to be retrieved successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - bc := &enterpriseApi.BusConfiguration{} - - err := k8sClient.Get(context.Background(), key, bc) - if err != nil { - return nil, err - } - - return bc, err -} - -func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be created successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - ingSpec := &enterpriseApi.BusConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - } - - 
Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - bc := &enterpriseApi.BusConfiguration{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) - if status != "" { - fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return bc -} - -func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be updated successfully") - - key := types.NamespacedName{ - Name: instance.Name, - Namespace: instance.Namespace, - } - - bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") - bcSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - bc := &enterpriseApi.BusConfiguration{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) - if status != "" { - fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return bc -} - -func DeleteBusConfiguration(name string, namespace string) { - By("Expecting BusConfiguration custom resource to be deleted successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - - Eventually(func() error { - bc := &enterpriseApi.BusConfiguration{} - _ = k8sClient.Get(context.Background(), key, bc) - err := k8sClient.Delete(context.Background(), bc) - return err - }, timeout, interval).Should(Succeed()) -} diff --git a/internal/controller/indexercluster_controller.go 
b/internal/controller/indexercluster_controller.go index 3cc840baa..4f83f5abe 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -148,6 +148,57 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider indexer clusters in the same namespace as the Secret + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), @@ -172,9 +223,37 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). 
- Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Queue{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + b, ok := obj.(*enterpriseApi.Queue) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.QueueRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.QueueRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } @@ -184,11 +263,11 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a2c5846df..b5aa3d911 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -50,6 +50,10 @@ type IngestorClusterReconciler struct { // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch // 
+kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues;objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status;objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers;objectstorages/finalizers,verbs=update + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by @@ -129,6 +133,57 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider ingestor clusters in the same namespace as the Secret + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). 
Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), @@ -141,9 +196,37 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Queue{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + queue, ok := obj.(*enterpriseApi.Queue) + if !ok { + return nil + } + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.QueueRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.QueueRef.Name == queue.Name && ns == queue.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } @@ -153,11 +236,11 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 5e7ae4b73..52519f724 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ 
b/internal/controller/ingestorcluster_controller_test.go @@ -71,12 +71,40 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady) + UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -91,7 +119,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: 
"https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -105,6 +161,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "ingestion/smartbus-test", + }, + }, + } + ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() @@ -121,7 +206,7 @@ var _ = Describe("IngestorCluster Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - icSpec := testutils.NewIngestorCluster("test", namespace, "image") + icSpec := testutils.NewIngestorCluster("test", namespace, "image", os, queue) Expect(c.Create(ctx, icSpec)).Should(Succeed()) annotations := make(map[string]string) @@ -164,7 +249,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, 
namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -184,8 +269,13 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } @@ -208,7 +298,7 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string return ic } -func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be updated successfully") key := types.NamespacedName{ @@ -216,7 +306,7 @@ func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enter Namespace: instance.Namespace, } - icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image") + icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image", os, queue) icSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed()) time.Sleep(2 * time.Second) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 52c4c1a1d..142a8720c 100644 --- a/internal/controller/suite_test.go +++ 
b/internal/controller/suite_test.go @@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") - } var _ = BeforeSuite(func(ctx context.Context) { @@ -117,37 +116,37 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseManagerReconciler{ + if err := (&IngestorClusterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseMasterReconciler{ + if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&MonitoringConsoleReconciler{ + if err := (&LicenseMasterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&SearchHeadClusterReconciler{ + if err := (&MonitoringConsoleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&StandaloneReconciler{ + if err := (&SearchHeadClusterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&IngestorClusterReconciler{ + if err := (&StandaloneReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index 9ca78593c..63a291a1d 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -46,7 +46,7 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone { } // NewIngestorCluster returns 
new IngestorCluster instance with its config hash -func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { +func NewIngestorCluster(name, ns, image string, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: enterpriseApi.IngestorClusterSpec{ @@ -54,28 +54,31 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } } -// NewBusConfiguration returns new BusConfiguration instance with its config hash -func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// NewQueue returns new Queue instance with its config hash +func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, + Spec: spec, + } +} + +// NewObjectStorage returns new ObjectStorage instance with its config hash +func NewObjectStorage(name, ns string, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, } } @@ 
-313,9 +316,6 @@ func NewIndexerCluster(name, ns, image string) *enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", - }, } return ad } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 5ac9b4a7a..a4aaa0824 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,118 +1,5 @@ --- -# assert for bus configurtion custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration -metadata: - name: bus-config -spec: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test -status: - phase: Ready - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: indexer -spec: - replicas: 3 - busConfigurationRef: - name: bus-config -status: - phase: Ready - busConfiguration: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - 
deadLetterQueueName: sqs-dlq-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-indexer-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-indexer-indexer-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IngestorCluster -metadata: - name: ingestor -spec: - replicas: 3 - busConfigurationRef: - name: bus-config -status: - phase: Ready - busConfiguration: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-ingestor-ingestor -status: - replicas: 3 - ---- -# check if secret object are created apiVersion: v1 kind: Secret metadata: - name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file + name: index-ing-sep-secret diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml new file mode 100644 index 000000000..591aa8fd5 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl create secret generic index-ing-sep-secret --from-literal=s3_access_key=$AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY --namespace $NAMESPACE + background: false + skipLogOutput: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml 
b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index daa1ab4ab..99669acf1 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -1,30 +1,112 @@ --- -# assert for ingestor cluster custom resource to be ready +# assert for queue custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: Queue +metadata: + name: queue +spec: + provider: sqs + sqs: + name: index-ingest-separation-test-q + authRegion: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: index-ingest-separation-test-dlq + +--- +# assert for object storage custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ObjectStorage +metadata: + name: os +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: index-ingest-separation-test-bucket/smartbus-test + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer +spec: + replicas: 3 + queueRef: + name: queue + objectStorageRef: + name: os +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-indexer-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-indexer-indexer-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 
kind: IngestorCluster metadata: name: ingestor spec: - replicas: 4 - busConfigurationRef: - name: bus-config + replicas: 3 + queueRef: + name: queue + objectStorageRef: + name: os status: phase: Ready - busConfiguration: - type: sqs_smartbus - sqs: - queueName: sqs-test - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test - ---- -# check for stateful sets and replicas updated + +--- +# check for stateful set and replicas as configured apiVersion: apps/v1 kind: StatefulSet metadata: name: splunk-ingestor-ingestor status: - replicas: 4 + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml new file mode 100644 index 000000000..8bf619148 --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -0,0 +1,23 @@ +--- +# assert for ingestor cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 4 + queueRef: + name: queue + objectStorageRef: + name: os +status: + phase: Ready + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 4 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml 
b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml rename to kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 6e87733cc..8c733e53b 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,24 +5,35 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -busConfiguration: +queue: enabled: true - name: bus-config - type: sqs_smartbus + name: queue + provider: sqs sqs: - queueName: sqs-test + name: index-ingest-separation-test-q authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: index-ingest-separation-test-dlq + volumes: + - name: helm-bus-secret-ref-test + secretRef: index-ing-sep-secret + +objectStorage: + enabled: true + name: os + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: index-ingest-separation-test-bucket/smartbus-test ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os clusterManager: enabled: true @@ -35,5 +46,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - 
busConfigurationRef: - name: bus-config + queueRef: + name: queue + objectStorageRef: + name: os diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index 6eb4d2f87..e51688661 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -1015,22 +1015,3 @@ func (c *SplunkClient) UpdateConfFile(scopedLog logr.Logger, fileName, property } return err } - -// Deletes conf files properties -func (c *SplunkClient) DeleteConfFileProperty(scopedLog logr.Logger, fileName, property string) error { - endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property) - - scopedLog.Info("Deleting conf file object", "fileName", fileName, "property", property) - request, err := http.NewRequest("DELETE", endpoint, nil) - if err != nil { - scopedLog.Error(err, "Failed to delete conf file object", "fileName", fileName, "property", property) - return err - } - - expectedStatus := []int{200, 201, 404} - err = c.Do(request, expectedStatus, nil) - if err != nil { - scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object deletion", expectedStatus), "fileName", fileName, "property", property) - } - return err -} diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go index 6b97c24d7..4934eedfc 100644 --- a/pkg/splunk/client/enterprise_test.go +++ b/pkg/splunk/client/enterprise_test.go @@ -705,35 +705,3 @@ func TestUpdateConfFile(t *testing.T) { t.Errorf("UpdateConfFile expected error on update, got nil") } } - -func TestDeleteConfFileProperty(t *testing.T) { - // Test successful deletion of conf property - property := "myproperty" - fileName := "outputs" - - reqLogger := log.FromContext(context.TODO()) - scopedLog := reqLogger.WithName("TestDeleteConfFileProperty") - - wantDeleteRequest, _ := http.NewRequest("DELETE", fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), nil) - - mockSplunkClient := 
&spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 200, "", nil) - - c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") - c.Client = mockSplunkClient - - err := c.DeleteConfFileProperty(scopedLog, fileName, property) - if err != nil { - t.Errorf("DeleteConfFileProperty err = %v", err) - } - mockSplunkClient.CheckRequests(t, "TestDeleteConfFileProperty") - - // Negative test: error on delete - mockSplunkClient = &spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 500, "", nil) - c.Client = mockSplunkClient - err = c.DeleteConfFileProperty(scopedLog, fileName, property) - if err == nil { - t.Errorf("DeleteConfFileProperty expected error on delete, got nil") - } -} diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go deleted file mode 100644 index 43fd35f68..000000000 --- a/pkg/splunk/enterprise/busconfiguration.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" - splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource -func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - var err error - - // Unless modified, reconcile for this object will be requeued after 5 seconds - result := reconcile.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, - } - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("ApplyBusConfiguration") - - if cr.Status.ResourceRevMap == nil { - cr.Status.ResourceRevMap = make(map[string]string) - } - - eventPublisher, _ := newK8EventPublisher(client, cr) - ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - - cr.Kind = "BusConfiguration" - - // Initialize phase - cr.Status.Phase = enterpriseApi.PhaseError - - // Update the CR Status - defer updateCRStatus(ctx, client, cr, &err) - - // Validate and updates defaults for CR - err = validateBusConfigurationSpec(ctx, client, cr) - if err != nil { - eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) - scopedLog.Error(err, "Failed to validate bus configuration spec") - return result, err - } - - // Check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - terminating, err := splctrl.CheckForDeletion(ctx, cr, client) - if terminating && err != nil { - cr.Status.Phase = enterpriseApi.PhaseTerminating - } else { - result.Requeue = false - } - return result, err - } - - 
cr.Status.Phase = enterpriseApi.PhaseReady - - // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. - // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. - if !result.Requeue { - result.RequeueAfter = 0 - } - - return result, nil -} - -// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong -func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { - return validateBusConfigurationInputs(cr) -} - -func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { - // sqs_smartbus type is supported for now - if cr.Spec.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in bus configuration") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if !strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start 
with https://") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { - return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") - } - - return nil -} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go deleted file mode 100644 index 45d19bb40..000000000 --- a/pkg/splunk/enterprise/busconfiguration_test.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2025. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package enterprise - -import ( - "context" - "os" - "path/filepath" - "testing" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func init() { - GetReadinessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) - return fileLocation - } - GetLivenessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) - return fileLocation - } - GetStartupScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) - return fileLocation - } -} - -func TestApplyBusConfiguration(t *testing.T) { - os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - - ctx := context.TODO() - - scheme := 
runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() - - // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - Namespace: "test", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, - } - c.Create(ctx, busConfig) - - // ApplyBusConfiguration - result, err := ApplyBusConfiguration(ctx, c, busConfig) - assert.NoError(t, err) - assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) -} - -func TestValidateBusConfigurationInputs(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "othertype", - SQS: enterpriseApi.SQSSpec{}, - }, - } - - err := validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) - - busConfig.Spec.Type = "sqs_smartbus" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = 
validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.QueueName = "test-queue" - busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" - busConfig.Spec.SQS.AuthRegion = "" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 269753c5c..150dfdbbe 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -22,7 +22,6 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" @@ -427,9 +426,9 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr return 
splunkClient.BundlePush(true) } - + // helper function to get the list of ClusterManager types in the current namespace -func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (int, error) { +func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (int, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getClusterManagerList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 74b1b0a91..c3fb51615 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -19,7 +19,6 @@ import ( "context" "errors" "fmt" - "reflect" "regexp" "sort" "strconv" @@ -37,7 +36,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -78,7 +76,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -119,7 +118,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := 
newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterManagerPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -245,35 +244,87 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &busConfig) + }, &queue) if err != nil { return result, err } } + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + // Object Storage + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace + } + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.ObjectStorageRef.Name, + Namespace: ns, + }, &os) + if err != nil { + return result, err + } + } + if os.Spec.Provider == "s3" { + if 
os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount + + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if secretChanged || serviceAccountChanged { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } + + cr.Status.CredentialSecretVersion = 
version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } } @@ -366,7 +417,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -409,7 +461,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterMasterPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -536,35 +588,86 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.QueueRef.Name, + Namespace: ns, + }, &queue) + if err != nil { + return result, err + } + } + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } 
+ + // Object Storage + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &busConfig) + }, &os) if err != nil { return result, err } } + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } + + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + if cr.Spec.QueueRef.Name != "" { + if secretChanged || serviceAccountChanged { + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed 
to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } + + cr.Status.CredentialSecretVersion = version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } } @@ -647,12 +750,13 @@ type indexerClusterPodManager struct { } // newIndexerClusterPodManager function to create pod manager this is added to write unit test case -var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { +var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } @@ -1154,7 +1258,7 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien } // helper function to get the list of IndexerCluster types in the current namespace -func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.IndexerClusterList, error) { +func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, 
listOpts []rclient.ListOption) (enterpriseApi.IndexerClusterList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getIndexerClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) @@ -1231,12 +1335,12 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } -var newSplunkClientForBusPipeline = splclient.NewSplunkClient +var newSplunkClientForQueuePipeline = splclient.NewSplunkClient -// Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +// updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so +func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas @@ -1250,56 +1354,39 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne if err != nil { return err } - splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && 
busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { - updateErr = err - } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { - updateErr = err - } - afterDelete = true - } + queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, accessKey, secretKey) - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) - - for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range queueOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, pbVal := range queueInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, field := range pipelineChangedFields { + for _, field := range pipelineInputs { if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { updateErr = err } } } - // Do NOT restart Splunk return updateErr } -// getChangedBusFieldsForIndexer 
returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare bus fields - oldPB := busConfigIndexerStatus.Status.BusConfiguration - newPB := busConfig.Spec - - // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) +// getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files +func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { + // Queue Inputs + queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, accessKey, secretKey) - // Always set all pipeline fields, not just changed ones - pipelineChangedFields = pipelineConfig(true) + // Pipeline inputs + pipelineInputs = getPipelineInputsForConfFile(true) return } @@ -1315,34 +1402,52 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) - } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) - } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) - } - if 
oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) - } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) - } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) +// getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files +func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) { + queueProvider := "" + authRegion := "" + endpoint := "" + dlq := "" + if queue.Provider == "sqs" { + queueProvider = "sqs_smartbus" + authRegion = queue.SQS.AuthRegion + endpoint = queue.SQS.Endpoint + dlq = queue.SQS.DLQ + } + + path := "" + osEndpoint := "" + osProvider := "" + if os.Provider == "s3" { + osProvider = "sqs_smartbus" + osEndpoint = os.S3.Endpoint + path = os.S3.Path + if !strings.HasPrefix(path, "s3://") { + path = "s3://" + path + } } + inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + []string{"remote_queue.type", queueProvider}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint}, + 
[]string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), dlq}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) + // TODO: Handle credentials change + if accessKey != "" && secretKey != "" { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) + } + outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index e541fc4f6..86f6d2074 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,23 +1344,21 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - 
LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -1371,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -1571,7 +1569,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { return nil } - newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { + newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, @@ -1581,6 +1579,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { c.Client = mclient return c }, + c: c, } } @@ -2047,62 +2046,81 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedBusFieldsForIndexer(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ +func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) { + provider := "sqs_smartbus" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: 
"s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } - newCR := &enterpriseApi.IndexerCluster{ - Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", }, }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + key := "key" + secret := "secret" + + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret) + assert.Equal(t, 10, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", 
busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - }, busChangedFieldsInputs) - - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + }, queueChangedFieldsInputs) + + assert.Equal(t, 12, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - {fmt.Sprintf("remote_queue.%s.encoding_format", 
busConfig.Spec.Type), "s2s"}, - }, busChangedFieldsOutputs) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + }, queueChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -2114,31 +2132,57 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePullBusChange(t *testing.T) { +func TestUpdateIndexerConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + accessKey := "accessKey" + secretKey := "secretKey" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: 
"s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, queue) + + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", }, }, } + c.Create(ctx, &os) - newCR := &enterpriseApi.IndexerCluster{ + cr := &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -2147,14 +2191,20 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, + ReadyReplicas: 3, + CredentialSecretVersion: "123", }, } + c.Create(ctx, cr) pod0 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2196,6 +2246,10 @@ func TestHandlePullBusChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-indexer-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -2206,18 +2260,9 @@ func TestHandlePullBusChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, &busConfig) - c.Create(ctx, newCR) - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := 
&indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -2226,43 +2271,43 @@ func TestHandlePullBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + 
{fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } propertyKVListOutputs := propertyKVList - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -2274,7 +2319,7 @@ func TestHandlePullBusChange(t *testing.T) { {"pipeline:typing", "disabled", "true"}, } - for i 
:= 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-indexer-%d", i) baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName) @@ -2288,9 +2333,9 @@ func TestHandlePullBusChange(t *testing.T) { } } - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } @@ -2308,26 +2353,26 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ 
:= http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { - newSplunkClientForBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { +func newTestIndexerQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { + newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -2336,11 +2381,11 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } return &indexerClusterPodManager{ - newSplunkClient: newSplunkClientForBusPipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } -func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -2352,28 +2397,45 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: 
"https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, queue) + + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", }, }, } - c.Create(ctx, &busConfig) + c.Create(ctx, os) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2395,9 +2457,13 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ @@ -2511,14 +2577,14 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} base := "https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs" - queue := "remote_queue:test-queue" + q := "remote_queue:test-queue" - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, q), ""), 200, "", nil) // inputs.conf - mockHTTPClient.AddHandler(mustReq("POST", 
fmt.Sprintf("%s/conf-inputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, q), ""), 200, "", nil) // default-mode.conf pipelineFields := []string{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 4f96f05bc..8280ddc0d 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" "github.com/go-logr/logr" @@ -71,9 +72,9 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas @@ -210,34 +211,86 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &busConfig) + }, &queue) if err != nil { return result, err } } + if queue.Spec.Provider == "sqs" { + if 
queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } - // If bus config is updated - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { - mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + // Object Storage + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace + } + err = client.Get(ctx, types.NamespacedName{ + Name: cr.Spec.ObjectStorageRef.Name, + Namespace: ns, + }, &os) + if err != nil { + return result, err + } + } + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + } + } + + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount - err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + // If queue is updated + if secretChanged || serviceAccountChanged { + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline 
config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + for i := int32(0); i < cr.Spec.Replicas; i++ { + ingClient := mgr.getClient(ctx, i) + err = ingClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "ingestor", i) + } + + cr.Status.CredentialSecretVersion = version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } // Upgrade fron automated MC to MC CRD @@ -280,9 +333,30 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, nil } +// getClient for ingestorClusterPodManager returns a SplunkClient for the member n +func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ingestorClusterPodManager.getClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace()) + + // Get Pod Name + memberName := GetSplunkStatefulsetPodName(SplunkIngestor, mgr.cr.GetName(), n) + + // Get Fully Qualified Domain Name + fqdnName := splcommon.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, mgr.cr.GetName(), true))) + + // Retrieve admin password from Pod + adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password") + if err != nil { + scopedLog.Error(err, "Couldn't retrieve the admin password from pod") + } + + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd) +} + // 
validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { - // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster + // An IngestorCluster requires a minimum of 3 replicas (ingestion pods); any lower value is defaulted to 3 if cr.Spec.Replicas < 3 { cr.Spec.Replicas = 3 } @@ -310,10 +384,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +// updateIngestorConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so +func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -329,67 +403,57 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && 
newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { - updateErr = err - } - afterDelete = true - } - - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, accessKey, secretKey) - for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + for _, input := range queueInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil { updateErr = err } } - for _, field := range pipelineChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { + for _, input := range pipelineInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", input[0], [][]string{{input[1], input[2]}}); err != nil { updateErr = err } } } - // Do NOT restart Splunk return updateErr } -// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := &busConfigIngestorStatus.Status.BusConfiguration - newPB := &busConfig.Spec +// getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline 
inputs for ingestor pods conf files +func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, pipelineInputs [][]string) { + // Queue Inputs + queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, accessKey, secretKey) - // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, afterDelete) - - // Always changed pipeline fields - pipelineChangedFields = pipelineConfig(false) + // Pipeline inputs + pipelineInputs = getPipelineInputsForConfFile(false) return } type ingestorClusterPodManager struct { + c splcommon.ControllerClient log logr.Logger cr *enterpriseApi.IngestorCluster secrets *corev1.Secret newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } -// newIngestorClusterPodManager function to create pod manager this is added to write unit test case -var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) ingestorClusterPodManager { +// newIngestorClusterPodManager creates pod manager to handle unit test cases +var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } -func pipelineConfig(isIndexer bool) (output [][]string) { - output = append(output, +// getPipelineInputsForConfFile returns a list of pipeline inputs for conf file +func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { + config = append(config, []string{"pipeline:remotequeueruleset", "disabled", "false"}, []string{"pipeline:ruleset", "disabled", "true"}, []string{"pipeline:remotequeuetyping", "disabled", "false"}, @@ -397,36 +461,54 @@ func 
pipelineConfig(isIndexer bool) (output [][]string) { []string{"pipeline:typing", "disabled", "true"}, ) if !isIndexer { - output = append(output, []string{"pipeline:indexerPipe", "disabled", "true"}) + config = append(config, []string{"pipeline:indexerPipe", "disabled", "true"}) } - return output + + return } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - output = append(output, []string{"remote_queue.type", newBus.Type}) - } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) - } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) +// getQueueAndObjectStorageInputsForIngestorConfFiles returns a list of queue and object storage inputs for conf files +func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (config [][]string) { + queueProvider := "" + authRegion := "" + endpoint := "" + dlq := "" + if queue.Provider == "sqs" { + queueProvider = "sqs_smartbus" + authRegion = queue.SQS.AuthRegion + endpoint = queue.SQS.Endpoint + dlq = queue.SQS.DLQ } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) - } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) - } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete {
- output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + + path := "" + osEndpoint := "" + osProvider := "" + if os.Provider == "s3" { + osProvider = "sqs_smartbus" + osEndpoint = os.S3.Endpoint + path = os.S3.Path + if !strings.HasPrefix(path, "s3://") { + path = "s3://" + path + } } - output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}) + config = append(config, + []string{"remote_queue.type", queueProvider}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), dlq}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + ) - return output + if accessKey != "" && secretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) + } + + return } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index bee3df4d6..cdeaa14ac 
100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -25,6 +25,7 @@ import ( "github.com/go-logr/logr" enterpriseApi "github.com/splunk/splunk-operator/api/v4" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" "github.com/stretchr/testify/assert" @@ -32,7 +33,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -63,28 +63,47 @@ func TestApplyIngestorCluster(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, busConfig) + c.Create(ctx, queue) + + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + 
}, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", + }, + }, + } + c.Create(ctx, os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -98,11 +117,16 @@ func TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 3, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Mock: true, + Mock: true, + ServiceAccount: "sa", + }, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } @@ -224,34 +248,12 @@ func TestApplyIngestorCluster(t *testing.T) { assert.True(t, result.Requeue) assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase) - // Ensure stored StatefulSet status reflects readiness after any reconcile modifications - fetched := &appsv1.StatefulSet{} - _ = c.Get(ctx, types.NamespacedName{Name: "splunk-test-ingestor", Namespace: "test"}, fetched) - fetched.Status.Replicas = replicas - fetched.Status.ReadyReplicas = replicas - fetched.Status.UpdatedReplicas = replicas - if fetched.Status.UpdateRevision == "" { - fetched.Status.UpdateRevision = "v1" - } - c.Update(ctx, fetched) - - // Guarantee all pods have matching revision label - for _, pn := range []string{"splunk-test-ingestor-0", "splunk-test-ingestor-1", "splunk-test-ingestor-2"} { - p := &corev1.Pod{} - if err := c.Get(ctx, types.NamespacedName{Name: pn, Namespace: "test"}, p); err == nil { - if p.Labels == nil { - p.Labels = map[string]string{} - } - p.Labels["controller-revision-hash"] = fetched.Status.UpdateRevision - c.Update(ctx, p) - } - } - // outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} - newIngestorClusterPodManager = func(l logr.Logger, cr 
*enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc) ingestorClusterPodManager { + newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ + c: c, log: l, cr: cr, secrets: secret, newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient} @@ -261,19 +263,20 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + 
{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -299,6 +302,13 @@ func TestApplyIngestorCluster(t *testing.T) { } } + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-ingestor-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/services/server/control/restart", podName, cr.GetName(), cr.GetNamespace()) + req, _ := http.NewRequest("POST", baseURL, nil) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + // Second reconcile should now yield Ready cr.Status.TelAppInstalled = true result, err = ApplyIngestorCluster(ctx, c, cr) @@ -310,23 +320,21 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: 
"s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -341,8 +349,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -395,54 +403,70 @@ func TestGetIngestorStatefulSet(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-t
est-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPres
ent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedBusFieldsForIngestor(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ +func TestGetQueueAndPipelineInputsForIngestorConfFiles(t 
*testing.T) { + provider := "sqs_smartbus" + + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } - newCR := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + os := enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", }, }, - Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) + key := "key" + secret := "secret" + + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &os.Spec, key, secret) - assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, 12, len(queueInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", 
busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - }, busChangedFields) - - assert.Equal(t, 6, len(pipelineChangedFields)) + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + }, queueInputs) + + assert.Equal(t, 6, len(pipelineInputs)) assert.Equal(t, [][]string{ {"pipeline:remotequeueruleset", "disabled", "false"}, {"pipeline:ruleset", "disabled", "true"}, @@ -450,33 +474,56 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { 
{"pipeline:remotequeueoutput", "disabled", "false"}, {"pipeline:typing", "disabled", "true"}, {"pipeline:indexerPipe", "disabled", "true"}, - }, pipelineChangedFields) + }, pipelineInputs) } -func TestHandlePushBusChange(t *testing.T) { +func TestUpdateIngestorConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + accessKey := "accessKey" + secretKey := "secretKey" + + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "queue", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - newCR := &enterpriseApi.IngestorCluster{ + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", + }, + }, + } + + cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", }, @@ -485,13 +532,17 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + QueueRef: 
corev1.ObjectReference{ + Name: queue.Name, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, + Replicas: 3, + ReadyReplicas: 3, + CredentialSecretVersion: "123", }, } @@ -535,6 +586,10 @@ func TestHandlePushBusChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-ingestor-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -545,17 +600,10 @@ func TestHandlePushBusChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -564,31 +612,31 @@ func TestHandlePushBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - 
{fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -601,9 +649,9 @@ func TestHandlePushBusChange(t *testing.T) { {"pipeline:indexerPipe", "disabled", "true"}, } - for i := 0; i < 
int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-ingestor-%d", i) - baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, newCR.GetName(), newCR.GetNamespace()) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace()) for _, field := range propertyKVList { req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) @@ -615,32 +663,32 @@ func TestHandlePushBusChange(t *testing.T) { } } - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, 
strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushBusPipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestIngestorQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -649,6 +697,6 @@ func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inge } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushBusPipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 64de4a2de..77c58c328 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -33,7 +33,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -207,7 +206,7 @@ func getMonitoringConsoleStatefulSet(ctx context.Context, client splcommon.Contr } // helper function to get the list of MonitoringConsole types in the current namespace -func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr 
splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.MonitoringConsoleList, error) { +func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.MonitoringConsoleList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getMonitoringConsoleList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/objectstorage.go b/pkg/splunk/enterprise/objectstorage.go new file mode 100644 index 000000000..4db3dcaee --- /dev/null +++ b/pkg/splunk/enterprise/objectstorage.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyObjectStorage reconciles the state of an IngestorCluster custom resource +func ApplyObjectStorage(ctx context.Context, client client.Client, cr *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "ObjectStorage" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
+ if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/objectstorage_test.go b/pkg/splunk/enterprise/objectstorage_test.go new file mode 100644 index 000000000..1c91f131d --- /dev/null +++ b/pkg/splunk/enterprise/objectstorage_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyObjectStorage(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := 
fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + os := &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: "test", + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "bucket/key", + }, + }, + } + c.Create(ctx, os) + + // ApplyObjectStorage + result, err := ApplyObjectStorage(ctx, c, os) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, os.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, os.Status.Phase) +} diff --git a/pkg/splunk/enterprise/queue.go b/pkg/splunk/enterprise/queue.go new file mode 100644 index 000000000..1f36f6bad --- /dev/null +++ b/pkg/splunk/enterprise/queue.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyQueue reconciles the state of an IngestorCluster custom resource +func ApplyQueue(ctx context.Context, client client.Client, cr *enterpriseApi.Queue) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "Queue" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/queue_test.go b/pkg/splunk/enterprise/queue_test.go new file mode 100644 index 000000000..767d33e83 --- /dev/null +++ b/pkg/splunk/enterprise/queue_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2025. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "os" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestApplyQueue(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + queue := &enterpriseApi.Queue{ + TypeMeta: metav1.TypeMeta{ + Kind: "Queue", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: "test", + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, queue) + + // ApplyQueue + result, err := ApplyQueue(ctx, c, queue) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, queue.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, queue.Status.Phase) +} diff --git a/pkg/splunk/enterprise/types.go 
b/pkg/splunk/enterprise/types.go index 6ebd3df34..4267662d8 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,11 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBusConfiguration is the bus configuration instance - SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkQueue is the queue instance + SplunkQueue InstanceType = "queue" + + // SplunkObjectStorage is the object storage instance + SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -294,8 +297,10 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "BusConfiguration": - return SplunkBusConfiguration.ToString() + case "Queue": + return SplunkQueue.ToString() + case "ObjectStorage": + return SplunkObjectStorage.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go index 5d50e8cec..71fc017da 100644 --- a/pkg/splunk/enterprise/upgrade.go +++ b/pkg/splunk/enterprise/upgrade.go @@ -10,7 +10,6 @@ import ( appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -161,8 +160,8 @@ IndexerCluster: } // check if cluster is multisite if clusterInfo.MultiSite == "true" { - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } indexerList, err := getIndexerClusterList(ctx, c, cr, opts) if err != nil { @@ -220,8 +219,8 @@ 
SearchHeadCluster: // check if a search head cluster exists with the same ClusterManager instance attached searchHeadClusterInstance := enterpriseApi.SearchHeadCluster{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 38853aab0..88a85b448 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -417,6 +417,27 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } +// GetQueueRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, string, error) { + namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) + if err != nil { + return "", "", "", err + } + + accessKey := string(namespaceScopedSecret.Data[s3AccessKey]) + secretKey := string(namespaceScopedSecret.Data[s3SecretKey]) + + version := namespaceScopedSecret.ResourceVersion + + if accessKey == "" { + return "", "", "", errors.New("access Key is missing") + } else if secretKey == "" { + return "", "", "", errors.New("secret Key is missing") + } + + return accessKey, secretKey, version, nil +} + // getLocalAppFileName generates the local app file name // For e.g., if the app package name is sample_app.tgz // and etag is "abcd1234", then it will be downloaded locally as sample_app.tgz_abcd1234 @@ -2291,19 +2312,33 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro 
origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "BusConfiguration": - latestBusCR := &enterpriseApi.BusConfiguration{} - err = client.Get(ctx, namespacedName, latestBusCR) + case "Queue": + latestQueueCR := &enterpriseApi.Queue{} + err = client.Get(ctx, namespacedName, latestQueueCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + origCR.(*enterpriseApi.Queue).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.Queue).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) - return latestBusCR, nil + origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) + return latestQueueCR, nil + + case "ObjectStorage": + latestOsCR := &enterpriseApi.ObjectStorage{} + err = client.Get(ctx, namespacedName, latestOsCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.ObjectStorage).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.ObjectStorage).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.ObjectStorage).Status.DeepCopyInto(&latestOsCR.Status) + return latestOsCR, nil case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} @@ -2533,7 +2568,7 @@ func loadFixture(t *testing.T, filename string) string { if err != nil { t.Fatalf("Failed to load fixture %s: %v", filename, err) } - + // Compact the JSON to match the output from json.Marshal var compactJSON bytes.Buffer if err := json.Compact(&compactJSON, data); err != nil { diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index f5405b2cf..35523a028 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2624,6 +2624,9 @@ func TestUpdateCRStatus(t *testing.T) { 
WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). + WithStatusSubresource(&enterpriseApi.IngestorCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) c := builder.Build() ctx := context.TODO() @@ -3304,6 +3307,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index ba0162ffa..2d150f5ac 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index afc7abae6..904433195 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ 
b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index 0622700a4..c7fea6ff3 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, 
deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 2a0af0b3b..4412efe43 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 02ad17cfb..66c553e47 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for 
License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c040802f8..8bc789ac7 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,27 +39,32 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + queue = enterpriseApi.QueueSpec{ + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue", + Name: "index-ingest-separation-test-q", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "index-ingest-separation-test-dlq", + }, + } + objectStorage = enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: 
"index-ingest-separation-test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" inputs = []string{ - "[remote_queue:test-queue]", + "[remote_queue:index-ingest-separation-test-q]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") @@ -80,42 +85,10 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue-updated", - }, - } - - updatedInputs = []string{ - "[remote_queue:test-queue-updated]", - "remote_queue.type = sqs_smartbus", - "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", - "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - 
"remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", - "remote_queue.sqs_smartbus.retry_policy = max", - "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} - updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") - updatedDefaultsAll = []string{ - "[pipeline:remotequeueruleset]\ndisabled = false", - "[pipeline:ruleset]\ndisabled = false", - "[pipeline:remotequeuetyping]\ndisabled = false", - "[pipeline:remotequeueoutput]\ndisabled = false", - "[pipeline:typing]\ndisabled = true", - } - updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") - inputsShouldNotContain = []string{ - "[remote_queue:test-queue]", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "[remote_queue:index-ingest-separation-test-q]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 8bccddb47..f3eac42e5 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -75,18 +75,28 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // TODO: 
Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec + + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -96,7 +106,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = 
deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -125,31 +135,42 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus Configuration - busConfiguration := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, "bus-config", busConfiguration) - Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) - err = deployment.DeleteCR(ctx, busConfiguration) - Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + // Delete the Queue + q = &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", q) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", q) + err = deployment.DeleteCR(ctx, q) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", q) + + // Delete the ObjectStorage + objStorage = &enterpriseApi.ObjectStorage{} + err = deployment.GetInstance(ctx, "os", objStorage) + Expect(err).To(Succeed(), "Unable to get ObjectStorage instance", "ObjectStorage Name", objStorage) + err = deployment.DeleteCR(ctx, objStorage) + Expect(err).To(Succeed(), "Unable to delete ObjectStorage", "ObjectStorage Name", objStorage) }) }) Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - 
testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec - // Upload apps to S3 - testcaseEnvInst.Log.Info("Upload apps to S3") - appFileList := testenv.GetAppFileList(appListV1) - _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) - Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") + + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) @@ -157,12 +178,13 @@ var _ = Describe("indingsep test", func() { appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) ic := &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: deployment.GetName() + "-ingest", - Namespace: testcaseEnvInst.GetName(), + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, Spec: enterpriseApi.IngestorClusterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - 
ServiceAccount: serviceAccountName, + // ServiceAccount: serviceAccountName, LivenessInitialDelaySeconds: 600, ReadinessInitialDelaySeconds: 50, StartupProbe: &enterpriseApi.Probe{ @@ -188,9 +210,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + QueueRef: v1.ObjectReference{Name: q.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -202,6 +225,12 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + // Verify Ingestor Cluster Pods have apps installed testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} @@ -234,120 +263,28 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) - - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus 
Configuration") - - // Deploy Ingestor Cluster - testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") - - // Deploy Cluster Manager - testcaseEnvInst.Log.Info("Deploy Cluster Manager") - _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) - Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") - - // Deploy Indexer Cluster - testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Cluster Manager is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") - testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Ingestor Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") - ingest := &enterpriseApi.IngestorCluster{} - err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) - Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") - - // Verify Ingestor Cluster Status - testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same 
as provided as input") - - // Get instance of current Indexer Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") - index := &enterpriseApi.IndexerCluster{} - err = deployment.GetInstance(ctx, deployment.GetName()+"-idxc", index) - Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") - - // Verify Indexer Cluster Status - testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same as provided as input") - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods := testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, outputs, true) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, defaultsAll, true) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - testenv.ValidateContent(envVars, awsEnvVars, true) - } 
+ // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} + queue.SQS.VolList = volumeSpec - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, defaultsIngest, true) - } else if strings.Contains(pod, "idxc") { - // Verify inputs.conf - testcaseEnvInst.Log.Info("Verify inputs.conf") - inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, inputs, true) - } - } - }) - }) - - Context("Ingestor and Indexer deployment", func() { - It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { - // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, 
deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -357,7 +294,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -372,26 +309,6 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus Configuration CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus Configuration CR with latest config") - bus := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") - - // Update instance of BusConfiguration CR with new bus configuration - testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") - bus.Spec = updateBus - err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") - - // Ensure that Ingestor Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster has not been restarted - 
testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Ingestor Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") ingest := &enterpriseApi.IngestorCluster{} @@ -400,7 +317,8 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("")), "Ingestor queue status credential access secret version is empty") + Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("0")), "Ingestor queue status credential access secret version is 0") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -410,7 +328,8 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.CredentialSecretVersion).To(Not(Equal("")), "Indexer queue status credential access secret version is empty") + Expect(index.Status.CredentialSecretVersion).To(Not(Equal("0")), "Indexer queue status credential access secret version is 0") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -424,8 +343,7 @@ var _ = Describe("indingsep test", func() { outputsPath := "opt/splunk/etc/system/local/outputs.conf" outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - 
testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) + testenv.ValidateContent(outputsConf, outputs, true) // Verify default-mode.conf testcaseEnvInst.Log.Info("Verify default-mode.conf") @@ -451,52 +369,7 @@ var _ = Describe("indingsep test", func() { inputsPath := "opt/splunk/etc/system/local/inputs.conf" inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) - } - } - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods = testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true) - - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - 
testenv.ValidateContent(envVars, awsEnvVars, true) - } - - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true) - } else if strings.Contains(pod, "idxc") { - // Verify inputs.conf - testcaseEnvInst.Log.Info("Verify inputs.conf") - inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) + testenv.ValidateContent(inputsConf, inputs, true) } } }) diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 2e312c652..781e5b6f0 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, 
d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,36 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBusConfiguration deploys the bus configuration -func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { - d.testenv.Log.Info("Deploying bus configuration", "name", name) +// DeployQueue deploys the queue +func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpriseApi.QueueSpec) (*enterpriseApi.Queue, error) { + d.testenv.Log.Info("Deploying queue", "name", name) - busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) - pdata, _ := json.Marshal(busCfg) + queueCfg := newQueue(name, d.testenv.namespace, queue) + pdata, _ := 
json.Marshal(queueCfg) - d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, busCfg) + d.testenv.Log.Info("queue spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, queueCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.BusConfiguration), err + return deployed.(*enterpriseApi.Queue), err +} + +// DeployObjectStorage deploys the object storage +func (d *Deployment) DeployObjectStorage(ctx context.Context, name string, objStorage enterpriseApi.ObjectStorageSpec) (*enterpriseApi.ObjectStorage, error) { + d.testenv.Log.Info("Deploying object storage", "name", name) + + objStorageCfg := newObjectStorage(name, d.testenv.namespace, objStorage) + pdata, _ := json.Marshal(objStorageCfg) + + d.testenv.Log.Info("object storage spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, objStorageCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.ObjectStorage), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -632,13 +648,22 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "BusConfiguration": - current := &enterpriseApi.BusConfiguration{} + case "Queue": + current := &enterpriseApi.Queue{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.Queue) + current.Spec = ucr.Spec + cobject = current + case "ObjectStorage": + current := &enterpriseApi.ObjectStorage{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.BusConfiguration) + ucr := cr.(*enterpriseApi.ObjectStorage) current.Spec = ucr.Spec cobject = current case "ClusterMaster": @@ -740,7 +765,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx 
context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -798,7 +823,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -870,7 +895,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -931,7 +956,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1067,7 +1092,7 @@ func (d *Deployment) 
DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1122,7 +1147,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1227,7 +1252,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1305,7 +1330,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1405,7 +1430,7 @@ func (d *Deployment) 
DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1509,7 +1534,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1590,7 +1615,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1662,7 +1687,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1756,7 +1781,7 @@ func (d *Deployment) 
DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1856,7 +1881,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go index 0eb2b485c..f696a4a17 100644 --- a/test/testenv/remote_index_utils.go +++ b/test/testenv/remote_index_utils.go @@ -86,6 +86,14 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string, return true } +// GenerateQueueVolumeSpec return VolumeSpec struct with given values +func GenerateQueueVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { + return enterpriseApi.VolumeSpec{ + Name: name, + SecretRef: secretRef, + } +} + // GenerateIndexVolumeSpec return VolumeSpec struct with given values func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string, region string) enterpriseApi.VolumeSpec { return enterpriseApi.VolumeSpec{ diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go index a1081e0a0..737aaa9a6 100644 --- a/test/testenv/testcaseenv.go +++ b/test/testenv/testcaseenv.go @@ 
-35,24 +35,25 @@ import ( // TestCaseEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run test cases against type TestCaseEnv struct { - kubeClient client.Client - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string - clusterWideOperator string + kubeClient client.Client + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret string + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string + clusterWideOperator string } // GetKubeClient returns the kube client to talk to kube-apiserver @@ -79,21 +80,22 @@ func NewTestCaseEnv(kubeClient client.Client, name string, operatorImage string, } testenv := &TestCaseEnv{ - kubeClient: kubeClient, - name: name, - namespace: name, - serviceAccountName: name, - roleName: name, - roleBindingName: name, - operatorName: "splunk-op-" + name, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: name, - licenseFilePath: licenseFilePath, - s3IndexSecret: "splunk-s3-index-" + name, - debug: os.Getenv("DEBUG"), - clusterWideOperator: installOperatorClusterWide, + kubeClient: kubeClient, + name: name, + namespace: name, + serviceAccountName: name, + roleName: name, + roleBindingName: name, + operatorName: "splunk-op-" + name, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: name, + licenseFilePath: licenseFilePath, + s3IndexSecret: 
"splunk-s3-index-" + name, + indexIngestSepSecret: "splunk--index-ingest-sep-" + name, + debug: os.Getenv("DEBUG"), + clusterWideOperator: installOperatorClusterWide, } testenv.Log = logf.Log.WithValues("testcaseenv", testenv.name) @@ -156,6 +158,7 @@ func (testenv *TestCaseEnv) setup() error { switch ClusterProvider { case "eks": testenv.createIndexSecret() + testenv.createIndexIngestSepSecret() case "azure": testenv.createIndexSecretAzure() case "gcp": @@ -588,11 +591,41 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error { return nil } +// CreateIndexIngestSepSecret creates secret object +func (testenv *TestCaseEnv) createIndexIngestSepSecret() error { + secretName := testenv.indexIngestSepSecret + ns := testenv.namespace + + data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")), + "s3_secret_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))} + secret := newSecretSpec(ns, secretName, data) + + if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil { + testenv.Log.Error(err, "Unable to create index and ingestion sep secret object") + return err + } + + testenv.pushCleanupFunc(func() error { + err := testenv.GetKubeClient().Delete(context.TODO(), secret) + if err != nil { + testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object") + return err + } + return nil + }) + return nil +} + // GetIndexSecretName return index secret object name func (testenv *TestCaseEnv) GetIndexSecretName() string { return testenv.s3IndexSecret } +// GetIndexSecretName return index and ingestion separation secret object name +func (testenv *TestCaseEnv) GetIndexIngestSepSecretName() string { + return testenv.indexIngestSepSecret +} + // GetLMConfigMap Return name of license config map func (testenv *TestCaseEnv) GetLMConfigMap() string { return testenv.licenseCMName diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index f82310015..06fe304d4 100644 --- 
a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -160,24 +160,25 @@ type cleanupFunc func() error // TestEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run tests against type TestEnv struct { - kubeAPIServer string - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - kubeClient client.Client - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string + kubeAPIServer string + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret string + kubeClient client.Client + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string } func init() { @@ -231,19 +232,20 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st } testenv := &TestEnv{ - name: envName, - namespace: envName, - serviceAccountName: envName, - roleName: envName, - roleBindingName: envName, - operatorName: "splunk-op-" + envName, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: envName, - licenseFilePath: licenseFilePath, - s3IndexSecret: "splunk-s3-index-" + envName, - debug: os.Getenv("DEBUG"), + name: envName, + namespace: envName, + serviceAccountName: envName, + roleName: envName, + roleBindingName: envName, + operatorName: "splunk-op-" + envName, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: envName, + licenseFilePath: licenseFilePath, + s3IndexSecret: "splunk-s3-index-" + envName, + indexIngestSepSecret: 
"splunk--index-ingest-sep-" + name, + debug: os.Getenv("DEBUG"), } testenv.Log = logf.Log.WithValues("testenv", testenv.name) diff --git a/test/testenv/util.go b/test/testenv/util.go index b779ab3c3..366ea3668 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -396,8 +396,9 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + QueueRef: queue, + ObjectStorageRef: os, }, } @@ -405,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -425,24 +426,38 @@ func newIngestorCluster(name, ns 
string, replicas int, splunkImage string, busCo Image: splunkImage, }, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + QueueRef: queue, + ObjectStorageRef: os, }, } } -// newBusConfiguration creates and initializes the CR for BusConfiguration Kind -func newBusConfiguration(name, ns string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// newQueue creates and initializes the CR for Queue Kind +func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Queue", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: busConfig, + Spec: queue, + } +} +// newObjectStorage creates and initializes the CR for ObjectStorage Kind +func newObjectStorage(name, ns string, objStorage enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ + TypeMeta: metav1.TypeMeta{ + Kind: "ObjectStorage", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: objStorage, } }